code
stringlengths
281
23.7M
def git_upstream(destination):
    """Add the configured default fork as the 'upstream' git remote of *destination*.

    Streams git's combined stdout/stderr to our stdout as it arrives and
    returns True when git exits 0, False otherwise.
    """
    git_url = '/'.join((cli.args.baseurl, default_fork))
    git_cmd = ['git', '-C', destination, 'remote', 'add', 'upstream', git_url]
    with subprocess.Popen(git_cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True, encoding='utf-8') as p:
        for line in p.stdout:
            print(line, end='')
        # BUG FIX: Popen.returncode is not populated until the process has been
        # waited on. The old code read it immediately after draining stdout and
        # typically saw None, so it logged "... exited None" and returned False
        # even on success. Wait explicitly and use the real exit code.
        returncode = p.wait()
    if returncode == 0:
        cli.log.info('Added %s as remote upstream.', git_url)
        return True
    else:
        cli.log.error('%s exited %d', ' '.join(git_cmd), returncode)
        return False
class RandomizedSearchTest(unittest.TestCase):
    """Randomized-search tests for TuneSearchCV.

    Review fixes:
    - ``assertEquals`` (deprecated alias, removed in Python 3.12) replaced by
      ``assertEqual``.
    - The optional xgboost / lightgbm / catboost tests had lost their
      ``@unittest.skipIf`` decorators (only the argument tuples remained as
      bare expression statements, which skipped nothing). Restored.
    """

    def test_clone_estimator(self):
        # TuneSearchCV must survive sklearn.clone for several configurations.
        params = dict(lr=tune.loguniform(0.1, 1))
        random_search = TuneSearchCV(SGDClassifier(), param_distributions=params, return_train_score=True, n_jobs=2)
        clone(random_search)
        random_search = TuneSearchCV(SGDClassifier(), early_stopping=True, param_distributions=params, return_train_score=True, n_jobs=2)
        clone(random_search)
        random_search = TuneSearchCV(SGDClassifier(), early_stopping='HyperBandScheduler', param_distributions=params, return_train_score=True, n_jobs=2)
        clone(random_search)
        random_search = TuneSearchCV(SGDClassifier(), early_stopping=True, search_optimization='bohb', param_distributions=params, return_train_score=True, n_jobs=2)
        clone(random_search)

    def test_random_search_cv_results(self):
        # cv_results_ must expose the expected keys, dtypes and shapes.
        X, y = make_classification(n_samples=50, n_features=50, n_informative=3, random_state=0)
        n_splits = 3
        n_search_iter = 30
        params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
        random_search = TuneSearchCV(SVC(), n_trials=n_search_iter, cv=n_splits, param_distributions=params, return_train_score=True, n_jobs=2)
        random_search.fit(X, y)
        param_keys = ('param_C', 'param_gamma')
        score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'rank_train_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'time_total_s', 'training_iteration')
        n_cand = n_search_iter

        def test_check_cv_results_array_types(cv_results, param_keys, score_keys):
            # Params are masked object arrays; scores are plain float arrays.
            self.assertTrue(all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys))
            self.assertTrue(all(cv_results[key].dtype == object for key in param_keys))
            self.assertFalse(any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys))
            self.assertTrue(all(cv_results[key].dtype == np.float64 for key in score_keys if not key.startswith('rank') and key != 'training_iteration'))
            # FIX: assertEquals -> assertEqual (deprecated alias).
            self.assertEqual(cv_results['rank_test_score'].dtype, np.int32)

        def test_check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
            assert_array_equal(sorted(cv_results.keys()), sorted(param_keys + score_keys + ('params',)))
            self.assertTrue(all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys))

        cv_results = random_search.cv_results_
        test_check_cv_results_array_types(cv_results, param_keys, score_keys)
        test_check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
        self.assertFalse(any(cv_results['param_C'].mask) or any(cv_results['param_gamma'].mask))

    def test_local_dir(self):
        # Results should land under the user-supplied local_dir.
        digits = datasets.load_digits()
        x = digits.data
        y = digits.target
        clf = SGDClassifier()
        parameter_grid = {'alpha': Real(0.0001, 0.1, prior='log-uniform'), 'epsilon': Real(0.01, 0.1)}
        scheduler = MedianStoppingRule(grace_period=10.0)
        tune_search = TuneSearchCV(clf, parameter_grid, early_stopping=scheduler, max_iters=10, local_dir=os.path.abspath('./test-result'))
        tune_search.fit(x, y)
        self.assertTrue(len(os.listdir('./test-result')) != 0)

    def test_multi_best_classification(self):
        # With multimetric scoring + refit, best_score_/best_params_ must
        # agree with cv_results_ for every search backend.
        digits = datasets.load_digits()
        x = digits.data
        y = digits.target
        model = SGDClassifier()
        parameter_grid = {'alpha': [0.0001, 0.1, 1], 'epsilon': [0.01, 0.1]}
        scoring = ('accuracy', 'f1_micro')
        search_methods = ['random', 'bayesian', 'hyperopt', 'bohb', 'optuna']
        for search_method in search_methods:
            tune_search = TuneSearchCV(model, parameter_grid, scoring=scoring, search_optimization=search_method, cv=2, n_trials=3, n_jobs=1, refit='accuracy')
            tune_search.fit(x, y)
            self.assertAlmostEqual(tune_search.best_score_, max(tune_search.cv_results_['mean_test_accuracy']), places=10)
            p = tune_search.cv_results_['params']
            scores = tune_search.cv_results_['mean_test_accuracy']
            cv_best_param = max(list(zip(scores, p)), key=lambda pair: pair[0])[1]
            self.assertEqual(tune_search.best_params_, cv_best_param)

    def test_multi_best_classification_scoring_dict(self):
        # Same as above, but with scoring supplied as a {name: scorer} dict.
        digits = datasets.load_digits()
        x = digits.data
        y = digits.target
        model = SGDClassifier()
        parameter_grid = {'alpha': [0.0001, 0.1, 1], 'epsilon': [0.01, 0.1]}
        scoring = {'acc': 'accuracy', 'f1': 'f1_micro'}
        search_methods = ['random', 'bayesian', 'hyperopt', 'bohb', 'optuna']
        for search_method in search_methods:
            tune_search = TuneSearchCV(model, parameter_grid, scoring=scoring, search_optimization=search_method, cv=2, n_trials=3, n_jobs=1, refit='acc')
            tune_search.fit(x, y)
            self.assertAlmostEqual(tune_search.best_score_, max(tune_search.cv_results_['mean_test_acc']), places=10)
            p = tune_search.cv_results_['params']
            scores = tune_search.cv_results_['mean_test_acc']
            cv_best_param = max(list(zip(scores, p)), key=lambda pair: pair[0])[1]
            self.assertEqual(tune_search.best_params_, cv_best_param)

    def test_multi_best_regression(self):
        digits = None  # unused placeholder removed below; regression data instead
        x, y = make_regression(n_samples=100, n_features=10, n_informative=5)
        model = SGDRegressor()
        parameter_grid = {'alpha': [0.0001, 0.1, 1], 'epsilon': [0.01, 0.1]}
        scoring = ('neg_mean_absolute_error', 'neg_mean_squared_error')
        search_methods = ['random', 'bayesian', 'hyperopt', 'bohb', 'optuna']
        for search_method in search_methods:
            tune_search = TuneSearchCV(model, parameter_grid, scoring=scoring, search_optimization=search_method, cv=2, n_trials=3, n_jobs=1, refit='neg_mean_absolute_error')
            tune_search.fit(x, y)
            self.assertAlmostEqual(tune_search.best_score_, max(tune_search.cv_results_['mean_test_neg_mean_absolute_error']), places=10)
            p = tune_search.cv_results_['params']
            scores = tune_search.cv_results_['mean_test_neg_mean_absolute_error']
            cv_best_param = max(list(zip(scores, p)), key=lambda pair: pair[0])[1]
            self.assertEqual(tune_search.best_params_, cv_best_param)

    def test_multi_refit_false(self):
        # Multimetric scoring with refit=False must raise a helpful error.
        digits = datasets.load_digits()
        x = digits.data
        y = digits.target
        model = SGDClassifier()
        parameter_grid = {'alpha': [0.0001, 0.1, 1], 'epsilon': [0.01, 0.1]}
        scoring = ('accuracy', 'f1_micro')
        tune_search = TuneSearchCV(model, parameter_grid, scoring=scoring, search_optimization='random', cv=2, n_trials=3, n_jobs=1, refit=False)
        with self.assertRaises(ValueError) as exc:
            tune_search.fit(x, y)
        self.assertTrue('When using multimetric scoring, refit must be the name of the scorer used to ' in str(exc.exception))

    def test_warm_start_detection(self):
        # Estimator capability detection drives the early-stop strategy.
        parameter_grid = {'alpha': Real(0.0001, 0.1, prior='log-uniform')}
        from sklearn.ensemble import VotingClassifier, RandomForestClassifier
        clf = VotingClassifier(estimators=[('rf', RandomForestClassifier(n_estimators=50, random_state=0))])
        tune_search = TuneSearchCV(clf, parameter_grid, n_jobs=1, max_iters=10, local_dir=os.path.abspath('./test-result'))
        self.assertEqual(tune_search.early_stop_type, EarlyStopping.NO_EARLY_STOP)
        from sklearn.tree import DecisionTreeClassifier
        clf = DecisionTreeClassifier(random_state=0)
        tune_search2 = TuneSearchCV(clf, parameter_grid, n_jobs=1, max_iters=10, local_dir=os.path.abspath('./test-result'))
        self.assertEqual(tune_search2.early_stop_type, EarlyStopping.NO_EARLY_STOP)
        from sklearn.linear_model import LogisticRegression
        clf = LogisticRegression()
        tune_search3 = TuneSearchCV(clf, parameter_grid, n_jobs=1, max_iters=10, local_dir=os.path.abspath('./test-result'))
        self.assertEqual(tune_search3.early_stop_type, EarlyStopping.NO_EARLY_STOP)
        tune_search4 = TuneSearchCV(clf, parameter_grid, early_stopping=True, n_jobs=1, max_iters=10, local_dir=os.path.abspath('./test-result'))
        self.assertEqual(tune_search4.early_stop_type, EarlyStopping.WARM_START_ITER)
        clf = RandomForestClassifier()
        tune_search5 = TuneSearchCV(clf, parameter_grid, early_stopping=True, n_jobs=1, max_iters=10, local_dir=os.path.abspath('./test-result'))
        self.assertEqual(tune_search5.early_stop_type, EarlyStopping.WARM_START_ENSEMBLE)

    def test_warm_start_error(self):
        # Requesting early stopping for incapable estimators must raise.
        parameter_grid = {'alpha': Real(0.0001, 0.1, prior='log-uniform')}
        from sklearn.ensemble import VotingClassifier, RandomForestClassifier
        clf = VotingClassifier(estimators=[('rf', RandomForestClassifier(n_estimators=50, random_state=0))])
        tune_search = TuneSearchCV(clf, parameter_grid, n_jobs=1, early_stopping=False, max_iters=10, local_dir=os.path.abspath('./test-result'))
        self.assertFalse(tune_search._can_early_stop())
        with self.assertRaises(ValueError):
            tune_search = TuneSearchCV(clf, parameter_grid, n_jobs=1, early_stopping=True, max_iters=10, local_dir=os.path.abspath('./test-result'))
        from sklearn.linear_model import LogisticRegression
        clf = LogisticRegression()
        with self.assertRaises(ValueError):
            parameter_grid = {'max_iter': [1, 2]}
            TuneSearchCV(clf, parameter_grid, early_stopping=True, n_jobs=1, max_iters=10, local_dir=os.path.abspath('./test-result'))
        from sklearn.ensemble import RandomForestClassifier
        clf = RandomForestClassifier()
        with self.assertRaises(ValueError):
            parameter_grid = {'n_estimators': [1, 2]}
            TuneSearchCV(clf, parameter_grid, early_stopping=True, n_jobs=1, max_iters=10, local_dir=os.path.abspath('./test-result'))

    def test_warn_reduce_maxiters(self):
        parameter_grid = {'alpha': Real(0.0001, 0.1, prior='log-uniform')}
        from sklearn.ensemble import RandomForestClassifier
        clf = RandomForestClassifier(max_depth=2, random_state=0)
        with self.assertWarnsRegex(UserWarning, 'max_iters is set'):
            TuneSearchCV(clf, parameter_grid, max_iters=10, local_dir=os.path.abspath('./test-result'))
        with self.assertWarnsRegex(UserWarning, 'max_iters is set'):
            TuneSearchCV(SGDClassifier(), parameter_grid, max_iters=10, local_dir=os.path.abspath('./test-result'))

    def test_warn_early_stop(self):
        X, y = make_classification(n_samples=50, n_features=5, n_informative=3, random_state=0)
        with self.assertWarnsRegex(UserWarning, 'max_iters = 1'):
            TuneSearchCV(LogisticRegression(), {'C': [1, 2]}, early_stopping=True).fit(X, y)
        with self.assertWarnsRegex(UserWarning, 'max_iters = 1'):
            TuneSearchCV(SGDClassifier(), {'epsilon': [0.1, 0.2]}, early_stopping=True).fit(X, y)

    def test_warn_user_params(self):
        X, y = make_classification(n_samples=50, n_features=50, n_informative=3, random_state=0)
        clf = MockClassifier()
        search = TuneSearchCV(clf, {'foo_param': [2.0, 3.0, 4.0]}, cv=2, max_iters=2)
        # NOTE: 'overriden' [sic] matches the library's warning text — do not fix here.
        with self.assertWarnsRegex(UserWarning, 'The following preset tune.run parameters will be overriden by tune_params: fail_fast.'):
            search.fit(X, y, tune_params={'fail_fast': 'raise'})

    @unittest.skipIf(not has_xgboost(), 'xgboost not installed')
    def test_early_stop_xgboost_warn(self):
        X, y = make_classification(n_samples=50, n_features=5, n_informative=3, random_state=0)
        from xgboost.sklearn import XGBClassifier
        with self.assertWarnsRegex(UserWarning, 'github.com'):
            TuneSearchCV(XGBClassifier(), {'C': [1, 2]}, early_stopping=True, max_iters=10).fit(X, y)
        with self.assertWarnsRegex(UserWarning, 'max_iters'):
            TuneSearchCV(XGBClassifier(), {'C': [1, 2]}, early_stopping=True, max_iters=1).fit(X, y)

    @unittest.skipIf(not has_required_lightgbm_version(), 'lightgbm not installed')
    def test_early_stop_lightgbm_warn(self):
        X, y = make_classification(n_samples=50, n_features=5, n_informative=3, random_state=0)
        from lightgbm import LGBMClassifier
        with self.assertWarnsRegex(UserWarning, 'lightgbm'):
            TuneSearchCV(LGBMClassifier(), {'learning_rate': [0.1, 0.5]}, early_stopping=True, max_iters=10).fit(X, y)
        with self.assertWarnsRegex(UserWarning, 'max_iters'):
            TuneSearchCV(LGBMClassifier(), {'learning_rate': [0.1, 0.5]}, early_stopping=True, max_iters=1).fit(X, y)

    @unittest.skipIf(not has_catboost(), 'catboost not installed')
    def test_early_stop_catboost_warn(self):
        X, y = make_classification(n_samples=50, n_features=5, n_informative=3, random_state=0)
        from catboost import CatBoostClassifier
        with self.assertWarnsRegex(UserWarning, 'Catboost'):
            TuneSearchCV(CatBoostClassifier(), {'learning_rate': [0.1, 0.5]}, early_stopping=True, max_iters=10).fit(X, y)
        with self.assertWarnsRegex(UserWarning, 'max_iters'):
            TuneSearchCV(CatBoostClassifier(), {'learning_rate': [0.1, 0.5]}, early_stopping=True, max_iters=1).fit(X, y)

    def test_pipeline_early_stop(self):
        digits = datasets.load_digits()
        x = digits.data
        y = digits.target
        pipe = Pipeline([('reduce_dim', PCA()), ('classify', SGDClassifier())])
        parameter_grid = [{'classify__alpha': [0.0001, 0.1, 1], 'classify__epsilon': [0.01, 0.1]}]
        with self.assertRaises(ValueError) as exc:
            TuneSearchCV(pipe, parameter_grid, early_stopping=True, pipeline_auto_early_stop=False, max_iters=10)
        self.assertTrue('Early stopping is not supported because the estimator does not have `partial_fit`, does not support warm_start, or is a tree classifier. Set `early_stopping=False`.' in str(exc.exception))
        tune_search = TuneSearchCV(pipe, parameter_grid, early_stopping=True, max_iters=10)
        tune_search.fit(x, y)

    @unittest.skipIf(not has_xgboost(), 'xgboost not installed')
    def test_early_stop_xgboost_pipeline(self):
        from xgboost.sklearn import XGBClassifier
        from sklearn.pipeline import Pipeline
        TuneSearchCV(Pipeline([('model', XGBClassifier())]), {'model__C': [1, 2]}, early_stopping=True, pipeline_auto_early_stop=True, cv=2, n_trials=2, max_iters=10)

    @unittest.skipIf(not has_required_lightgbm_version(), 'lightgbm not installed')
    def test_early_stop_lightgbm_pipeline(self):
        from lightgbm import LGBMClassifier
        from sklearn.pipeline import Pipeline
        TuneSearchCV(Pipeline([('model', LGBMClassifier())]), {'model__learning_rate': [0.1, 0.5]}, early_stopping=True, pipeline_auto_early_stop=True, cv=2, n_trials=2, max_iters=10)

    @unittest.skipIf(not has_catboost(), 'catboost not installed')
    def test_early_stop_catboost_pipeline(self):
        from catboost import CatBoostClassifier
        from sklearn.pipeline import Pipeline
        TuneSearchCV(Pipeline([('model', CatBoostClassifier())]), {'model__learning_rate': [0.1, 0.5]}, early_stopping=True, pipeline_auto_early_stop=True, cv=2, n_trials=2, max_iters=10)

    def test_max_iters(self):
        X, y = make_classification(n_samples=50, n_features=50, n_informative=3, random_state=0)
        clf = PlateauClassifier(converge_after=20)
        search = TuneSearchCV(clf, {'foo_param': [2.0, 3.0, 4.0]}, cv=2, max_iters=6, early_stopping=True)
        search.fit(X, y)
        print(search.cv_results_)
        for iters in search.cv_results_['training_iteration']:
            self.assertLessEqual(iters, 6)

    def test_plateau(self):
        try:
            from ray.tune.stopper import TrialPlateauStopper
        except ImportError:
            self.skipTest('`TrialPlateauStopper` not available in current Ray version.')
            return
        X, y = make_classification(n_samples=50, n_features=50, n_informative=3, random_state=0)
        clf = PlateauClassifier(converge_after=4)
        stopper = TrialPlateauStopper(metric='objective')
        search = TuneSearchCV(clf, {'foo_param': [2.0, 3.0, 4.0]}, cv=2, max_iters=20, stopper=stopper, early_stopping=True)
        search.fit(X, y)
        print(search.cv_results_)
        for iters in search.cv_results_['training_iteration']:
            self.assertLessEqual(iters, 8)

    def test_timeout(self):
        X, y = make_classification(n_samples=50, n_features=50, n_informative=3, random_state=0)
        clf = SleepClassifier()
        # time_budget_s should bound total fit time (generous slack for setup).
        search = TuneSearchCV(clf, {'foo_param': [1.1, 1.2, 2.5]}, time_budget_s=5.0, cv=2, max_iters=5, early_stopping=True)
        start = time.time()
        search.fit(X, y)
        taken = time.time() - start
        print(search)
        self.assertLess(taken, 25.0)

    def test_wrong_mode(self):
        model = SGDClassifier()
        parameter_grid = {'alpha': [0.0001, 0.1, 1], 'epsilon': [0.01, 0.1]}
        TuneSearchCV(model, parameter_grid, search_optimization='random', mode='min')
        TuneSearchCV(model, parameter_grid, search_optimization='random', mode='max')
        with self.assertRaises(AssertionError) as exc:
            TuneSearchCV(model, parameter_grid, search_optimization='random', mode='this_is_an_invalid_mode')
        self.assertTrue("`mode` must be 'min' or 'max'" in str(exc.exception))

    def test_all_trials_failed(self):
        X, y = make_classification(n_samples=50, n_features=50, n_informative=3, random_state=0)
        model = LinearSVC()
        # hinge loss + dual=False is unsupported -> every trial errors out.
        parameter_grid = {'penalty': ['l2'], 'loss': ['hinge'], 'dual': [False], 'C': [0.0001, 0.001]}
        search = TuneSearchCV(estimator=model, param_distributions=parameter_grid, search_optimization='random', n_trials=2, error_score=np.nan, verbose=True, random_state=130, refit=False)
        with self.assertRaises(ValueError) as exc:
            search.fit(X, y)
        self.assertTrue("Couldn't obtain best config" in str(exc.exception))

    def test_error_score(self):
        X, y = make_classification(n_samples=50, n_features=50, n_informative=3, random_state=0)
        model = LinearSVC()
        parameter_grid = {'penalty': ['l2'], 'loss': ['hinge'], 'dual': [True, False]}
        search = TuneSearchCV(estimator=model, param_distributions=parameter_grid, search_optimization='random', n_trials=2, error_score=np.nan, verbose=True, random_state=110, refit=False)
        search.fit(X, y)
        self.assertTrue(np.isnan(search.cv_results_['split0_test_score'][1]))

    def test_error_score_early_stop(self):
        X, y = make_classification(n_samples=50, n_features=50, n_informative=3, random_state=0)
        model = SGDClassifier()
        # l1_ratio=1.2 is invalid -> that trial gets error_score (NaN).
        parameter_grid = {'l1_ratio': [0.9, 1.2]}
        search = TuneSearchCV(estimator=model, param_distributions=parameter_grid, search_optimization='random', n_trials=2, error_score=np.nan, verbose=True, early_stopping=True, random_state=110, refit=False)
        search.fit(X, y)
        self.assertTrue(np.isnan(search.cv_results_['split0_test_score'][1]))
class TestInstagramPortraitPost(unittest.TestCase):
    """InstagramPortraitPost must use the Instagram portrait canvas size."""

    # FIX: the mock.patch decorator had been garbled to a bare string tuple
    # (doing nothing); restored so setUp's *mocks actually receives the patch
    # and Image._set_fullpath does not touch the filesystem.
    @mock.patch('nider.models.Image._set_fullpath')
    def setUp(self, *mocks):
        self.post = InstagramPortraitPost(content=mock.Mock(), fullpath=mock.Mock())

    def test_size(self):
        self.assertEqual(self.post.width, 1080)
        self.assertEqual(self.post.height, 1350)
class UT_HAR_GRU(nn.Module): def __init__(self, hidden_dim=64): super(UT_HAR_GRU, self).__init__() self.gru = nn.GRU(90, hidden_dim, num_layers=1) self.fc = nn.Linear(hidden_dim, 7) def forward(self, x): x = x.view((- 1), 250, 90) x = x.permute(1, 0, 2) (_, ht) = self.gru(x) outputs = self.fc(ht[(- 1)]) return outputs
class RequestScope(Scope):
    """Injector scope whose instances live for the duration of one request.

    Used as a generator context: entering binds the request and opens a fresh
    per-request cache; exiting clears it.
    """

    def configure(self):
        # No active request yet.
        self.context = None

    def __call__(self, request):
        assert self.context is None
        self.context = {}
        binder = self.injector.get(Binder)
        binder.bind(Request, to=request, scope=RequestScope)
        yield
        # Request finished: drop all per-request instances.
        self.context = None

    def get(self, key, provider):
        if self.context is None:
            raise UnsatisfiedRequirement(None, key)
        cached = self.context.get(key)
        if cached is None:
            # First lookup in this request: materialize once and memoize.
            cached = InstanceProvider(provider.get(self.injector))
            self.context[key] = cached
        return cached
def test_remove(brew_info):
    """remove() drops an entry from both list- and dict-valued input attrs."""
    names = ['aaa', 'bbb', 'ccc']
    brew_info.brew_input.extend(names)
    brew_info.remove('brew_input', 'bbb')
    assert brew_info.brew_input == ['aaa', 'ccc']
    brew_info.brew_input_opt.update({name: name for name in names})
    brew_info.remove('brew_input_opt', 'bbb')
    assert brew_info.brew_input_opt == {'aaa': 'aaa', 'ccc': 'ccc'}
class F13_TestCase(F12_TestCase):
    """F13 raid-command tests: everything from F12 plus RAID4 support."""

    def __init__(self, *kargs, **kwargs):
        F12_TestCase.__init__(self, *kargs, **kwargs)
        self.validLevels.append('RAID4')

    def runTest(self):
        F12_TestCase.runTest(self)
        # Both the numeric and the 'raidN' spellings normalize to RAID4.
        for level in ('4', 'raid4'):
            self.assert_parse(
                'raid / --device=md0 --level=%s%s raid.01 raid.02' % (level, self.bytesPerInode),
                'raid / --device=0 --level=RAID4%s raid.01 raid.02\n' % (self.bytesPerInode,))
def get_modname_from_path(modpath: pathlib.Path, package_path: pathlib.Path, add_package_name: bool = True) -> str:
    """Derive the dotted module name of *modpath* relative to *package_path*.

    ``__init__.py`` files name their containing package; any other file
    contributes its stem. When *add_package_name* is true the package's own
    stem is prepended.
    """
    package_name: str = package_path.stem
    parts = modpath.relative_to(package_path).parts
    segments: list = []
    if parts:
        segments.extend(parts[:-1])
        if parts[-1] != '__init__.py':
            segments.append(modpath.stem)
    modname = '.'.join(segments)
    if add_package_name:
        return package_name if not modname else f'{package_name}.{modname}'
    assert modname != '.'
    return modname
# FIX: the parametrize decorator had been garbled to a bare `.parametrize(...)`
# expression; restored as a pytest.mark.parametrize decorator.
@pytest.mark.parametrize('log_prob_key', [None, 'sample_log_prob', ('nested', 'sample_log_prob'), ('data', 'sample_log_prob')])
def test_nested_keys_probabilistic_delta(log_prob_key):
    """Delta-distribution ProbabilisticTensorDictModule honours a custom
    (possibly nested) log_prob_key, falling back to 'sample_log_prob'."""
    policy_module = TensorDictModule(nn.Linear(1, 1), in_keys=[('data', 'states')], out_keys=[('data', 'param')])
    td = TensorDict({'data': TensorDict({'states': torch.zeros(3, 4, 1)}, [3, 4])}, [3])

    # in_keys given as a list of nested keys.
    module = ProbabilisticTensorDictModule(in_keys=[('data', 'param')], out_keys=[('data', 'action')], distribution_class=Delta, return_log_prob=True, log_prob_key=log_prob_key)
    td_out = module(policy_module(td))
    assert td_out[('data', 'action')].shape == (3, 4, 1)
    if log_prob_key:
        assert td_out[log_prob_key].shape == (3, 4)
    else:
        assert td_out['sample_log_prob'].shape == (3, 4)

    # in_keys given as a {distribution kwarg: tensordict key} mapping.
    module = ProbabilisticTensorDictModule(in_keys={'param': ('data', 'param')}, out_keys=[('data', 'action')], distribution_class=Delta, return_log_prob=True, log_prob_key=log_prob_key)
    td_out = module(policy_module(td))
    assert td_out[('data', 'action')].shape == (3, 4, 1)
    if log_prob_key:
        assert td_out[log_prob_key].shape == (3, 4)
    else:
        assert td_out['sample_log_prob'].shape == (3, 4)
class GrabPointer(rq.ReplyRequest):
    # X11 core-protocol GrabPointer request (opcode 26): actively grab control
    # of the pointer. Field order and widths mirror the wire format and must
    # not be changed.
    _request = rq.Struct(
        rq.Opcode(26),
        rq.Bool('owner_events'),
        rq.RequestLength(),
        rq.Window('grab_window'),
        rq.Card16('event_mask'),
        # Both modes are 1-byte enums limited to GrabModeSync / GrabModeAsync.
        rq.Set('pointer_mode', 1, (X.GrabModeSync, X.GrabModeAsync)),
        rq.Set('keyboard_mode', 1, (X.GrabModeSync, X.GrabModeAsync)),
        # confine_to and cursor both accept X.NONE.
        rq.Window('confine_to', (X.NONE,)),
        rq.Cursor('cursor', (X.NONE,)),
        rq.Card32('time'))
    # Reply carries only a 1-byte grab status; the rest is padding.
    _reply = rq.Struct(
        rq.ReplyCode(),
        rq.Card8('status'),
        rq.Card16('sequence_number'),
        rq.ReplyLength(),
        rq.Pad(24))
class AssignmentStmt(Statement):
    """Assignment statement node (e.g. ``x = y``), possibly type-annotated.

    ``unanalyzed_type`` keeps the type exactly as written, while ``type`` may
    be rewritten by semantic analysis (both start out identical in __init__).
    """

    __slots__ = ('lvalues', 'rvalue', 'type', 'unanalyzed_type', 'new_syntax', 'is_alias_def', 'is_final_def', 'invalid_recursive_alias')

    # BUG FIX: previously listed 'rvalues', which is not an attribute of this
    # class (the slot is 'rvalue'); positional match patterns like
    # `case AssignmentStmt(lv, rv, t)` would raise AttributeError.
    __match_args__ = ('lvalues', 'rvalue', 'type')

    lvalues: list[Lvalue]
    rvalue: Expression
    type: (mypy.types.Type | None)
    unanalyzed_type: (mypy.types.Type | None)
    new_syntax: bool
    is_alias_def: bool
    is_final_def: bool
    invalid_recursive_alias: bool

    def __init__(self, lvalues: list[Lvalue], rvalue: Expression, type: (mypy.types.Type | None) = None, new_syntax: bool = False) -> None:
        super().__init__()
        self.lvalues = lvalues
        self.rvalue = rvalue
        self.type = type
        self.unanalyzed_type = type
        self.new_syntax = new_syntax
        self.is_alias_def = False
        self.is_final_def = False
        self.invalid_recursive_alias = False

    def accept(self, visitor: StatementVisitor[T]) -> T:
        return visitor.visit_assignment_stmt(self)
def test_dealloc_mix1_2_order():
    """Alternating 1- and 2-unit allocations, freed in allocation order,
    must return the allocator to a fully free state."""
    sizes = [1, 2] * 5
    allocator = RegionAllocator(sum(sizes))
    regions = [allocator.alloc(size) for size in sizes]
    for region in regions:
        allocator.dealloc(region)
    assert allocator.get_free_size() == allocator.capacity
@_exporter  # NOTE(review): decorator garbled in source — name looks truncated; confirm against the exporter registry
class SceneToPixmapExporter(ExporterBase):
    """Exports the scene to a raster image via QImage/QPainter."""

    TYPE = ExporterRegistry.DEFAULT_TYPE

    def get_user_input(self, parent):
        """Ask the user for the export size; return True if confirmed."""
        dialog = widgets.SceneToPixmapExporterDialog(parent=parent, default_size=self.default_size)
        if dialog.exec():
            size = dialog.value()
            logger.debug(f'Got export size {size}')
            self.size = size
            return True
        else:
            return False

    def render_to_image(self):
        """Render the scene into a QImage of the chosen size and return it."""
        logger.debug(f'Final export size: {self.size}')
        # Scale the configured margin proportionally to the chosen width.
        margin = (self.margin * self.size.width()) / self.default_size.width()
        logger.debug(f'Final export margin: {margin}')
        image = QtGui.QImage(self.size, QtGui.QImage.Format.Format_RGB32)
        image.fill(QtGui.QColor(*constants.COLORS['Scene:Canvas']))
        painter = QtGui.QPainter(image)
        target_rect = QtCore.QRectF(margin, margin, self.size.width() - 2 * margin, self.size.height() - 2 * margin)
        logger.trace(f'Final export target_rect: {target_rect}')
        self.scene.render(painter, source=self.scene.itemsBoundingRect(), target=target_rect)
        painter.end()
        return image

    def export(self, filename, worker=None):
        """Render and save to *filename*, reporting progress via *worker*.

        Without a worker, save failures raise BeeFileIOError; with one they
        are reported through worker.finished.
        """
        # BUG FIX: the f-string had no placeholder (literal '(unknown)');
        # log the actual filename.
        logger.debug(f'Exporting scene to {filename}')
        if worker:
            worker.begin_processing.emit(1)
        image = self.render_to_image()
        if worker and worker.canceled:
            logger.debug('Export canceled')
            worker.finished.emit(filename, [])
            return
        if not image.save(filename, quality=90):
            msg = 'Error writing file'
            logger.debug(f'Export failed: {msg}')
            if worker:
                worker.finished.emit(filename, [msg])
                return
            else:
                raise BeeFileIOError(msg, filename=filename)
        logger.debug('Export finished')
        if worker:
            worker.progress.emit(1)
            worker.finished.emit(filename, [])
class FakePathlibPathModule():
    # Stand-in for pathlib.Path that delegates to a faked filesystem module.

    # Shared, lazily-created FakePathlibModule; class-level so every wrapper
    # instance uses the same faked filesystem.
    fake_pathlib = None

    def __init__(self, filesystem=None):
        # Only the first instantiation binds the class-level fake module.
        if self.fake_pathlib is None:
            self.__class__.fake_pathlib = FakePathlibModule(filesystem)

    def __call__(self, *args, **kwargs):
        # Mimic calling pathlib.Path(...) directly.
        return self.fake_pathlib.Path(*args, **kwargs)

    def __getattr__(self, name):
        # Forward attribute access (classmethods like Path.cwd, etc.) to the
        # faked Path class.
        return getattr(self.fake_pathlib.Path, name)

    def __instancecheck__(cls, instance):
        # Any PurePath counts as an instance. NOTE(review): defined on the
        # class itself rather than a metaclass — confirm this hook fires in
        # the intended isinstance() checks.
        return isinstance(instance, PurePath)
def main():
    """Drop a seeded random pile of objects into the simulation, print each
    body's class info, then step the simulation forever."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    args = parser.parse_args()

    pp.connect()
    _utils.init_simulation(camera_distance=1)
    rng = np.random.RandomState(args.seed)
    unique_ids = _utils.create_pile(class_ids=[2, 3, 5, 11, 12, 15, 16], num_instances=8, random_state=rng)
    for unique_id in unique_ids:
        class_id = int(p.getUserData(p.getUserDataId(unique_id, 'class_id')))
        class_name = safepicking.datasets.ycb.class_names[class_id]
        print(f'body_id={unique_id}, class_id={class_id:02d}, class_name={class_name}')

    # Run the physics loop at ~240 Hz until interrupted.
    while True:
        p.stepSimulation()
        time.sleep(1 / 240)
def eval_model(model, filepaths, entropy_estimation=False, half=False, savedir=''):
    # Evaluate `model` on every image in `filepaths` (sorted) and return the
    # per-metric averages (keys come from the inference helpers, e.g.
    # bpp / psnr / ms-ssim).
    device = next(model.parameters()).device
    metrics = defaultdict(float)
    for (idx, f) in enumerate(sorted(filepaths)):
        x = read_image(f).to(device)
        if (not entropy_estimation):
            print('evaluating index', idx)
            if half:
                # NOTE(review): converts the model in place on first use; later
                # iterations re-call .half() (redundant but harmless).
                model = model.half()
                x = x.half()
            rv = inference(model, x, savedir, idx)
        else:
            rv = inference_entropy_estimation(model, x)
        # NOTE(review): indentation reconstructed from a flattened source —
        # these progress prints are assumed to run for every image; confirm.
        print('bpp', rv['bpp'])
        print('psnr', rv['psnr'])
        print('ms-ssim', rv['ms-ssim'])
        print()
        # Accumulate each metric across images.
        for (k, v) in rv.items():
            metrics[k] += v
    # Convert sums to means.
    for (k, v) in metrics.items():
        metrics[k] = (v / len(filepaths))
    return metrics
def apply_rc(mediator: Mediator, request: BaseNameLayoutRequest, request_checker: RequestChecker, field: BaseField) -> bool:
    """Return True when *request_checker* accepts a name-mapping filter
    request built for *field*, False when it raises CannotProvide."""
    owner_type = request.loc_map[TypeHintLoc].type
    filter_request = NameMappingFilterRequest(loc_map=field_to_loc_map(owner_type, field))
    stack_mediator = ExtraStackMediator(mediator, [filter_request])
    try:
        request_checker.check_request(stack_mediator, filter_request)
    except CannotProvide:
        return False
    else:
        return True
def add_data_args(parser):
    """Register dataset / data-loading CLI options on *parser* and return it.

    All options are optional; limits default to effectively-unbounded values.
    """
    parser.add_argument('--dataset', type=str, choices=DATASET_CHOICES, help='dataset format')
    parser.add_argument('--data-dir', type=str, help='data directory')
    parser.add_argument('--split-sizes', type=float, nargs=3, default=[0.8, 0.1, 0.1], help='train/val/test proportions for datasets where not provided')
    parser.add_argument('--limit', type=int, default=None, help='limit the number of examples')
    parser.add_argument('--length-limit', type=int, default=1000000, help='limit the number of words per example')
    # BUG FIX: help text was a copy-paste of --length-limit's; this option is
    # the lower bound (default 0).
    parser.add_argument('--lower-length-limit', type=int, default=0, help='lower limit on the number of words per example')
    parser.add_argument('--summary-length-limit', type=int, default=1000000, help='limit the number of words in the summary')
    parser.add_argument('--csv-column', type=str, help='column name to use as input for csv when using csv dataset')
    parser.add_argument('--num-workers', type=int, default=20, help='number of workers for data loading')
    return parser
def squad_convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, return_dataset=False, threads=1):
    # Convert SQuAD examples into model features in parallel, then optionally
    # wrap them in a framework dataset: return_dataset='pt' -> (features,
    # TensorDataset); 'tf' -> tf.data.Dataset; anything else -> raw list.
    features = []
    threads = min(threads, cpu_count())
    # Worker pool; the initializer stashes the tokenizer in each worker
    # process so it is not pickled per task.
    with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
        annotate_ = partial(squad_convert_example_to_features, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=is_training)
        features = list(tqdm(p.imap(annotate_, examples, chunksize=32), total=len(examples), desc='convert squad examples to features'))
    # Flatten the per-example feature lists, skipping empty ones, and assign
    # sequential example/unique ids.
    new_features = []
    unique_id = example_index = 0
    for example_features in tqdm(features, total=len(features), desc='add example index and unique id'):
        if (not example_features):
            continue
        for example_feature in example_features:
            example_feature.example_index = example_index
            example_feature.unique_id = unique_id
            new_features.append(example_feature)
            unique_id += 1
        example_index += 1
    features = new_features
    del new_features
    if (return_dataset == 'pt'):
        if (not is_torch_available()):
            raise RuntimeError('PyTorch must be installed to return a PyTorch dataset.')
        # Stack each feature field into one long tensor per field.
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
        all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
        if (not is_training):
            # Eval mode: include the feature index instead of answer spans.
            all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
            dataset = TensorDataset(all_input_ids, all_attention_masks, all_token_type_ids, all_example_index, all_cls_index, all_p_mask)
        else:
            all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
            all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
            dataset = TensorDataset(all_input_ids, all_attention_masks, all_token_type_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask)
        return (features, dataset)
    elif (return_dataset == 'tf'):
        if (not is_tf_available()):
            raise RuntimeError('TensorFlow must be installed to return a TensorFlow dataset.')

        def gen():
            # Yield (inputs, targets) dicts feature by feature.
            for ex in features:
                (yield ({'input_ids': ex.input_ids, 'attention_mask': ex.attention_mask, 'token_type_ids': ex.token_type_ids}, {'start_position': ex.start_position, 'end_position': ex.end_position, 'cls_index': ex.cls_index, 'p_mask': ex.p_mask}))
        return tf.data.Dataset.from_generator(gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, {'start_position': tf.int64, 'end_position': tf.int64, 'cls_index': tf.int64, 'p_mask': tf.int32}), ({'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None]), 'token_type_ids': tf.TensorShape([None])}, {'start_position': tf.TensorShape([]), 'end_position': tf.TensorShape([]), 'cls_index': tf.TensorShape([]), 'p_mask': tf.TensorShape([None])}))
    return features
class MaxOrderSize(TradingControl):
    """Trading control forbidding a single order from exceeding a share count
    and/or a notional value, optionally restricted to one asset."""

    def __init__(self, on_error, asset=None, max_shares=None, max_notional=None):
        super(MaxOrderSize, self).__init__(on_error, asset=asset, max_shares=max_shares, max_notional=max_notional)
        self.asset = asset
        self.max_shares = max_shares
        self.max_notional = max_notional
        if max_shares is None and max_notional is None:
            raise ValueError('Must supply at least one of max_shares and max_notional')
        if max_shares and max_shares < 0:
            raise ValueError('max_shares cannot be negative.')
        if max_notional and max_notional < 0:
            raise ValueError('max_notional must be positive.')

    def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
        """Flag a violation when the order is too large in shares or value."""
        # Control may be scoped to a single asset; ignore all others.
        if self.asset is not None and self.asset != asset:
            return
        too_many_shares = self.max_shares is not None and abs(amount) > self.max_shares
        if too_many_shares:
            self.handle_violation(asset, amount, algo_datetime)
        price = algo_current_data.current(asset, 'price')
        order_value = amount * price
        too_much_value = self.max_notional is not None and abs(order_value) > self.max_notional
        if too_much_value:
            self.handle_violation(asset, amount, algo_datetime)
class MaximumEntropyInverseRL():
    """Training loop for a block-world agent driven by demonstration trajectories.

    NOTE(review): this class reads attributes that are never initialized here
    (``self.policy_model``, ``self.replay_memory``, ``self.ps``,
    ``self.batch_size``, ``self.min_loss``) -- presumably provided by a
    subclass or assigned externally; confirm before reuse.
    """

    def __init__(self, agent):
        # Agent wraps the simulator connection plus evaluation helpers.
        self.agent = agent

    def train(self, sess):
        """Train for up to AbstractLearning.max_epochs epochs in TF session `sess`.

        After each epoch the model is evaluated on a held-out tuning split and
        checkpointed; training stops early once the tuning metric fails to
        improve for AbstractLearning.max_patience consecutive epochs.
        """
        start = time.time()
        max_epoch = AbstractLearning.max_epochs
        dataset_size = AbstractLearning.dataset_size
        tuning_size = AbstractLearning.validation_datasize
        train_size = (dataset_size - tuning_size)
        logger.Log.info(((('Train Data: ' + str(train_size)) + ' Tuning Data ') + str(tuning_size)))
        saver = tf.train.Saver(max_to_keep=AbstractLearning.models_to_keep)
        iteration = 0
        # Baseline tuning metric before any training (lower appears better --
        # smaller values reset patience below).
        avg_bisk_metric = self.agent.test(tuning_size)
        min_avg_bisk_metric = avg_bisk_metric
        patience = 0
        max_patience = AbstractLearning.max_patience
        logger.Log.info(('Tuning Data: (Before Training) Avg. Bisk Metric: ' + str(avg_bisk_metric)))
        for epoch in range(1, (max_epoch + 1)):
            logger.Log.info((('\n Starting Epoch: ' + str(epoch)) + '\n'))
            for data_point in range(1, (train_size + 1)):
                # Rolling window of the last 5 observed frames, seeded with
                # dummy images from the embedder.
                state = collections.deque([], 5)
                dummy_images = self.policy_model.image_embedder.get_dummy_images()
                [state.append(v) for v in dummy_images]
                (_, _, current_env, instruction, trajectory) = self.agent.receive_instruction_and_image()
                state.append(current_env)
                (text_input_word_indices, text_mask) = self.policy_model.text_embedder.get_word_indices_and_mask(instruction)
                logger.Log.info((((('\n ' + str(data_point)) + ': Instruction: ') + str(instruction)) + '\n'))
                traj_ix = 0
                total_reward_episode = 0
                steps = 0
                previous_action = self.policy_model.null_previous_action
                # Block id appears to be encoded as action_id // 4 (4 directions
                # per block) -- TODO confirm the action-id layout.
                block_id = int((trajectory[0] / 4.0))
                while True:
                    # Replay the demonstration trajectory action by action.
                    action_id = trajectory[traj_ix]
                    traj_ix += 1
                    action_str = self.agent.message_protocol_kit.encode_action(action_id)
                    logger.Log.debug(('Sending Message: ' + action_str))
                    self.agent.connection.send_message(action_str)
                    (status_code, reward, new_env, is_reset) = self.agent.receive_response_and_image()
                    logger.Log.debug(('Received reward: ' + str(reward)))
                    # Action 80 maps to direction 4 (presumably the stop
                    # action); otherwise the direction is action_id mod 4.
                    if (action_id == 80):
                        direction_id = 4
                    else:
                        direction_id = (action_id % 4)
                    # Demonstration transitions are stored with weight 1.0.
                    replay_memory_item = rm.ReplayMemory(text_input_word_indices, text_mask, state, (block_id, direction_id), 1.0, new_env, None, previous_action_id=previous_action)
                    self.replay_memory.appendleft(replay_memory_item)
                    state.append(new_env)
                    total_reward_episode += reward
                    steps += 1
                    previous_action = (self.agent.get_code_from_status_code(status_code), direction_id, block_id)
                    if self.agent.message_protocol_kit.is_reset_message(is_reset):
                        # End of episode: acknowledge the reset, then take one
                        # learning step on a sampled replay batch.
                        logger.Log.debug('Resetting the episode')
                        self.agent.connection.send_message('Ok-Reset')
                        logger.Log.debug('Now waiting for response')
                        sample = self.ps.sample(self.replay_memory, self.batch_size)
                        loss = self.min_loss(sample, sess)
                        if np.isnan(loss):
                            logger.Log.info('NaN found. Exiting')
                            exit(0)
                        iteration += 1
                        logger.Log.info(((((('Number of sample ' + str(len(sample))) + ' size of replay memory ') + str(len(self.replay_memory))) + ' loss = ') + str(loss)))
                        logger.Log.info(((('Total reward:' + str(total_reward_episode)) + ' Steps: ') + str(steps)))
                        total_time = (time.time() - start)
                        logger.Log.info(('Total time: ' + str(total_time)))
                        logger.Log.flush()
                        break
            # End-of-epoch evaluation on the tuning split, then checkpoint.
            avg_bisk_metric = self.agent.test(tuning_size)
            logger.Log.info(((((('Tuning Data: (end of epoch ' + str(epoch)) + ') Avg. Bisk Metric: ') + str(avg_bisk_metric)) + 'Min was ') + str(min_avg_bisk_metric)))
            save_path = saver.save(sess, (('./saved/model_epoch_' + str(epoch)) + '.ckpt'))
            logger.Log.info(('Model saved in file: ' + str(save_path)))
            if (avg_bisk_metric >= min_avg_bisk_metric):
                # No improvement: spend patience; terminate when it runs out.
                if (patience == max_patience):
                    logger.Log.info((((('Max patience reached. Terminating learning after ' + str(epoch)) + ' epochs and ') + str(iteration)) + ' iterations.'))
                    break
                else:
                    logger.Log.info(('Tuning accuracy did not improve. Increasing patience to ' + str((patience + 1))))
                    patience += 1
            else:
                logger.Log.info('Resetting patience to 0')
                patience = 0
                min_avg_bisk_metric = min(min_avg_bisk_metric, avg_bisk_metric)
        logger.Log.close()
class Document(ElementProxy):
    """WordprocessingML (.docx) document object, proxying a `w:document` element.

    NOTE(review): the accessors below (`core_properties`, `paragraphs`,
    `sections`, `_body`, `_block_width`, ...) read like they were
    `@property`-decorated originally: e.g. `add_paragraph` uses
    `self._body.add_paragraph(...)` and `_block_width` subscripts
    `self.sections[-1]`, which only works if those names resolve to values,
    not bound methods.  The decorators appear to have been lost -- confirm
    against the original source before relying on these as plain methods.
    """

    def __init__(self, element: CT_Document, part: DocumentPart):
        super(Document, self).__init__(element)
        self._element = element
        self._part = part
        # Lazily-created _Body wrapper; see _body below.
        self.__body = None

    def add_heading(self, text: str='', level: int=1):
        """Append a heading paragraph; level 0 uses the 'Title' style."""
        if (not (0 <= level <= 9)):
            raise ValueError(('level must be in range 0-9, got %d' % level))
        style = ('Title' if (level == 0) else ('Heading %d' % level))
        return self.add_paragraph(text, style)

    def add_page_break(self):
        """Append a paragraph containing only a page break and return it."""
        paragraph = self.add_paragraph()
        paragraph.add_run().add_break(WD_BREAK.PAGE)
        return paragraph

    def add_paragraph(self, text: str='', style: ((str | ParagraphStyle) | None)=None) -> Paragraph:
        """Append a paragraph with optional text and style to the document body."""
        return self._body.add_paragraph(text, style)

    def add_picture(self, image_path_or_stream: (str | IO[bytes]), width: ((int | Length) | None)=None, height: ((int | Length) | None)=None):
        """Append a picture in its own paragraph; returns the InlineShape."""
        run = self.add_paragraph().add_run()
        return run.add_picture(image_path_or_stream, width, height)

    def add_section(self, start_type: WD_SECTION=WD_SECTION.NEW_PAGE):
        """Append a section break of `start_type` and return the new Section."""
        new_sectPr = self._element.body.add_section_break()
        new_sectPr.start_type = start_type
        return Section(new_sectPr, self._part)

    def add_table(self, rows: int, cols: int, style: ((str | _TableStyle) | None)=None):
        """Append a rows x cols table sized to the text-column width."""
        # NOTE(review): passes self._block_width uncalled -- only correct if
        # _block_width is a property (see class note).
        table = self._body.add_table(rows, cols, self._block_width)
        table.style = style
        return table

    def core_properties(self):
        """Document core properties (author, title, etc.) from the part."""
        return self._part.core_properties

    def inline_shapes(self):
        """Inline shapes (e.g. pictures) contained in the document part."""
        return self._part.inline_shapes

    def iter_inner_content(self) -> Iterator[(Paragraph | Table)]:
        """Yield paragraphs and tables of the body in document order."""
        return self._body.iter_inner_content()

    def paragraphs(self) -> List[Paragraph]:
        """All paragraphs in the document body, in document order."""
        return self._body.paragraphs

    def part(self) -> DocumentPart:
        """The package part backing this document."""
        return self._part

    def save(self, path_or_stream: (str | IO[bytes])):
        """Save the document to a filesystem path or writable binary stream."""
        self._part.save(path_or_stream)

    def sections(self) -> Sections:
        """Sequence of the document's sections."""
        return Sections(self._element, self._part)

    def settings(self) -> Settings:
        """Document-level settings object."""
        return self._part.settings

    def styles(self):
        """The styles defined for this document."""
        return self._part.styles

    def tables(self) -> List[Table]:
        """All top-level tables in the document body."""
        return self._body.tables

    def _block_width(self) -> Length:
        """Usable text-column width of the last section (page minus margins)."""
        section = self.sections[(- 1)]
        return Emu(((section.page_width - section.left_margin) - section.right_margin))

    def _body(self) -> _Body:
        """The _Body wrapper for the document body element, created on demand."""
        if (self.__body is None):
            self.__body = _Body(self._element.body, self)
        return self.__body
class CifarNet(nn.Module):
    """Small VGG-style CNN producing 10-class logits from 3x32x32 images.

    Two conv stages (64 then 128 channels, each two 3x3 convs + 2x2 max-pool)
    followed by a three-layer classifier head with dropout.
    """

    def __init__(self):
        super(CifarNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
        self.conv4 = nn.Conv2d(128, 128, kernel_size=3)
        self.pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU(inplace=True)
        # 128 channels * 5 * 5 spatial positions = 3200 features for 32x32 input.
        self.fc1 = nn.Linear(3200, 256)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        """Return (N, 10) logits for a batch x of shape (N, 3, 32, 32)."""
        for conv in (self.conv1, self.conv2):
            x = self.relu(conv(x))
        x = self.pool(x)
        for conv in (self.conv3, self.conv4):
            x = self.relu(conv(x))
        x = self.pool(x)
        flat = x.view(-1, 3200)
        hidden = self.dropout(self.relu(self.fc1(flat)))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)
def test_trafficstopaction():
    """TrafficStopAction: equality, pretty-printing, XML round-trip, version gating."""
    tsa = OSC.TrafficStopAction('hej')
    tsa2 = OSC.TrafficStopAction('hej')
    tsa3 = OSC.TrafficStopAction('hey')
    prettyprint(tsa)
    # Same name -> equal; different name -> not equal.
    assert tsa == tsa2
    assert tsa != tsa3
    # Parsing the generated element must reproduce an equal action.
    roundtripped = OSC.TrafficStopAction.parse(tsa.get_element())
    prettyprint(roundtripped.get_element())
    assert tsa == roundtripped
    # Rejected for OpenSCENARIO v0, accepted for v1 and v2.
    expectations = [
        (0, ValidationResponse.OSC_VERSION),
        (1, ValidationResponse.OK),
        (2, ValidationResponse.OK),
    ]
    for version, expected in expectations:
        assert version_validation('GlobalAction', tsa, version) == expected
class TestTopPSamplingSearch(TestSequenceGeneratorBase):
    """Tests for nucleus (top-p) sampling in SequenceGenerator.

    The dummy model is rigged so the decisive second step distributes mass
    as w1=0.4, w2=0.35, eos=0.25; choosing sampling_topp relative to these
    masses controls which tokens survive the nucleus truncation.
    """

    def setUp(self):
        # Dictionary with two real word tokens and fixed special-symbol ids.
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        self.eos = d.eos()
        self.w1 = 4
        self.w2 = 5
        # Batch of two identical source sentences.
        self.src_tokens = torch.LongTensor([[self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos]])
        self.src_lengths = torch.LongTensor([2, 2])
        args = argparse.Namespace()
        unk = 0.0
        # Mass of the top-2 tokens (w1+w2) and of the single top token (w1)
        # at step 1; the tests pick thresholds around these values.
        self.min_top2_prob = 0.75
        self.min_top1_prob = 0.4
        w1_prob = self.min_top1_prob
        w2_prob = (self.min_top2_prob - self.min_top1_prob)
        eos_prob = (1 - self.min_top2_prob)
        # Per-step probabilities over [eos, unk, w1, w2]:
        # step 0 forces w1, step 1 is the split above, step 2 forces eos.
        args.beam_probs = [torch.FloatTensor([[0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0]]), torch.FloatTensor([[eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob]]), torch.FloatTensor([[1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0]])]
        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        self.model = task.build_model(args)
        self.tgt_dict = task.target_dictionary

    def test_topp_sampling_search_low_prob(self):
        """With topp below the top token's mass only w1 can be sampled at step 1."""
        low_sampling_topp = (self.min_top1_prob / 2.0)
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, sampling=True, sampling_topp=low_sampling_topp)
        sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}}
        hypos = generator.generate([self.model], sample)
        (eos, w1) = (self.eos, self.w1)
        # Every hypothesis of both sentences must be exactly [w1, w1, eos]
        # with step-1 probability 0.4.
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0])
        self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
        self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0])
        self.assertHypoTokens(hypos[1][0], [w1, w1, eos])
        self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0])
        self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
        self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0])

    def test_topp_sampling_search_high_prob(self):
        """With topp between top-1 and top-2 mass, w1 or w2 may appear at step 1."""
        high_sampling_topp = ((self.min_top1_prob + self.min_top2_prob) / 2.0)
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, sampling=True, sampling_topp=high_sampling_topp)
        sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}}
        hypos = generator.generate([self.model], sample)
        (eos, w1, w2) = (self.eos, self.w1, self.w2)
        # Each hypothesis is [w1, w1, eos] (p=0.4) or [w1, w2, eos] (p=0.35);
        # sampling is random, so both alternatives are accepted.
        self.assertTrue((self.hypoTokens(hypos[0][0], [w1, w1, eos]) or self.hypoTokens(hypos[0][0], [w1, w2, eos])))
        self.assertTrue((self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0])))
        self.assertTrue((self.hypoTokens(hypos[0][1], [w1, w1, eos]) or self.hypoTokens(hypos[0][1], [w1, w2, eos])))
        self.assertTrue((self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0])))
        self.assertTrue((self.hypoTokens(hypos[1][0], [w1, w1, eos]) or self.hypoTokens(hypos[1][0], [w1, w2, eos])))
        self.assertTrue((self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0])))
        self.assertTrue((self.hypoTokens(hypos[1][1], [w1, w1, eos]) or self.hypoTokens(hypos[1][1], [w1, w2, eos])))
        self.assertTrue((self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0])))

    def hypoTokens(self, hypo, tokens):
        """True if the hypothesis tokens equal `tokens` exactly."""
        return self.tensorEqual(hypo['tokens'], torch.LongTensor(tokens))

    def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
        """True if positional scores and (optionally length-normalized) total match."""
        pos_scores = torch.FloatTensor(pos_probs).log()
        if (not self.almostEqual(hypo['positional_scores'], pos_scores)):
            return False
        if (pos_scores.numel() != hypo['tokens'].numel()):
            return False
        score = pos_scores.sum()
        if normalized:
            score /= (pos_scores.numel() ** lenpen)
        return (abs((score - hypo['score'])) < 1e-06)

    def almostEqual(self, t1, t2):
        """Element-wise approximate tensor equality (tolerance 1e-4)."""
        return ((t1.size() == t2.size()) and ((t1 - t2).abs().max() < 0.0001))

    def tensorEqual(self, t1, t2):
        """Exact tensor equality (same shape, no differing elements)."""
        return ((t1.size() == t2.size()) and (t1.ne(t2).long().sum() == 0))
class DocumentationLink(ModelReprMixin, models.Model):
    """A Sphinx documentation source associated with a Python package."""

    # The package name doubles as the primary key, so each package is unique.
    package = models.CharField(primary_key=True, max_length=50, validators=(package_name_validator,), help_text='The Python package name that this documentation link belongs to.')
    # Root URL used to build links to symbols; may be blank, and when set it
    # must end with a slash (enforced by ends_with_slash_validator).
    base_url = models.URLField(help_text='The base URL from which documentation will be available for this project. Used to generate links to various symbols within this package.', blank=True, validators=(ends_with_slash_validator,))
    # Location of the package's Sphinx inventory file.
    inventory_url = models.URLField(help_text='The URL at which the Sphinx inventory is available for this package.')

    class Meta():
        # Deterministic ordering by package name in querysets and admin lists.
        ordering = ['package']

    def __str__(self):
        return f'{self.package} - {self.base_url}'
class TestTupleNames(unittest.TestCase):
    """Checks for RTuple.unique_id, the compact name encoding of tuple types."""

    def setUp(self) -> None:
        # Two distinct user-defined instance types; the expectations below show
        # both encode identically (as generic objects).
        self.inst_a = RInstance(ClassIR('A', '__main__'))
        self.inst_b = RInstance(ClassIR('B', '__main__'))

    def test_names(self) -> None:
        # 'T<arity>' prefix followed by one code per element; nested tuples
        # embed their own full encoding.  From the expected strings the codes
        # appear to be I = int primitive, O = object-like, C = bool --
        # NOTE(review): confirm against the RType.unique_id implementation.
        assert (RTuple([int_rprimitive, int_rprimitive]).unique_id == 'T2II')
        assert (RTuple([list_rprimitive, object_rprimitive, self.inst_a]).unique_id == 'T3OOO')
        assert (RTuple([list_rprimitive, object_rprimitive, self.inst_b]).unique_id == 'T3OOO')
        assert (RTuple([]).unique_id == 'T0')
        assert (RTuple([RTuple([]), RTuple([int_rprimitive, int_rprimitive])]).unique_id == 'T2T0T2II')
        assert (RTuple([bool_rprimitive, RUnion([bool_rprimitive, int_rprimitive])]).unique_id == 'T2CO')
class FC3_RaidData(BaseData):
    """Data for one kickstart `raid` command (FC3 syntax)."""

    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        self.device = kwargs.get('device', None)        # RAID device name
        self.fstype = kwargs.get('fstype', '')          # filesystem type
        self.level = kwargs.get('level', '')            # RAID level
        self.format = kwargs.get('format', True)        # format the array?
        self.spares = kwargs.get('spares', 0)           # number of spare drives
        self.preexist = kwargs.get('preexist', False)   # reuse an existing array
        self.mountpoint = kwargs.get('mountpoint', '')  # where to mount it
        self.members = kwargs.get('members', [])        # member partitions

    def __eq__(self, y):
        # Identity is determined by the raid device alone.
        if not y:
            return False
        return self.device == y.device

    def __ne__(self, y):
        return not self == y

    def _getArgsAsStr(self):
        """Render the optional flags of the `raid` command line."""
        retval = ''
        # BUG FIX: the previous check was `if str(self.device):`, which is
        # truthy even when device is None (str(None) == 'None') and therefore
        # emitted a bogus ` --device=None`.  Only emit the flag for a real,
        # non-empty device name.
        if self.device:
            retval += (' --device=%s' % self.device)
        if self.fstype:
            retval += (' --fstype="%s"' % self.fstype)
        if self.level:
            retval += (' --level=%s' % self.level.upper())
        if not self.format:
            retval += ' --noformat'
        if self.spares != 0:
            retval += (' --spares=%d' % self.spares)
        if self.preexist:
            retval += ' --useexisting'
        return retval

    def __str__(self):
        """Full `raid` command line, including member devices for new arrays."""
        retval = BaseData.__str__(self)
        retval += ('raid %s%s' % (self.mountpoint, self._getArgsAsStr()))
        if not self.preexist:
            # Members are only listed when creating a new array.
            retval += (' ' + ' '.join(self.members))
        return (retval.strip() + '\n')
class GaussNewtonCG(ConjugateGradientBase):
    """Gauss-Newton optimizer that solves each linearized subproblem with CG.

    Minimizes an L2Problem over the TensorList `variable` by alternating
    Gauss-Newton linearizations (outer loop) with conjugate-gradient solves
    of the normal equations (inner loop).
    """

    def __init__(self, problem: L2Problem, variable: TensorList, cg_eps=0.0, fletcher_reeves=True, standard_alpha=True, direction_forget_factor=0, debug=False, analyze=False, plotting=False, visdom=None):
        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, (debug or analyze or plotting))
        self.problem = problem
        self.x = variable
        self.analyze_convergence = analyze
        self.plotting = plotting
        self.fig_num = (10, 11, 12)
        self.visdom = visdom
        self.cg_eps = cg_eps
        # Temporaries shared between run_GN_iter() and A(); see clear_temp().
        self.f0 = None        # residual f(x) at the current linearization point
        self.g = None         # detached copy of f0 used to build J^T g
        self.dfdxt_g = None   # J^T g, the gradient of 0.5*||f||^2
        self.residuals = torch.zeros(0)
        self.losses = torch.zeros(0)
        self.gradient_mags = torch.zeros(0)

    def clear_temp(self):
        """Drop per-run temporaries so the autograd graph can be freed."""
        self.f0 = None
        self.g = None
        self.dfdxt_g = None

    def run_GN(self, *args, **kwargs):
        """Alias for run()."""
        return self.run(*args, **kwargs)

    def run(self, num_cg_iter, num_gn_iter=None):
        """Run the optimizer.

        Args:
            num_cg_iter: CG iterations per GN step (int), or a list giving the
                count for each GN step individually.
            num_gn_iter: Number of GN steps; required iff num_cg_iter is an int.

        Returns:
            (losses, residuals) debug tensors (populated only in debug mode).
        """
        if isinstance(num_cg_iter, int):
            if (num_gn_iter is None):
                raise ValueError('Must specify number of GN iter if CG iter is constant')
            num_cg_iter = ([num_cg_iter] * num_gn_iter)
        num_gn_iter = len(num_cg_iter)
        if (num_gn_iter == 0):
            return
        if self.analyze_convergence:
            # Record the starting point (zero update) for convergence analysis.
            self.evaluate_CG_iteration(0)
        # Outer Gauss-Newton loop.
        for cg_iter in num_cg_iter:
            self.run_GN_iter(cg_iter)
        if self.debug:
            if (not self.analyze_convergence):
                self.f0 = self.problem(self.x)
                loss = self.problem.ip_output(self.f0, self.f0)
                self.losses = torch.cat((self.losses, loss.detach().cpu().view((- 1))))
            if (self.visdom is not None):
                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')
                self.visdom.register(self.residuals, 'lineplot', 3, 'CG residuals')
                if self.analyze_convergence:
                    self.visdom.register(self.gradient_mags, 'lineplot', 4, 'Gradient magnitude')
            elif self.plotting:
                plot_graph(self.losses, self.fig_num[0], title='Loss')
                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')
                if self.analyze_convergence:
                    plot_graph(self.gradient_mags, self.fig_num[2], 'Gradient magnitude')
        self.x.detach_()
        self.clear_temp()
        return (self.losses, self.residuals)

    def run_GN_iter(self, num_cg_iter):
        """One Gauss-Newton step: linearize at x, solve with CG, update x."""
        self.x.requires_grad_(True)
        self.f0 = self.problem(self.x)
        self.g = self.f0.detach()
        if (self.debug and (not self.analyze_convergence)):
            loss = self.problem.ip_output(self.g, self.g)
            self.losses = torch.cat((self.losses, loss.detach().cpu().view((- 1))))
        self.g.requires_grad_(True)
        # J^T g, built with create_graph so A() can differentiate through it.
        self.dfdxt_g = TensorList(torch.autograd.grad(self.f0, self.x, self.g, create_graph=True))
        # Right-hand side of the normal equations: b = -J^T f.
        self.b = (- self.dfdxt_g.detach())
        (delta_x, res) = self.run_CG(num_cg_iter, eps=self.cg_eps)
        self.x.detach_()
        self.x += delta_x
        if self.debug:
            self.residuals = torch.cat((self.residuals, res))

    def A(self, x):
        """Gauss-Newton matrix-vector product: A x = J^T (J x)."""
        dfdx_x = torch.autograd.grad(self.dfdxt_g, self.g, x, retain_graph=True)
        return TensorList(torch.autograd.grad(self.f0, self.x, dfdx_x, retain_graph=True))

    def ip(self, a, b):
        """Inner product in input space (delegated to the problem)."""
        return self.problem.ip_input(a, b)

    def M1(self, x):
        """First preconditioner (delegated to the problem)."""
        return self.problem.M1(x)

    def M2(self, x):
        """Second preconditioner (delegated to the problem)."""
        return self.problem.M2(x)

    def evaluate_CG_iteration(self, delta_x):
        """Record loss and gradient magnitude at x + delta_x (analysis mode only)."""
        if self.analyze_convergence:
            x = (self.x + delta_x).detach()
            x.requires_grad_(True)
            f = self.problem(x)
            loss = self.problem.ip_output(f, f)
            grad = TensorList(torch.autograd.grad(loss, x))
            self.losses = torch.cat((self.losses, loss.detach().cpu().view((- 1))))
            # BUG FIX: the original line was missing the operator between the
            # two grad.view(-1) factors (a syntax error).  The intended value
            # is the overall gradient magnitude: sum of the per-tensor dot
            # products over the TensorList, then sqrt.
            self.gradient_mags = torch.cat((self.gradient_mags, sum((grad.view((- 1)) @ grad.view((- 1)))).cpu().sqrt().detach().view((- 1))))
def is_same_side(p1: ColorXY, p2: ColorXY, a: ColorXY, b: ColorXY) -> bool:
    """Return True if p1 and p2 lie on the same side of the line through a and b.

    Uses the sign of the 2-D cross product of AB with AP for each point; a
    point exactly on the line (cross product zero) counts as being on the
    same side as anything.
    """
    ab_x = b[0] - a[0]
    ab_y = b[1] - a[1]
    # z-component of AB x AP for each point; same sign => same half-plane.
    cross_p1 = ab_x * (p1[1] - a[1]) - ab_y * (p1[0] - a[0])
    cross_p2 = ab_x * (p2[1] - a[1]) - ab_y * (p2[0] - a[0])
    return cross_p1 * cross_p2 >= 0
def plot_segments(onsets, offsets, labels, palette, y=0.5, seg_height=0.1, ax=None, patch_kwargs=None):
    """Draw labeled horizontal segments (e.g. annotation intervals) on an axis.

    Parameters
    ----------
    onsets, offsets : sequences of segment start / end x-coordinates.
    labels : sequence of labels, one per segment, used to look up facecolors.
    palette : mapping from label to facecolor.
    y : float, baseline y-coordinate of every segment rectangle.
    seg_height : float, height of each rectangle.
    ax : matplotlib Axes to draw on; a new figure/axes is created if None.
    patch_kwargs : optional dict of extra keyword args for PatchCollection.

    Returns
    -------
    The PatchCollection added to the axis.
    """
    if patch_kwargs is None:
        patch_kwargs = {}
    if ax is None:
        # BUG FIX: `plt.subplots` was referenced without being called, so the
        # tuple unpacking failed (and `ax` would never be an Axes).
        (fig, ax) = plt.subplots()
    segments = [
        Rectangle(xy=(on, y), height=seg_height, width=(off - on))
        for (on, off) in zip(onsets, offsets)
    ]
    facecolors = [palette[label] for label in labels]
    pc = PatchCollection(segments, facecolors=facecolors, **patch_kwargs, label='segment')
    ax.add_collection(pc)
    return pc
def platipy_cli():
    """Dispatch to a named PlatiPy command-line tool.

    Usage: ``platipy [tool]``.  With no (or an unknown) tool name, print the
    list of available tools and exit.
    """
    requested = sys.argv[1] if len(sys.argv) > 1 else None
    if requested not in tools:
        print('')
        print(' PlatiPy CLI (Command Line Interface)')
        print(' ')
        print('')
        print(' Usage: platipy [tool]')
        print('')
        print(' Supply the name of the desired tool:')
        for key in tools:
            print(f' {key}')
        print('')
        sys.exit()
    # Drop the tool name so the tool itself sees its own args in sys.argv.
    del sys.argv[1]
    tools[requested]()
# NOTE(review): this bare call looks like a mangled fixture decorator (e.g.
# `@with_fixtures(...)`) that should be attached to the test below -- as
# written its return value is discarded; confirm against the original source.
_fixtures(WebFixture, AccessDomainFixture, AccessUIFixture)


def test_edit_and_add_own(web_fixture, access_domain_fixture, access_ui_fixture):
    """A logged-in user can add an address to their own address book, then edit it."""
    browser = access_ui_fixture.browser
    fixture = access_domain_fixture
    account = fixture.account
    address_book = fixture.address_book
    web_fixture.log_in(browser=browser, system_account=account)
    browser.open('/')
    # Add a new address via the user's own address book.
    browser.click(XPath.link().with_text('Address book of '))
    browser.click(XPath.link().with_text('Add address'))
    browser.type(XPath.input_labelled('Name'), 'Someone')
    browser.type(XPath.input_labelled('Email'), '')
    browser.click(XPath.button_labelled('Save'))
    assert browser.is_element_present(XPath.paragraph().including_text('Someone: '))
    # Edit the address just added and verify the change is displayed.
    browser.click(XPath.button_labelled('Edit'))
    browser.type(XPath.input_labelled('Name'), 'Else')
    browser.type(XPath.input_labelled('Email'), '')
    browser.click(XPath.button_labelled('Update'))
    assert browser.is_element_present(XPath.paragraph().including_text('Else: '))
def format_to_lines_new(args):
    """Pair English/Chinese json files per split and shard the converted output.

    For each split (train/valid/test) every English file under
    ``raw_path/<split>/eng`` is matched with its Chinese reference file under
    ``raw_path/<split>/chn``, converted by ``_format_to_lines_new`` in a
    worker pool, and written out as json shards named
    ``<save_path>.<split>.<shard>.json`` of roughly ``args.shard_size`` items.
    """
    json_dir_init = os.path.abspath(args.raw_path)
    dataset_split = ['test', 'valid', 'train']
    lang_split = ['eng', 'chn']
    (train_files, valid_files, test_files) = ({}, {}, {})
    # Collect the per-language file listings for each split.
    for data_sp in dataset_split:
        for lan_sp in lang_split:
            if (data_sp == 'train'):
                train_files[lan_sp] = os.listdir(os.path.join(json_dir_init, data_sp, lan_sp))
            elif (data_sp == 'test'):
                test_files[lan_sp] = os.listdir(os.path.join(json_dir_init, data_sp, lan_sp))
            elif (data_sp == 'valid'):
                valid_files[lan_sp] = os.listdir(os.path.join(json_dir_init, data_sp, lan_sp))
    corpora = {'train': train_files, 'valid': valid_files, 'test': test_files}
    for corpus_type in ['train', 'valid', 'test']:
        # Derive the Chinese reference filename by dropping the last two dotted
        # components of the English name and appending '.chnref.json'.
        # NOTE(review): the components are joined with '' rather than '.', so
        # stems containing dots lose them -- confirm this is intended.
        a_lst = [(f_eng, (''.join(f_eng.split('.')[:(- 2)]) + '.chnref.json'), args) for f_eng in corpora[corpus_type]['eng']]
        a_lst = [(os.path.join(json_dir_init, corpus_type, 'eng', f_eng), os.path.join(json_dir_init, corpus_type, 'chn', f_chn), args) for (f_eng, f_chn, args) in a_lst]
        pool = Pool(args.n_cpus)
        dataset = []
        p_ct = 0
        for d in pool.imap_unordered(_format_to_lines_new, a_lst):
            dataset.append(d)
            if (len(dataset) > args.shard_size):
                # Flush a full shard to disk.
                pt_file = '{:s}.{:s}.{:d}.json'.format(args.save_path, corpus_type, p_ct)
                with open(pt_file, 'w') as save:
                    print('saving to file ', pt_file)
                    save.write(json.dumps(dataset, ensure_ascii=False))
                    p_ct += 1
                    dataset = []
        pool.close()
        pool.join()
        if (len(dataset) > 0):
            # Flush the final, possibly short, shard for this split.
            pt_file = '{:s}.{:s}.{:d}.json'.format(args.save_path, corpus_type, p_ct)
            with open(pt_file, 'w') as save:
                print('saving to file ', pt_file)
                save.write(json.dumps(dataset, ensure_ascii=False))
                p_ct += 1
                dataset = []
    return
def test_model_gradient_descent_limited_evaluations():
    """max_evaluations caps the number of objective evaluations performed."""
    x0 = np.random.randn(10)
    result = model_gradient_descent(
        sum_of_squares,
        x0,
        sample_radius=0.1,
        n_sample_points=10,
        rate=0.1,
        tol=1e-08,
        known_values=None,
        max_evaluations=15,
    )
    assert isinstance(result.x, np.ndarray)
    assert isinstance(result.fun, float)
    # With a budget of 15, the optimizer stops after exactly 12 evaluations.
    assert result.nfev == 12
class SeviriL2AMVBufrData():
    """Test helper that builds a SEVIRI L2 AMV BUFR file handler without real I/O."""

    # NOTE(review): this bare tuple looks like the argument list of a lost
    # skip decorator (e.g. `@pytest.mark.skipif(...)`); as written it is a
    # no-op expression.  Confirm against the original source.
    (sys.platform.startswith('win'), "'eccodes' not supported on Windows")

    def __init__(self, filename):
        from satpy.readers.seviri_l2_bufr import SeviriL2BufrFileHandler
        # Patch np.fromfile so constructing the handler touches no real file.
        with mock.patch('satpy.readers.seviri_l2_bufr.np.fromfile'):
            self.fh = SeviriL2BufrFileHandler(filename, FILENAME_INFO2, filetype_info={'file_type': 'seviri_l2_bufr_amv'}, with_area_definition=True)
class DirectoryFormat(FormatBase, metaclass=_DirectoryMeta):
    """Format whose payload is a directory tree, validated member-by-member."""

    def validate(self, level='max'):
        """Validate the directory at self.path against the declared file fields.

        Raises ValidationError if the path is not a directory, if a file is
        not recognized by any field, or if the optional `_validate_` hook
        rejects the directory as a whole.
        """
        _check_validation_level(level)
        if (not self.path.is_dir()):
            raise ValidationError(('%s is not a directory.' % self.path))
        # Map every visible (non-dot) file to None; each field's
        # _validate_members is expected to overwrite the value for the files
        # it claims.
        collected_paths = {p: None for p in self.path.glob('**/*') if ((not p.name.startswith('.')) and p.is_file())}
        for field in self._fields:
            getattr(self, field)._validate_members(collected_paths, level)
        for (path, value) in collected_paths.items():
            if value:
                continue
            # Still None means no field claimed the file.
            # NOTE(review): files marked with a falsy non-None value fall
            # through both branches and are silently accepted -- confirm that
            # is the intended sentinel protocol.
            if (value is None):
                raise ValidationError(('Unrecognized file (%s) for %s.' % (path, self.__class__.__name__)))
        if hasattr(self, '_validate_'):
            try:
                self._validate_(level)
            except ValidationError as e:
                # Re-raise with the path and format name for context.
                raise ValidationError(('%s is not a(n) %s:\n\n%s' % (self.path, self.__class__.__name__, str(e)))) from e

    def save(self, path, ext=None):
        """Copy the directory tree to `path` and return the destination path.

        NOTE(review): the `ext` parameter is accepted but never used, and
        rstrip('.') only trims trailing dots from the destination -- confirm
        whether extension handling was lost here.
        """
        path = str(path)
        path = path.rstrip('.')
        shutil.copytree(self.path, path)
        return path
def train(epoch, model, model_ema, vnet, optimizer_model, optimizer_vnet, train_loader, train_meta_loader, meta_lr):
    """One training epoch of a meta-weighted mixup scheme with an EMA model.

    Every batch: build mixup inputs, weight the per-sample losses between the
    (one-hot) given labels and EMA-derived pseudo-labels using `vnet`, and
    update the main model plus its EMA copy.  Every 10th batch additionally
    performs a meta step: a cloned model takes a virtual update, and `vnet`
    is trained on a batch from `train_meta_loader`.

    NOTE(review): block boundaries were inferred while reformatting (the
    source had no indentation); also relies on globals `args`, `c`,
    `build_model`, `get_label`, `my_wl` -- confirm against the original.
    """
    print(('\nEpoch: %d' % epoch))
    train_loss = 0
    meta_loss = 0
    train_meta_loader_iter = iter(train_meta_loader)
    for (batch_idx, (inputs, targets, _, index)) in enumerate(train_loader):
        model.train()
        model_ema.train()
        # One-hot encode the integer labels.
        targets = torch.zeros(inputs.shape[0], args.num_classes).scatter_(1, targets.view((- 1), 1).long(), 1)
        (inputs, targets) = (inputs.cuda(), targets.cuda())
        # Pseudo-labels come from the EMA model's (gradient-free) predictions.
        with torch.no_grad():
            outputs_ema = model_ema(inputs)
        psudo_label = get_label(index, outputs_ema, epoch)
        # Mixup coefficient from Beta(alpha, alpha), folded to [0.5, 1].
        l = torch.distributions.beta.Beta(args.alpha, args.alpha).sample().cuda()
        l = max(l, (1 - l))
        idx = torch.randperm(inputs.shape[0])
        mix_inputs = ((l * inputs) + ((1 - l) * inputs[idx]))
        if ((batch_idx % 10) == 0):
            # Meta step: clone the model and take a virtual gradient step so
            # vnet can be optimized through it.
            meta_model = build_model().cuda()
            meta_model.load_state_dict(model.state_dict())
            outputs = meta_model(mix_inputs)
            # Per-sample cross entropy against the original and permuted labels.
            cost_1 = torch.sum(((- F.log_softmax(outputs, dim=1)) * targets), dim=1)
            cost_11 = torch.reshape(cost_1, (len(cost_1), 1))
            v_lambda_1 = vnet(cost_11.data, targets.data, c).squeeze(1)
            cost_2 = torch.sum(((- F.log_softmax(outputs, dim=1)) * targets[idx]), dim=1)
            cost_12 = torch.reshape(cost_2, (len(cost_2), 1))
            v_lambda_2 = vnet(cost_12.data, targets[idx].data, c).squeeze(1)
            # vnet's weight interpolates each loss term between the given
            # label and the pseudo-label, for both mixup halves.
            l_f_meta = ((l * ((torch.sum(((- F.log_softmax(outputs, dim=1)) * targets), dim=1) * v_lambda_1) + (torch.sum(((- F.log_softmax(outputs, dim=1)) * psudo_label), dim=1) * (1 - v_lambda_1)))) + ((1 - l) * ((torch.sum(((- F.log_softmax(outputs, dim=1)) * targets[idx]), dim=1) * v_lambda_2) + (torch.sum(((- F.log_softmax(outputs, dim=1)) * psudo_label[idx]), dim=1) * (1 - v_lambda_2))))).mean()
            meta_model.zero_grad()
            grads = torch.autograd.grad(l_f_meta, meta_model.params(), create_graph=True)
            meta_model.update_params(lr_inner=meta_lr, source_params=grads)
            del grads
            # Pull the next clean meta batch, restarting the iterator at the end.
            try:
                (inputs_val, targets_val, _, _) = next(train_meta_loader_iter)
            except StopIteration:
                train_meta_loader_iter = iter(train_meta_loader)
                (inputs_val, targets_val, _, _) = next(train_meta_loader_iter)
            (inputs_val, targets_val) = (inputs_val.cuda(), targets_val.cuda())
            y_g_hat = meta_model(inputs_val)
            l_g_meta = F.cross_entropy(y_g_hat, targets_val)
            optimizer_vnet.zero_grad()
            l_g_meta.backward()
            optimizer_vnet.step()
        # Main model update, with vnet weights held fixed.
        outputs = model(mix_inputs)
        cost_1 = torch.sum(((- F.log_softmax(outputs, dim=1)) * targets), dim=1)
        cost_11 = torch.reshape(cost_1, (len(cost_1), 1))
        cost_2 = torch.sum(((- F.log_softmax(outputs, dim=1)) * targets[idx]), dim=1)
        cost_12 = torch.reshape(cost_2, (len(cost_2), 1))
        with torch.no_grad():
            v_lambda_1 = vnet(cost_11, targets, c).squeeze(1)
            v_lambda_2 = vnet(cost_12, targets[idx], c).squeeze(1)
        loss = ((l * ((torch.sum(((- F.log_softmax(outputs, dim=1)) * targets), dim=1) * v_lambda_1) + (torch.sum(((- F.log_softmax(outputs, dim=1)) * psudo_label), dim=1) * (1 - v_lambda_1)))) + ((1 - l) * ((torch.sum(((- F.log_softmax(outputs, dim=1)) * targets[idx]), dim=1) * v_lambda_2) + (torch.sum(((- F.log_softmax(outputs, dim=1)) * psudo_label[idx]), dim=1) * (1 - v_lambda_2))))).mean()
        optimizer_model.zero_grad()
        loss.backward()
        optimizer_model.step()
        my_wl(cost_11, v_lambda_1, index)
        # EMA update of the shadow model: ema = 0.999*ema + 0.001*param.
        for (param, param_ema) in zip(model.params(), model_ema.params()):
            param_ema.data.mul_(0.999).add_(0.001, param.data)
        train_loss += loss.item()
        meta_loss += l_g_meta.item()
        if (((batch_idx + 1) % 50) == 0):
            print(('Epoch: [%d/%d]\tIters: [%d/%d]\tLoss: %.4f\tMetaLoss:%.4f\t' % ((epoch + 1), args.epochs, (batch_idx + 1), (len(train_loader.dataset) / args.batch_size), (train_loss / (batch_idx + 1)), (meta_loss / (batch_idx + 1)))))
def get_worker_config(dask_worker):
    """Collect a diagnostic snapshot of a Dask worker's configuration.

    Returns a dict covering registered plugins (their picklable attributes),
    memory-manager limits, JIT-unspill / device-memory setup, the scheduler
    comm protocol (with active UCX transports where applicable), and the
    configured comm timeouts.
    """
    from .proxify_host_file import ProxifyHostFile

    info = {}

    # One entry per registered worker plugin, keyed by its class name.
    for plugin in dask_worker.plugins.values():
        attrs = {
            name: getattr(plugin, name)
            for name in dir(plugin)
            if not (name.startswith('_') or name in {'setup', 'cores'})
        }
        try:
            pickle.dumps(attrs)
        except TypeError:
            # Unpicklable plugin state cannot be reported faithfully.
            attrs = 'UNKNOWN CONFIG'
        info[f'[plugin] {type(plugin).__name__}'] = attrs

    for attr in ['memory_limit', 'memory_pause_fraction', 'memory_spill_fraction', 'memory_target_fraction']:
        info[attr] = getattr(dask_worker.memory_manager, attr)

    info['jit-unspill'] = isinstance(dask_worker.data, ProxifyHostFile)
    if info['jit-unspill']:
        info['device-memory-limit'] = dask_worker.data.manager._device_memory_limit
    elif hasattr(dask_worker.data, 'device_buffer'):
        info['device-memory-limit'] = dask_worker.data.device_buffer.n

    scheme, _ = parse_address(dask_worker.scheduler.address)
    info['protocol'] = scheme
    if scheme == 'ucx':
        import ucp
        info['ucx-transports'] = ucp.get_active_transports()
    elif scheme == 'ucxx':
        import ucxx
        info['ucx-transports'] = ucxx.get_active_transports()

    info['distributed.comm.timeouts'] = dask.config.get('distributed.comm.timeouts')
    return info
class FConvEncoder(FairseqEncoder):
    """Convolutional (fconv) encoder with optional per-layer self-attention.

    Each layer is: optional 1x1 residual projection, ConvTBC producing twice
    the channels, GLU gating, optional self-attention, and a scaled residual
    connection.
    """

    def __init__(self, dictionary, embed_dim=512, max_positions=1024, convolutions=(((512, 3),) * 20), dropout=0.1, attention=False, attention_nheads=1):
        super().__init__(dictionary)
        self.dropout = dropout
        # Set externally after construction; used to scale gradients in forward().
        self.num_attention_layers = None
        num_embeddings = len(dictionary)
        self.padding_idx = dictionary.pad()
        self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
        self.embed_positions = PositionalEmbedding(max_positions, embed_dim, self.padding_idx)

        def expand_bool_array(val):
            # A single bool stands for "same setting in every layer".
            if isinstance(val, bool):
                return ([val] * len(convolutions))
            return val

        attention = expand_bool_array(attention)
        in_channels = convolutions[0][0]
        self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
        self.projections = nn.ModuleList()
        self.convolutions = nn.ModuleList()
        self.attention = nn.ModuleList()
        self.attproj = nn.ModuleList()
        for (i, (out_channels, kernel_size)) in enumerate(convolutions):
            # 1x1 projection only where the residual changes channel count.
            self.projections.append((Linear(in_channels, out_channels) if (in_channels != out_channels) else None))
            # Conv emits 2*out_channels so GLU can gate it back down to out_channels.
            self.convolutions.append(ConvTBC(in_channels, (out_channels * 2), kernel_size, dropout=dropout))
            self.attention.append((SelfAttention(out_channels, embed_dim, attention_nheads) if attention[i] else None))
            in_channels = out_channels
        self.fc2 = Linear(in_channels, embed_dim)

    def forward(self, src_tokens, src_lengths):
        """Encode `src_tokens` (B x T); returns dict with 'encoder_out' (x, y)
        and 'encoder_padding_mask' (or None when nothing is padded)."""
        # Token + position embeddings, with dropout.
        x = (self.embed_tokens(src_tokens) + self.embed_positions(src_tokens))
        x = F.dropout(x, p=self.dropout, training=self.training)
        input_embedding = x.transpose(0, 1)
        x = self.fc1(x)
        encoder_padding_mask = src_tokens.eq(self.padding_idx).t()
        if (not encoder_padding_mask.any()):
            # Skip masking entirely when the batch has no padding.
            encoder_padding_mask = None
        # ConvTBC expects time-first layout: B x T x C -> T x B x C.
        x = x.transpose(0, 1)
        for (proj, conv, attention) in zip(self.projections, self.convolutions, self.attention):
            residual = (x if (proj is None) else proj(x))
            if (encoder_padding_mask is not None):
                x = x.masked_fill(encoder_padding_mask.unsqueeze((- 1)), 0)
            x = F.dropout(x, p=self.dropout, training=self.training)
            # Pad the time dimension so the conv output keeps length T.
            padding_l = ((conv.kernel_size[0] - 1) // 2)
            padding_r = (conv.kernel_size[0] // 2)
            x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
            x = conv(x)
            x = F.glu(x, dim=2)
            if (attention is not None):
                x = attention(x)
            # Scaled residual connection, as in the fconv architecture.
            x = ((x + residual) * math.sqrt(0.5))
        # Back to batch-first: T x B x C -> B x T x C.
        x = x.transpose(1, 0)
        x = self.fc2(x)
        if (encoder_padding_mask is not None):
            encoder_padding_mask = encoder_padding_mask.t()
            x = x.masked_fill(encoder_padding_mask.unsqueeze((- 1)), 0)
        # Scale gradients flowing back from the decoder's attention layers.
        x = GradMultiply.apply(x, (1.0 / (2.0 * self.num_attention_layers)))
        # y adds the input embedding back in (used as attention values).
        y = ((x + input_embedding.transpose(0, 1)) * math.sqrt(0.5))
        return {'encoder_out': (x, y), 'encoder_padding_mask': encoder_padding_mask}

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder the batch dimension of encoder output to `new_order`."""
        encoder_out['encoder_out'] = tuple((eo.index_select(0, new_order) for eo in encoder_out['encoder_out']))
        if (encoder_out['encoder_padding_mask'] is not None):
            encoder_out['encoder_padding_mask'] = encoder_out['encoder_padding_mask'].index_select(0, new_order)
        if ('pretrained' in encoder_out):
            encoder_out['pretrained']['encoder_out'] = tuple((eo.index_select(0, new_order) for eo in encoder_out['pretrained']['encoder_out']))
        return encoder_out

    def max_positions(self):
        """Maximum input length supported by the positional embeddings."""
        return self.embed_positions.max_positions()
def draw_experiment(env_axes, experiment_statistics, algo):
    """Plot reward, cost, and cost-regret curves (median with bounds) for one algorithm."""
    metrics = [
        ('objectives', 'Average reward return'),
        ('mean_sum_costs', 'Average cost return'),
        ('average_costs', 'Cost regret'),
    ]
    for ax, (metric_name, label) in zip(env_axes, metrics):
        # Each metric is stored as <name>_median / _upper / _lower series.
        draw(
            ax,
            experiment_statistics['timesteps'],
            experiment_statistics[metric_name + '_median'],
            experiment_statistics[metric_name + '_upper'],
            experiment_statistics[metric_name + '_lower'],
            label=resolve_name(algo),
        )
        ax.set_ylabel(label)
class SNResNetDiscriminator(chainer.Chain):
    """Spectrally-normalized ResNet discriminator that also returns per-block norms."""

    def __init__(self, ch=64, activation=F.relu):
        super(SNResNetDiscriminator, self).__init__()
        self.activation = activation
        initializer = chainer.initializers.GlorotUniform()
        with self.init_scope():
            # Channel width grows ch -> 8*ch across the downsampling blocks.
            self.block1 = OptimizedBlock(3, ch)
            self.block2 = BlockDisc(ch, ch * 2, activation=activation, downsample=True)
            self.block3 = BlockDisc(ch * 2, ch * 4, activation=activation, downsample=True)
            self.block4 = BlockDisc(ch * 4, ch * 8, activation=activation, downsample=True)
            self.block5 = BlockDisc(ch * 8, ch * 8, activation=activation, downsample=False)
            self.l6 = SNLinear(ch * 8, 1, initialW=initializer)

    def __call__(self, x):
        """Return (realness score, stacked per-block feature norms) for input x."""
        feat = self.block1(x)
        norms = []
        for block in (self.block2, self.block3, self.block4, self.block5):
            feat, block_norm = block(feat, with_norm=True)
            norms.append(block_norm)
        feat = self.activation(feat)
        # Global sum pooling over the spatial dimensions before the final linear.
        pooled = F.sum(feat, axis=(2, 3))
        score = self.l6(pooled)
        return (score, F.stack(norms, axis=0))
def test_move_to_device_trivial() -> None:
    """Moving a dict of CPU tensors to the CPU must keep keys, types and device."""
    in_dict = {'k1': torch.zeros(10), 'k2': torch.ones(4)}
    for k in in_dict:
        assert isinstance(in_dict[k], torch.Tensor)
        assert in_dict[k].device == torch.device('cpu')
    out_dict = move_to_device(in_dict, torch.device('cpu'))
    # `np.alltrue` was deprecated and removed in NumPy 2.0; the operand is
    # already a plain bool, so assert it directly.
    assert list(in_dict.keys()) == list(out_dict.keys())
    for k in out_dict:
        assert isinstance(out_dict[k], torch.Tensor)
        assert out_dict[k].device == torch.device('cpu')
class Effect11392(BaseEffect):
    """Caldari Navy destroyer hull bonus: Small Hybrid Turret optimal range.

    Boosts ``maxRange`` of modules requiring Small Hybrid Turret by the
    ship's ``shipBonusNavyDestroyerCaldari2`` attribute, scaled by the
    Caldari Destroyer skill.
    """

    type = 'passive'

    # `handler` takes the fit as its first argument, not `self`; mark it
    # static so instance access does not mis-bind `fit` to the instance.
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost(
            lambda mod: mod.item.requiresSkill('Small Hybrid Turret'),
            'maxRange',
            ship.getModifiedItemAttr('shipBonusNavyDestroyerCaldari2'),
            skill='Caldari Destroyer',
            **kwargs)
class TestFork(TestCase):
    def test_fork(self):
        """Forks snapshot their parent's serializers at fork time and stay isolated."""
        fork_a = jsons.fork()
        fork_b = jsons.fork()
        # Forked from fork_a BEFORE fork_a gets a custom str serializer.
        early_child = jsons.fork(fork_inst=fork_a)
        jsons.set_serializer(lambda *_, **__: 'f1', str, fork_inst=fork_a)
        jsons.set_serializer(lambda *_, **__: 'f2', str, fork_inst=fork_b)
        jsons.set_serializer(lambda *_, **__: 3, int, fork_inst=early_child)
        # Forked AFTER, so it inherits fork_a's str serializer.
        late_child = jsons.fork(fork_inst=fork_a)

        self.assertEqual('f1', jsons.dump('I wanted a fork on the table.', fork_inst=fork_a))
        self.assertEqual('f2', jsons.dump('I wanted a fork on the table.', fork_inst=fork_b))
        # early_child predates fork_a's str serializer: strings dump unchanged.
        self.assertEqual('f3', jsons.dump('f3', fork_inst=early_child))
        self.assertEqual(3, jsons.dump(42, fork_inst=early_child))
        self.assertEqual('f1', jsons.dump('I wanted a fork on the table.', fork_inst=late_child))
def test_patched_errwindow(capfd, mocker, monkeypatch):
    """check_python_version() on an old interpreter shows a Tk error box, not console output."""
    # The original call was missing the replacement value. Patch in an ancient
    # hexversion (0x03000000 == 3.0.0, below any supported minimum) so the
    # version check takes its error path. NOTE(review): confirm the exact
    # value used upstream; any version below the minimum should behave the same.
    monkeypatch.setattr(checkpyver.sys, 'hexversion', 0x03000000)
    # Neutralize sys.exit so the test survives the fatal path.
    monkeypatch.setattr(checkpyver.sys, 'exit', lambda status: None)
    try:
        import tkinter  # noqa: F401  # probe only: is a real Tk available?
    except ImportError:
        # No tkinter installed: stub Tk/messagebox with minimal specs.
        tk_mock = mocker.patch('qutebrowser.misc.checkpyver.Tk',
                               spec=['withdraw'], new_callable=mocker.Mock)
        msgbox_mock = mocker.patch('qutebrowser.misc.checkpyver.messagebox',
                                   spec=['showerror'])
    else:
        tk_mock = mocker.patch('qutebrowser.misc.checkpyver.Tk', autospec=True)
        msgbox_mock = mocker.patch('qutebrowser.misc.checkpyver.messagebox',
                                   autospec=True)
    checkpyver.check_python_version()
    stdout, stderr = capfd.readouterr()
    # The error must go to the GUI dialog, never to the console.
    assert not stdout
    assert not stderr
    tk_mock.assert_called_with()
    tk_mock().withdraw.assert_called_with()
    msgbox_mock.showerror.assert_called_with('qutebrowser: Fatal error!',
                                             unittest.mock.ANY)
def test_get_secret():
    """get_secret only reveals secrets from (on-chain) unlocked locks, not pending ones."""
    known_secret_offchain = factories.make_secret()
    known_secret_onchain = factories.make_secret()
    pending_secrethash = factories.make_secret_hash()
    unknown_secrethash = factories.make_secret_hash()

    lock_state = HashTimeLockState(amount=10, expiration=10,
                                   secrethash=factories.UNIT_SECRETHASH)
    base_state = factories.create(factories.NettingChannelEndStateProperties())
    end_state = factories.replace(
        base_state,
        secrethashes_to_lockedlocks={pending_secrethash: lock_state},
        secrethashes_to_unlockedlocks={
            keccak(known_secret_offchain): UnlockPartialProofState(
                lock=lock_state, secret=known_secret_offchain)
        },
        secrethashes_to_onchain_unlockedlocks={
            keccak(known_secret_onchain): UnlockPartialProofState(
                lock=lock_state, secret=known_secret_onchain)
        },
    )

    assert get_secret(end_state, keccak(known_secret_offchain)) == known_secret_offchain
    assert get_secret(end_state, keccak(known_secret_onchain)) == known_secret_onchain
    # Locked (not yet unlocked) and completely unknown hashes yield nothing.
    assert get_secret(end_state, pending_secrethash) is None
    assert get_secret(end_state, unknown_secrethash) is None
class History:
    """Undo/redo history of project changes.

    Done changes are kept on an undo list and undone changes on a redo
    list; both can be persisted through the project's data files when
    the ``save_history`` preference is enabled.

    Fix: ``tobe_undone``, ``tobe_redone``, ``max_undos`` and ``save`` are
    consumed as attributes throughout this class (``if self.save:``,
    ``len(...) > self.max_undos``) but were defined as plain methods —
    the ``@property`` decorators had been lost; they are restored here.
    """

    def __init__(self, project, maxundos=None):
        self.project = project
        self._undo_list = []
        self._redo_list = []
        self._maxundos = maxundos
        self._load_history()
        self.project.data_files.add_write_hook(self.write)
        # The change currently being performed/undone/redone, if any.
        self.current_change = None

    def _load_history(self):
        """Restore persisted undo/redo lists when history saving is enabled."""
        if self.save:
            result = self.project.data_files.read_data('history')
            if result is not None:
                to_change = change.DataToChange(self.project)
                for data in result[0]:
                    self._undo_list.append(to_change(data))
                for data in result[1]:
                    self._redo_list.append(to_change(data))

    def do(self, changes, task_handle=taskhandle.DEFAULT_TASK_HANDLE):
        """Perform `changes` and record it on the undo list."""
        try:
            self.current_change = changes
            changes.do(change.create_job_set(task_handle, changes))
        finally:
            self.current_change = None
        if self._is_change_interesting(changes):
            self.undo_list.append(changes)
            self._remove_extra_items()
        # A fresh change invalidates everything that had been undone.
        del self.redo_list[:]

    def _remove_extra_items(self):
        # Trim the oldest entries once the configured cap is exceeded.
        if len(self.undo_list) > self.max_undos:
            del self.undo_list[0:len(self.undo_list) - self.max_undos]

    def _is_change_interesting(self, changes):
        # A change is worth recording iff it touches a non-ignored resource.
        for resource in changes.get_changed_resources():
            if not self.project.is_ignored(resource):
                return True
        return False

    def undo(self, change=None, drop=False, task_handle=taskhandle.DEFAULT_TASK_HANDLE):
        """Undo `change` (default: latest) plus everything depending on it.

        Returns the undone changes; with ``drop=True`` they are removed
        from the redo list as well.

        Raises:
            exceptions.HistoryError: when the undo list is empty.
        """
        # NOTE: the parameter deliberately keeps its public name `change`,
        # shadowing the `change` module inside this method only.
        if not self._undo_list:
            raise exceptions.HistoryError('Undo list is empty')
        if change is None:
            change = self.undo_list[-1]
        dependencies = self._find_dependencies(self.undo_list, change)
        self._move_front(self.undo_list, dependencies)
        self._perform_undos(len(dependencies), task_handle)
        result = self.redo_list[-len(dependencies):]
        if drop:
            del self.redo_list[-len(dependencies):]
        return result

    def redo(self, change=None, task_handle=taskhandle.DEFAULT_TASK_HANDLE):
        """Redo `change` (default: latest undone) plus its dependents.

        Raises:
            exceptions.HistoryError: when the redo list is empty.
        """
        if not self.redo_list:
            raise exceptions.HistoryError('Redo list is empty')
        if change is None:
            change = self.redo_list[-1]
        dependencies = self._find_dependencies(self.redo_list, change)
        self._move_front(self.redo_list, dependencies)
        self._perform_redos(len(dependencies), task_handle)
        return self.undo_list[-len(dependencies):]

    def _move_front(self, change_list, changes):
        # Reorder so the selected changes sit at the end (performed first).
        for change_ in changes:
            change_list.remove(change_)
            change_list.append(change_)

    def _find_dependencies(self, change_list, change_):
        index = change_list.index(change_)
        return _FindChangeDependencies(change_list[index:])()

    def _perform_undos(self, count, task_handle):
        for _ in range(count):
            self.current_change = self.undo_list[-1]
            try:
                job_set = change.create_job_set(task_handle, self.current_change)
                self.current_change.undo(job_set)
            finally:
                self.current_change = None
            self.redo_list.append(self.undo_list.pop())

    def _perform_redos(self, count, task_handle):
        for _ in range(count):
            self.current_change = self.redo_list[-1]
            try:
                job_set = change.create_job_set(task_handle, self.current_change)
                self.current_change.do(job_set)
            finally:
                self.current_change = None
            self.undo_list.append(self.redo_list.pop())

    def contents_before_current_change(self, file):
        """Return `file`'s contents as they were before the in-flight change."""
        if self.current_change is None:
            return None
        result = self._search_for_change_contents([self.current_change], file)
        if result is not None:
            return result
        if file.exists() and not file.is_folder():
            return file.read()
        else:
            return None

    def _search_for_change_contents(self, change_list, file):
        # Walk changes newest-first, recursing into change sets.
        for change_ in reversed(change_list):
            if isinstance(change_, change.ChangeSet):
                result = self._search_for_change_contents(change_.changes, file)
                if result is not None:
                    return result
            if isinstance(change_, change.ChangeContents) and change_.resource == file:
                return change_.old_contents

    def write(self):
        """Persist both lists through the project's data files (if enabled)."""
        if self.save:
            data = []
            to_data = change.ChangeToData()
            self._remove_extra_items()
            data.append([to_data(change_) for change_ in self.undo_list])
            data.append([to_data(change_) for change_ in self.redo_list])
            self.project.data_files.write_data('history', data)

    def get_file_undo_list(self, resource):
        """All recorded changes that touched `resource`."""
        return [change_ for change_ in self.undo_list
                if resource in change_.get_changed_resources()]

    def __str__(self):
        return 'History holds %s changes in memory' % (
            len(self.undo_list) + len(self.redo_list))

    undo_list = property(lambda self: self._undo_list)
    redo_list = property(lambda self: self._redo_list)

    @property
    def tobe_undone(self):
        """The next change `undo()` would revert, if any."""
        if self.undo_list:
            return self.undo_list[-1]

    @property
    def tobe_redone(self):
        """The next change `redo()` would re-apply, if any."""
        if self.redo_list:
            return self.redo_list[-1]

    @property
    def max_undos(self):
        if self._maxundos is None:
            return self.project.prefs.get('max_history_items', 100)
        else:
            return self._maxundos

    @property
    def save(self):
        return self.project.prefs.get('save_history', False)

    def compress(self):
        """compress_history is no longer supported"""
        return False

    def clear(self):
        """Forget all done and undone changes."""
        del self.undo_list[:]
        del self.redo_list[:]
def test_perform_indexing_needs_reindexing(initialized_db, set_secscan_config):
    """Stale manifests (old indexer hash, past the reindex threshold) get reindexed."""
    secscan = V4SecurityScanner(application, instance_keys, storage)
    api = mock.Mock()
    api.state.return_value = {'state': 'xyz'}
    api.index.return_value = (
        {'err': None, 'state': IndexReportState.Index_Finished},
        'xyz',
    )
    secscan._secscan_api = api

    # Mark every manifest as indexed long enough ago to exceed the threshold.
    threshold = application.config['SECURITY_SCANNER_V4_REINDEX_THRESHOLD']
    for manifest in Manifest.select():
        ManifestSecurityStatus.create(
            manifest=manifest,
            repository=manifest.repository,
            error_json={},
            index_status=IndexStatus.COMPLETED,
            indexer_hash='abc',
            indexer_version=IndexerVersion.V4,
            last_indexed=datetime.utcnow() - timedelta(seconds=threshold + 60),
            metadata_json={},
        )

    secscan.perform_indexing_recent_manifests()
    secscan.perform_indexing()

    # Every manifest keeps exactly one status row, now carrying the new hash.
    assert ManifestSecurityStatus.select().count() == Manifest.select().count()
    for status_row in ManifestSecurityStatus.select():
        assert status_row.indexer_hash == 'xyz'
class TestEvMenu(TestCase):
    """Depth-first structural test of an EvMenu tree.

    Subclasses override the class attributes below to describe the menu
    under test and the traversal/outputs expected from it; this base class
    then walks the menu by simulating option input and compares what it
    sees against the expectations.
    """

    # Menu definition and EvMenu constructor arguments.
    menutree = {}
    startnode = 'start'
    cmdset_mergetype = 'Replace'
    cmdset_priority = 1
    auto_quit = True
    auto_look = True
    auto_help = True
    cmd_on_exit = 'look'
    persistent = False
    startnode_input = ''
    kwargs = {}
    # Expectations checked during traversal.
    expect_all_nodes = False
    expected_tree = []
    expected_node_texts = {}
    expected_node_options_count = {}
    expected_node_options = {}
    # Set True to print the traversal as it happens.
    debug_output = False

    def _debug_output(self, indent, msg):
        # Optional ANSI-stripped trace of the traversal, indented by depth.
        if self.debug_output:
            print(((' ' * indent) + ansi.strip_ansi(msg)))

    def _test_menutree(self, menu):
        """Walk `menu` depth-first, asserting node texts/options along the way."""

        def _depth_first(menu, tree, visited, indent):
            # Recursive worker: `tree` accumulates the traversal structure,
            # `visited` the node names already seen (cycle guard).
            nodename = menu.nodename
            options = menu.test_options
            if isinstance(options, dict):
                options = (options,)
            # Compare this node's text against the expectation, if given.
            compare_text = self.expected_node_texts.get(nodename, None)
            if (compare_text is not None):
                compare_text = ansi.strip_ansi(compare_text.strip())
                node_text = menu.test_nodetext
                self.assertIsNotNone(bool(node_text), 'node: {}: node-text is None, which was not expected.'.format(nodename))
                if isinstance(node_text, tuple):
                    # Node returned (text, helptext); only the text is compared.
                    (node_text, helptext) = node_text
                node_text = ansi.strip_ansi(node_text.strip())
                self.assertTrue(node_text.startswith(compare_text), '\nnode "{}\':\nOutput:\n{}\n\nExpected (startswith):\n{}'.format(nodename, node_text, compare_text))
            # Compare option count / option dicts against expectations, if given.
            compare_options_count = self.expected_node_options_count.get(nodename, None)
            if (compare_options_count is not None):
                self.assertEqual(len(options), compare_options_count, 'Not the right number of options returned from node {}.'.format(nodename))
            compare_options = self.expected_node_options.get(nodename, None)
            if compare_options:
                self.assertEqual(options, compare_options, 'Options returned from node {} does not match.'.format(nodename))
            self._debug_output(indent, '*{}'.format(nodename))
            subtree = []
            if (not options):
                # Leaf node: record the bare node name instead of a list.
                if (nodename not in visited):
                    visited.append(nodename)
                subtree = nodename
            else:
                for (inum, optdict) in enumerate(options):
                    (key, desc, execute, goto) = (optdict.get('key', ''), optdict.get('desc', None), optdict.get('exec', None), optdict.get('goto', None))
                    if (isinstance(key, (tuple, list)) and (len(key) > 1)):
                        # Use the primary key of a multi-alias option.
                        key = key[0]
                    if (key == '_default'):
                        # Exercise the catch-all option with arbitrary input.
                        key = 'test raw input'
                    if (not key):
                        # Unkeyed options are addressed by their 1-based number.
                        key = str((inum + 1))
                    # Snapshot the menu so we can roll back if this input
                    # closes it (close_menu is a MagicMock; see setUp).
                    backup_menu = copy.copy(menu)
                    menu.parse_input(key)
                    nodename = menu.nodename
                    if menu.close_menu.called:
                        # The option exited the menu: restore the snapshot
                        # and re-arm the close_menu mock.
                        self._debug_output(indent, ' .. menu exited! Back to previous node.')
                        menu = backup_menu
                        menu.close_menu = MagicMock()
                        visited.append(nodename)
                        subtree.append(nodename)
                    elif (nodename not in visited):
                        # New node: recurse into it.
                        visited.append(nodename)
                        subtree.append(nodename)
                        _depth_first(menu, subtree, visited, (indent + 2))
                    else:
                        # Already-seen node: record the edge, do not recurse.
                        subtree.append(nodename)
                    self._debug_output(indent, '-- {} ({}) -> {}'.format(key, desc, goto))
            if subtree:
                tree.append(subtree)

        visited_nodes = [menu.nodename]
        traversal_tree = [menu.nodename]
        _depth_first(menu, traversal_tree, visited_nodes, 1)
        if self.expect_all_nodes:
            # Every node of the menu definition should have been reached.
            self.assertGreaterEqual(len(menu._menutree), len(visited_nodes))
        self.assertEqual(traversal_tree, self.expected_tree)

    def setUp(self):
        # Only build menus when a subclass actually provides a tree.
        self.menu = None
        if self.menutree:
            # Mocked callers/sessions so EvMenu can run without a server.
            self.caller = MagicMock()
            self.caller.key = 'Test'
            self.caller2 = MagicMock()
            self.caller2.key = 'Test'
            self.caller.msg = MagicMock()
            self.caller2.msg = MagicMock()
            self.session = MagicMock()
            self.session.protocol_flags = {}
            self.session2 = MagicMock()
            self.session2.protocol_flags = {}
            self.caller.session = self.session
            self.caller2.session = self.session2
            # Two menu instances: one non-persistent, one persistent, so the
            # structure test covers both code paths.
            self.menu = evmenu.EvMenu(self.caller, self.menutree, startnode=self.startnode, cmdset_mergetype=self.cmdset_mergetype, cmdset_priority=self.cmdset_priority, auto_quit=self.auto_quit, auto_look=self.auto_look, auto_help=self.auto_help, cmd_on_exit=self.cmd_on_exit, persistent=False, startnode_input=self.startnode_input, session=self.session, **self.kwargs)
            self.pmenu = evmenu.EvMenu(self.caller2, self.menutree, startnode=self.startnode, cmdset_mergetype=self.cmdset_mergetype, cmdset_priority=self.cmdset_priority, auto_quit=self.auto_quit, auto_look=self.auto_look, auto_help=self.auto_help, cmd_on_exit=self.cmd_on_exit, persistent=True, startnode_input=self.startnode_input, session=self.session2, **self.kwargs)
            # Replace close_menu with mocks so traversal can detect menu exits.
            self.menu.close_menu = MagicMock()
            self.pmenu.close_menu = MagicMock()

    def test_menu_structure(self):
        if self.menu:
            self._test_menutree(self.menu)
            self._test_menutree(self.pmenu)
def main():
    """Convert an .h5ad/.h5 expression matrix to a genes-by-cells TSV.

    Reads the input with anndata or scanpy depending on its extension,
    maps gene symbols to IDs via the GTF, and writes a tab-separated
    table (rows = gene IDs, columns = cell barcodes).
    """
    args = build_parser().parse_args()

    extension = os.path.splitext(args.input)[1]
    if extension == '.h5ad':
        adata = ad.read_h5ad(args.input)
    elif extension == '.h5':
        adata = sc.read_10x_h5(args.input)
    else:
        raise ValueError(f'Unrecognized file extension: {args.input}')
    logging.info(f'Read input: {adata}')

    logging.info('Reading gtf for gene name map')
    gene_name_map = utils.read_gtf_gene_symbol_to_id()

    # Transpose to genes x cells; counts must be non-negative.
    table = pd.DataFrame(utils.ensure_arr(adata.X),
                         index=adata.obs_names,
                         columns=adata.var_names).T
    assert np.all(table.values >= 0.0)
    table.index = [gene_name_map[symbol] for symbol in table.index]

    logging.info(f'Writing output to {args.output_table_txt}')
    table.to_csv(args.output_table_txt, sep='\t')
def main():
    """Export a seq2seq (BART-style) model to ONNX with beam search baked in."""
    args = parse_args()

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
    model.to(device)

    # Fall back to defaults when CLI values are absent/zero.
    max_length = args.max_length if args.max_length else 5
    num_beams = args.num_beams if args.num_beams else 4
    output_name = args.output_file_path if args.output_file_path else 'BART.onnx'

    logger.info('Exporting model to ONNX')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
def load_xml(p):
    """Parse one NYT-corpus article XML file.

    Returns (paras, abstract) where `paras` is the tokenized body (title
    and byline prepended with [unused4]/[unused5] markers) and `abstract`
    is a list of tokenized abstract segments; returns (None, None) when
    the article has no title, no abstract node, or no full_text body.
    """
    tree = ET.parse(p)
    root = tree.getroot()
    title, byline, abstract, paras = [], [], [], []

    # Title is mandatory; skip articles without a <hedline>.
    title_node = list(root.iter('hedline'))
    if len(title_node) > 0:
        try:
            # NOTE: loop variable renamed from `p` — it shadowed the path
            # parameter that the except clause prints.
            title = [hl.text.lower().split() for hl in title_node[0].iter('hl1')][0]
        except Exception:  # was a bare except; keep KeyboardInterrupt/SystemExit fatal
            print(p)
    else:
        return (None, None)

    byline_node = [n for n in root.iter('byline')
                   if n.attrib['class'] == 'normalized_byline']
    if len(byline_node) > 0:
        byline = byline_node[0].text.lower().split()

    # Abstract is mandatory as well.
    abs_node = list(root.iter('abstract'))
    if len(abs_node) > 0:
        try:
            abstract = [para.text.lower().split() for para in abs_node[0].iter('p')][0]
        except Exception:  # was a bare except
            print(p)
    else:
        return (None, None)

    # Split the abstract into ';'-separated segments and strip trailing
    # editorial markers like (m)/(s) from the last segment.
    # (local renamed from `abs`, which shadowed the builtin)
    abstract = ' '.join(abstract).split(';')
    abstract[-1] = abstract[-1].replace('(m)', '')
    abstract[-1] = abstract[-1].replace('(s)', '')
    for ww in nyt_remove_words:
        abstract[-1] = abstract[-1].replace('(' + ww + ')', '')
    abstract = [seg.split() for seg in abstract]
    # Drop trivially short segments.
    abstract = [seg for seg in abstract if len(seg) > 2]

    for doc_node in root.iter('block'):
        if doc_node.get('class') == 'full_text':
            paras = [para.text.lower().split() for para in doc_node.iter('p')]
            break

    if len(paras) > 0:
        # Prepend title (and byline, when present) with separator markers.
        if len(byline) > 0:
            paras = [title + ['[unused4]'] + byline + ['[unused5]']] + paras
        else:
            paras = [title + ['[unused4]']] + paras
        return (paras, abstract)
    else:
        return (None, None)
def get_thumbnail_from_file(fileobj, boundary) -> (GdkPixbuf.Pixbuf | None):
    """Return a thumbnail pixbuf for `fileobj` bounded by `boundary`.

    Tries the path-based thumbnail first; on GError falls back to feeding
    the raw stream through a PixbufLoader. Returns None when both fail.
    """
    assert fileobj
    try:
        path = fileobj.name
        assert isinstance(path, fsnative), path
        return get_thumbnail(path, boundary)
    except GLib.GError:
        pass
    # Fallback: decode the raw bytes directly.
    try:
        loader = GdkPixbuf.PixbufLoader()
        loader.set_size(*boundary)
        loader.write(fileobj.read())
        loader.close()
        # Rewind so callers can re-read the stream afterwards.
        fileobj.seek(0, 0)
        return loader.get_pixbuf()
    except (OSError, GLib.GError) as e:
        print_w(f"Couldn't load thumbnail with PixbufLoader either: {e}")
        return None
class Subtensor(COp):
    """Basic NumPy-style indexing of a tensor (integers and slices only).

    `idx_list` is a template of the index expression: concrete ints/slices
    are stored directly, while symbolic positions are stored as scalar
    Types whose values arrive as extra node inputs.

    Fix: `str_from_slice`, `str_from_indices`, `default_helper_c_code_args`,
    `helper_c_code` and `helper_c_code_cache_version` take no `self` yet had
    lost their `@staticmethod` decorators — calls such as
    `self.helper_c_code(...)` and `self.str_from_indices(...)` would have
    mis-bound the instance as the first argument. The decorators are
    restored (matching upstream). The C template strings are preserved;
    their interior indentation is cosmetic in the generated C.
    """

    check_input = False
    view_map = {0: [0]}  # output 0 is a view of input 0
    _f16_ok = True
    __props__ = ('idx_list',)

    def __init__(self, idx_list):
        self.idx_list = tuple(map(index_vars_to_types, idx_list))

    def make_node(self, x, *inputs):
        """Build the Apply node; infers static output shape where possible.

        Raises:
            IndexError: more indices than `x` has dimensions.
            TypeError: a symbolic index does not match its template type.
        """
        x = as_tensor_variable(x)
        inputs = tuple(as_nontensor_scalar(a) for a in inputs)
        idx_list = list(self.idx_list)
        if len(idx_list) > x.type.ndim:
            raise IndexError('too many indices for array')
        input_types = get_slice_elements(idx_list, lambda entry: isinstance(entry, Type))
        assert len(inputs) == len(input_types)
        for input, expected_type in zip(inputs, input_types):
            if not expected_type.is_super(input.type):
                raise TypeError(
                    f'Incompatible types for Subtensor template. Expected {input.type}, got {expected_type}.'
                )
        # Pad the index list with full slices for trailing dimensions.
        padded = [
            *get_idx_list((None,) + inputs, self.idx_list),
            *([slice(None, None, None)] * (x.type.ndim - len(idx_list))),
        ]
        out_shape = []

        def extract_const(value):
            # Returns (value, is_constant); symbolic values stay as-is.
            if value is None:
                return (value, True)
            try:
                value = get_underlying_scalar_constant_value(value)
                return (value, True)
            except NotScalarConstantError:
                return (value, False)

        for the_slice, length in zip(padded, x.type.shape):
            if not isinstance(the_slice, slice):
                # Integer index: the dimension is dropped from the output.
                continue
            if length is None:
                out_shape.append(None)
                continue
            start = the_slice.start
            stop = the_slice.stop
            step = the_slice.step
            is_slice_const = True
            start, is_const = extract_const(start)
            is_slice_const = is_slice_const and is_const
            stop, is_const = extract_const(stop)
            is_slice_const = is_slice_const and is_const
            step, is_const = extract_const(step)
            is_slice_const = is_slice_const and is_const
            if not is_slice_const:
                out_shape.append(None)
                continue
            # Fully constant slice: the output length is statically known.
            slice_length = len(range(*slice(start, stop, step).indices(length)))
            out_shape.append(slice_length)
        return Apply(self, (x,) + inputs, [tensor(dtype=x.type.dtype, shape=out_shape)])

    def perform(self, node, inputs, out_):
        (out,) = out_
        x = inputs[0]
        cdata = get_idx_list(inputs, self.idx_list)
        if len(cdata) == 1:
            cdata = cdata[0]
        out[0] = np.asarray(x.__getitem__(cdata))

    def infer_shape(self, fgraph, node, shapes):
        xshp = shapes[0]
        assert len(xshp) == node.inputs[0].ndim
        outshp = []
        actual_idx_list = list(get_idx_list(node.inputs, self.idx_list))
        padded = actual_idx_list + [slice(None, None, None)] * (len(xshp) - len(self.idx_list))
        i = 0
        for idx, xl in zip(padded, xshp):
            if isinstance(idx, slice):
                # If it is the default (None, None, None) slice, or a variant,
                # the shape is unchanged along that dimension.
                if (idx.start in [None, 0]) and (idx.stop in [None, sys.maxsize]) and (idx.step is None or idx.step == 1):
                    outshp.append(xl)
                else:
                    cnf = get_canonical_form_slice(idx, xl)[0]
                    if cnf.step == 1:
                        length = cnf.stop - cnf.start
                    else:
                        length = (cnf.stop - cnf.start - 1) // cnf.step + 1
                    outshp.append(length)
                i += 1
            else:
                # An integer index drops this dimension.
                pass
        assert i == node.outputs[0].ndim
        assert len(outshp) == node.outputs[0].ndim
        return [outshp]

    def grad(self, inputs, grads):
        (gz,) = grads
        x = inputs[0]
        rest = inputs[1:]
        if x.dtype in discrete_dtypes:
            # Discrete inputs have no gradient; return a zero of floatX.
            first = x.zeros_like().astype(config.floatX)
        else:
            # Scatter the output gradient back into a zero tensor of x's shape.
            first = IncSubtensor(self.idx_list)(x.zeros_like(), gz, *rest)
        return [first] + [DisconnectedType()()] * len(rest)

    def connection_pattern(self, node):
        # Only the indexed tensor is connected to the output.
        rval = [[True]]
        for ipt in node.inputs[1:]:
            rval.append([False])
        return rval

    def __hash__(self):
        # Slices are unhashable; flatten them into tuples first.
        msg = []
        for entry in self.idx_list:
            if isinstance(entry, slice):
                msg += [(entry.start, entry.stop, entry.step)]
            else:
                msg += [entry]
        idx_list = tuple(msg)
        return hash(idx_list)

    @staticmethod
    def str_from_slice(entry):
        """Compact textual form of one slice template entry."""
        if entry.step:
            return ':'.join((('start' if entry.start else ''), ('stop' if entry.stop else ''), 'step'))
        if entry.stop:
            return f"{('start' if entry.start else '')}:stop"
        if entry.start:
            return 'start:'
        return ':'

    @staticmethod
    def str_from_indices(idx_list):
        """Compact textual form of the whole index template (for __str__)."""
        indices = []
        letter_indexes = 0
        for entry in idx_list:
            if isinstance(entry, slice):
                indices.append(Subtensor.str_from_slice(entry))
            else:
                # i, j, k, ii, jj, kk, ...
                indices.append('ijk'[letter_indexes % 3] * (letter_indexes // 3 + 1))
                letter_indexes += 1
        return ', '.join(indices)

    def __str__(self):
        return f'{self.__class__.__name__}{{{self.str_from_indices(self.idx_list)}}}'

    @staticmethod
    def default_helper_c_code_args():
        """Default arguments to `helper_c_code`."""
        return {'c_prefix': 'PyArray', 'strides_mul': 1}

    @staticmethod
    def helper_c_code(node, name, inputs, outputs, sub, idx_list, view_ndim, c_prefix=None, strides_mul=None):
        """Emit the C that computes xview_dims/strides/offset for the view.

        The emitted code walks `subtensor_spec` (constants and runtime scalar
        inputs flattened together) dimension by dimension, normalizing slice
        bounds exactly like PySlice_GetIndicesEx.
        """
        default_args = Subtensor.default_helper_c_code_args()
        if strides_mul is None:
            strides_mul = default_args['strides_mul']
        if c_prefix is None:
            c_prefix = default_args['c_prefix']
        fail = sub['fail']
        init_cmds = []  # C statements filling subtensor_spec
        is_slice = []   # per top-level entry: 1 for slice, 0 for int
        # Sentinel encoding "None" inside subtensor_spec.
        NONE_CODE = sys.maxsize - 1
        # pos[0]: write position in subtensor_spec; pos[1]: next runtime input.
        pos = [0, 1]

        def inc_spec_pos(amt):
            pos[0] += amt

        def inc_input_pos(amt):
            pos[1] += amt

        def spec_pos():
            return pos[0]

        def input_pos():
            return pos[1]

        def init_entry(entry, depth=0):
            if isinstance(entry, (np.integer, int)):
                init_cmds.append('subtensor_spec[%i] = %i;' % (spec_pos(), entry))
                inc_spec_pos(1)
                if depth == 0:
                    is_slice.append(0)
            elif isinstance(entry, Type):
                # Symbolic index: read the value from the runtime input.
                init_cmds.append('subtensor_spec[%i] = %s;' % (spec_pos(), inputs[input_pos()]))
                inc_spec_pos(1)
                inc_input_pos(1)
                if depth == 0:
                    is_slice.append(0)
            elif entry is None:
                init_cmds.append('subtensor_spec[%i] = %i;' % (spec_pos(), NONE_CODE))
                inc_spec_pos(1)
                if depth == 0:
                    is_slice.append(0)
            elif (depth == 0) and isinstance(entry, slice):
                init_entry(entry.start, depth + 1)
                init_entry(entry.stop, depth + 1)
                init_entry(entry.step, depth + 1)
                is_slice.append(1)
            else:
                assert 0, entry

        for entry in idx_list:
            init_entry(entry)
        assert input_pos() == len(inputs), input_pos()
        assert len(is_slice) <= node.inputs[0].ndim, node.inputs[0].ndim

        len_is_slice = len(is_slice)
        len_subtensor_spec = spec_pos()
        subensor_spec = f'npy_intp subtensor_spec[{len_subtensor_spec}];'
        if len_subtensor_spec == 0:
            subensor_spec = 'npy_intp * subtensor_spec = NULL;'
        if is_slice:
            is_slice_init = 'int is_slice[] = {' + ','.join([str(s) for s in is_slice]) + '};'
        else:
            is_slice_init = 'int* is_slice = NULL;'
        subtensor_init = '\n'.join(init_cmds)
        (x,) = inputs[:1]
        (z,) = outputs
        if view_ndim:
            rval = f'''
        // Argument of the view
        npy_intp xview_dims[{view_ndim}];
        npy_intp xview_strides[{view_ndim}];

        '''
        else:
            rval = '\n        // Argument of the view\n        npy_intp* xview_dims = NULL;\n        npy_intp* xview_strides = NULL;\n\n        '
        rval += ('\n        // One more argument of the view\n        npy_intp xview_offset = 0;\n\n        // The subtensor is created by iterating over the dimensions\n        // and updating stride, shape, and data pointers\n\n        %(is_slice_init)s\n        %(subensor_spec)s\n        %(subtensor_init)s;\n        int spec_pos = 0; //position in subtensor_spec\n        int inner_ii = 0; // the current dimension of zview\n        int outer_ii = 0; // current dimension of z\n\n\n        for (; outer_ii < %(len_is_slice)s; ++outer_ii)\n        {\n            if (is_slice[outer_ii])\n            {\n                npy_intp length = %(c_prefix)s_DIMS(%(x)s)[outer_ii];\n                npy_intp slicelength;\n                npy_intp start = subtensor_spec[spec_pos+0];\n                npy_intp stop = subtensor_spec[spec_pos+1];\n                npy_intp step = subtensor_spec[spec_pos+2];\n                if (step == %(NONE_CODE)s) step = 1;\n\n                npy_intp defstart = step < 0 ? length-1 : 0;\n                npy_intp defstop = step < 0 ? -1 : length;\n\n                // logic adapted from\n                // PySlice_GetIndicesEx in python source\n                if (!step)\n                {\n                    PyErr_Format(PyExc_ValueError,\n                                 "slice step cannot be zero");\n                    %(fail)s;\n                }\n\n                if (start == %(NONE_CODE)s)\n                {\n                    start = defstart;\n                }\n                else\n                {\n                    if (start < 0) start += length;\n                    if (start < 0) start = (step < 0) ? -1 : 0;\n                    if (start >= length)\n                        start = (step < 0) ? length - 1 : length;\n                }\n\n                if (stop == %(NONE_CODE)s)\n                {\n                    stop = defstop;\n                }\n                else\n                {\n                    if (stop < 0) stop += length;\n                    if (stop < 0) stop = (step < 0) ? -1 : 0;\n                    if (stop >= length)\n                        stop = (step < 0) ? length - 1 : length;\n                }\n\n                if ((step < 0 && stop >= start)\n                    || (step > 0 && start >= stop)) {\n                    slicelength = 0;\n                }\n                else if (step < 0) {\n                    slicelength = (stop-start+1)/step+1;\n                }\n                else {\n                    slicelength = (stop-start-1)/step+1;\n                }\n\n                if (0){\n                    fprintf(stdout, "start %%zi\\n", start);\n                    fprintf(stdout, "stop %%zi\\n", stop);\n                    fprintf(stdout, "step %%zi\\n", step);\n                    fprintf(stdout, "length %%zi\\n", length);\n                    fprintf(stdout, "slicelength %%zi\\n", slicelength);\n                }\n\n                assert (slicelength <= length);\n\n                xview_offset += (npy_intp)%(c_prefix)s_STRIDES(%(x)s)[outer_ii]\n                    * start * %(strides_mul)s;\n                xview_dims[inner_ii] = slicelength;\n                xview_strides[inner_ii] = (npy_intp)%(c_prefix)s_STRIDES(%(x)s)[outer_ii] * step;\n\n                inner_ii += 1;\n                spec_pos += 3;\n            }\n            else // tuple coord `outer_ii` is an int\n            {\n                int idx = subtensor_spec[spec_pos];\n                if (idx < 0) idx += %(c_prefix)s_DIMS(%(x)s)[outer_ii];\n                if (idx >= 0)\n                {\n                    if (idx < %(c_prefix)s_DIMS(%(x)s)[outer_ii])\n                    {\n                        xview_offset += (npy_intp)%(c_prefix)s_STRIDES(%(x)s)[outer_ii] * idx *\n                               %(strides_mul)s;\n                    }\n                    else\n                    {\n                        PyErr_Format(PyExc_IndexError,"index out of bounds");\n                        %(fail)s;\n                    }\n                }\n                else\n                {\n                    PyErr_Format(PyExc_IndexError,"index out of bounds");\n                    %(fail)s;\n                }\n\n                spec_pos += 1;\n            }\n        }\n        assert (inner_ii <= %(view_ndim)s);\n        while (inner_ii < %(view_ndim)s)\n        {\n            assert (outer_ii < %(c_prefix)s_NDIM(%(x)s));\n            xview_dims[inner_ii] = %(c_prefix)s_DIMS(%(x)s)[outer_ii];\n            xview_strides[inner_ii] = %(c_prefix)s_STRIDES(%(x)s)[outer_ii];\n\n            inner_ii += 1;\n            outer_ii += 1;\n        }\n        ' % locals())
        return rval

    @staticmethod
    def helper_c_code_cache_version():
        return (9,)

    def c_code(self, node, name, inputs, outputs, sub):
        if not isinstance(node.inputs[0].type, TensorType):
            raise NotImplementedError()
        x = inputs[0]
        (z,) = outputs
        ndim = node.inputs[0].ndim
        view_ndim = node.outputs[0].ndim
        fail = sub['fail']
        decl = 'PyArrayObject * xview = NULL;'
        checkNDim = ('\n        if (PyArray_NDIM(%(x)s) != %(ndim)s){\n            PyErr_SetString(PyExc_ValueError,\n                            "Expected %(ndim)s dimensions input"\n                            );\n            %(fail)s\n        }\n        ' % locals())
        get_xview = self.helper_c_code(node, name, inputs, outputs, sub, self.idx_list, view_ndim)
        build_view = ('\n        //TODO: give this Op a second output so that this view can be cached\n        //TODO: alternatively, fix the memory leak on failure\n        Py_INCREF(PyArray_DESCR(%(x)s));\n        xview = (PyArrayObject*)PyArray_NewFromDescr(\n                &PyArray_Type,\n                PyArray_DESCR(%(x)s),\n                %(view_ndim)s,\n                xview_dims,\n                xview_strides,\n                PyArray_BYTES(%(x)s) + xview_offset,\n                PyArray_FLAGS(%(x)s),\n                NULL);\n        assert (PyArray_NDIM(xview) == %(view_ndim)s);\n        if (!xview)\n        {\n            %(fail)s;\n        }\n        ' % locals())
        finish_view = f'''
        Py_XDECREF({z});
        Py_INCREF(py_{x});
        PyArray_SetBaseObject(xview, py_{x});
        assert(py_{x} == (PyObject*){x});
        {z} = xview;
        '''
        return decl + checkNDim + '{' + get_xview + build_view + finish_view + '}'

    def c_code_cache_version(self):
        hv = self.helper_c_code_cache_version()
        # If `helper_c_code_cache_version` is not versioned we do not want to
        # have a versioned version of this op's C code.
        if len(hv) == 0:
            return ()
        return (4, hv)

    def R_op(self, inputs, eval_points):
        # Subtensor is not differentiable wrt its index inputs, so
        # just apply the op to the first eval point.
        if eval_points[0] is None:
            return [None]
        return self(eval_points[0], *inputs[1:], return_list=True)
def _group_ptr_queries_with_known_answers(now_millis: float_, multicast: bool_, question_with_known_answers: _QuestionWithKnownAnswers) -> List[DNSOutgoing]:
    """First-fit pack PTR questions (with their known answers) into outgoing messages.

    Questions are placed largest-first so big payloads claim fresh buckets
    early; each bucket is kept under the typical message size minus the
    packet header.
    """
    # Estimated wire size per question, including its known answers.
    query_by_size: Dict[DNSQuestion, int] = {}
    for question, known_answers in question_with_known_answers.items():
        query_by_size[question] = question.max_size + sum(
            answer.max_size_compressed for answer in known_answers
        )

    max_bucket_size = _MAX_MSG_TYPICAL - _DNS_PACKET_HEADER_LEN
    buckets: List[_DNSPointerOutgoingBucket] = []
    for question in sorted(query_by_size, key=query_by_size.get, reverse=True):
        size = query_by_size[question]
        answers = question_with_known_answers[question]
        # First-fit: drop into the first bucket with room, else open a new one.
        for bucket in buckets:
            if bucket.bytes + size <= max_bucket_size:
                bucket.add(size, question, answers)
                break
        else:
            fresh_bucket = _DNSPointerOutgoingBucket(now_millis, multicast)
            fresh_bucket.add(size, question, answers)
            buckets.append(fresh_bucket)

    return [bucket.out for bucket in buckets]
class TestForbiddenIOFunctionNoAllowedChecker(pylint.testutils.CheckerTestCase):
    """With no allowed IO functions configured, input() must be flagged everywhere."""

    CHECKER_CLASS = IOFunctionChecker
    CONFIG = {}

    def setup(self):
        self.setup_method()

    def _assert_flags_first_call(self, src):
        # Shared driver: parse the snippet, take its first Call node, and
        # expect exactly one forbidden-IO-function message for it.
        module = astroid.parse(src)
        call, *_ = module.nodes_of_class(nodes.Call)
        expected = pylint.testutils.MessageTest(
            msg_id='forbidden-IO-function', node=call, args=call.func.name)
        with self.assertAddsMessages(expected, ignore_position=True):
            self.checker.visit_call(call)

    def test_message_function_no_allowed(self):
        self._assert_flags_first_call('''
        def my_function(string: str):
            string = "hello"
            name = input()
            return name + string
        ''')

    def test_message_class_no_allowed(self):
        self._assert_flags_first_call('''
        class Example:
            def my_function(self):
                name = input()
                return name
        ''')

    def test_message_global(self):
        self._assert_flags_first_call('''
        name = input()
        ''')
def parse_source_file(mod: StubSource, mypy_options: MypyOptions) -> None:
    """Parse `mod`'s source into `mod.ast`; exit the process on blocking errors."""
    assert mod.path is not None, 'Not found module was not skipped'
    with open(mod.path, 'rb') as source_file:
        raw = source_file.read()
    source_text = mypy.util.decode_python_encoding(raw)
    errors = Errors(mypy_options)
    mod.ast = mypy.parse.parse(source_text, fnam=mod.path, module=mod.module,
                               errors=errors, options=mypy_options)
    mod.ast._fullname = mod.module
    if errors.is_blockers():
        # Syntax error or similar fatal problem: print messages and bail.
        for message in errors.new_messages():
            sys.stderr.write(message + '\n')
        sys.exit(1)
def test_build_wheel_extended() -> None:
    """Building the 'extended' fixture yields a valid wheel file on disk."""
    with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, 'extended')):
        wheel_path = Path(tmp_dir) / api.build_wheel(tmp_dir)
        assert wheel_path.exists()
        validate_wheel_contents(name='extended', version='0.1',
                                path=wheel_path.as_posix())
def run(train_batch_size, epochs, lr, weight_decay, config, exp_id, log_dir, disable_gpu=False):
    """Train the IQA scoring model on top of a frozen, pretrained MWCNN.

    The MWCNN (``mw_model``) only produces reconstructions; gradients flow
    through the IQA ``model`` alone. Checkpoints and debug images are written
    under the module-level ``args`` directories every iteration.

    NOTE(review): ``disable_gpu`` is accepted but never used — everything is
    hard-coded to CUDA. Confirm whether CPU support was intended.
    """
    # test_ratio controls whether a held-out test split exists; test_loader
    # is created but unused in this function.
    if (config['test_ratio'] is not None):
        (train_loader, val_loader, test_loader) = get_data_loaders(config, train_batch_size, exp_id)
    else:
        (train_loader, val_loader) = get_data_loaders(config, train_batch_size, exp_id)
    module = import_module(('model.' + 'MWCNN'))
    mw_model = module.make_model(args).to('cuda')
    model = Model(args).to('cuda')
    writer = SummaryWriter(log_dir=log_dir)
    # NOTE(review): both branches load the exact same checkpoint file; the
    # else-branch (missing file) would raise inside torch.load anyway.
    # Looks like leftover code — confirm intent.
    if os.path.exists(os.path.join(args.log_dir_MW, 'state.pkl.epoch444')):
        mw_model.load_state_dict(torch.load(os.path.join(args.log_dir_MW, 'state.pkl.epoch444')), strict=False)
        logger.info('Successfully loaded pretrained Epoch_MW_model.')
    else:
        mw_model.load_state_dict(torch.load(os.path.join(args.log_dir_MW, 'state.pkl.epoch444')), strict=False)
        logger.info('Successfully loaded pretrained newly saved MW_model.')
    # Resume the IQA model and optimizer state when checkpoints exist.
    if os.path.exists(os.path.join(args.log_dir_IQA2, 'state.pkl')):
        model.load_state_dict(torch.load(os.path.join(args.log_dir_IQA2, 'state.pkl')), strict=False)
        logger.info('Successfully loaded pretrained IQA_model.')
    optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    if os.path.exists(os.path.join(args.log_dir_IQA2, 'optimizer_state.pkl')):
        optimizer.load_state_dict(torch.load(os.path.join(args.log_dir_IQA2, 'optimizer_state.pkl')))
        logger.info('Successfully loaded optimizer IQA_parameters.')
    loss_avg = Loss(args)
    # NOTE(review): `iter` shadows the builtin; it is the global step counter.
    iter = 0
    # NOTE(review): range(epochs)[1:] skips epoch 0, so only epochs-1 passes run.
    for epoch in range(epochs)[1:]:
        epoch_loss = []
        for (batch_num, (im_mw, imp_iwt, gt_iwt, im_dmos)) in enumerate(train_loader):
            iter += 1
            mw_model.eval()  # MWCNN stays frozen; only `model` trains
            model.train()
            optimizer.zero_grad()
            pre_iwt = mw_model(im_mw)
            # Normalize each reconstructed image on CPU, then re-stack on GPU.
            # NOTE(review): assumes the batch is exactly train_batch_size — a
            # short final batch would raise IndexError. Confirm drop_last=True.
            pre_iwt = [LocalNormalization(pre_iwt[i][0].detach().cpu().numpy()) for i in range(train_batch_size)]
            pre_iwt = torch.stack(pre_iwt).cuda()
            error_map = (pre_iwt - imp_iwt)
            pre_score = model(imp_iwt, error_map)
            loss_batch = loss_avg(pre_score, im_dmos)
            # Debug snapshots of the first sample, overwritten every iteration.
            plt.imsave(os.path.join(args.log_dir_IQA2, 'hr.jpg'), gt_iwt.detach().cpu().numpy()[0][0])
            plt.imsave(os.path.join(args.log_dir_IQA2, 'sr.jpg'), pre_iwt.detach().cpu().numpy()[0][0])
            plt.imsave(os.path.join(args.log_dir_IQA2, 'lr.jpg'), imp_iwt.detach().cpu().numpy()[0][0])
            loss_batch.backward()
            optimizer.step()
            # Checkpoint every iteration (latest state only).
            torch.save(model.state_dict(), os.path.join(args.log_dir_IQA2, 'state.pkl'))
            torch.save(optimizer.state_dict(), os.path.join(args.log_dir_IQA2, 'optimizer_state.pkl'))
            logger.info('[EPOCH{}:ITER{}] <LOSS>={:.4}'.format(epoch, iter, loss_batch.item()))
            writer.add_scalar('Train/Iter/Loss', loss_batch.item(), iter)
            epoch_loss.append(loss_batch.item())
        epoch_loss_log = np.mean(epoch_loss)
        writer.add_scalar('Train/Epoch/Loss', epoch_loss_log, epoch)
        # Per-epoch validation with gradients disabled.
        with torch.no_grad():
            mw_model.eval()
            model.eval()
            (srocc, krocc, plcc, rmse, mae) = validate(mw_model, model, val_loader)
            logger.info('Validation Results - Epoch: {} <PLCC>: {:.4f} <SROCC>: {:.4f} <KROCC>: {:.4f} <RMSE>: {:.6f} <MAE>: {:.6f}'.format(epoch, plcc, srocc, krocc, rmse, mae))
            writer.add_scalar('validation/SROCC', srocc, epoch)
            writer.add_scalar('validation/KROCC', krocc, epoch)
            writer.add_scalar('validation/PLCC', plcc, epoch)
            writer.add_scalar('validation/RMSE', rmse, epoch)
            writer.add_scalar('validation/MAE', mae, epoch)
        # epoch % 1 == 0 is always true: an epoch-tagged checkpoint every epoch.
        if ((epoch % 1) == 0):
            torch.save(model.state_dict(), os.path.join(args.log_dir_IQA2, 'state.pkl.epoch{}'.format(epoch)))
            print('Successfully saved model of EPOCH{}'.format(epoch))
    writer.close()
class OpenFF(Parametrisation):
    """Parametrise a ligand with an OpenFF SMIRNOFF force field.

    NOTE(review): several methods take ``cls`` and there is a stray bare
    ``('force_field')`` expression before ``_check_forcefield`` — this looks
    like ``@classmethod`` / pydantic ``@validator('force_field')`` decorators
    were stripped when this file was flattened. Confirm against upstream.
    """

    # Discriminator tag and default (unconstrained) force-field file.
    type: Literal['OpenFF'] = 'OpenFF'
    force_field: str = 'openff_unconstrained-2.0.0.offxml'

    def start_message(self, **kwargs) -> str:
        """Human-readable message logged when parametrisation starts."""
        return f'Parametrising molecule and fragments with {self.force_field}.'

    def is_available(cls) -> bool:
        """Return True when openff.toolkit can be imported (raises otherwise)."""
        off = which_import('openff.toolkit', return_bool=True, raise_error=True, raise_msg='Please install via `conda install openff-toolkit`.')
        return off

    def _improper_torsion_ordering(cls) -> str:
        # SMIRNOFF convention for improper torsion atom ordering.
        return 'smirnoff'

    ('force_field')

    def _check_forcefield(cls, force_field: str) -> str:
        """Validate that ``force_field`` is a known OpenFF (or bespoke) file name."""
        # ForceBalance bespoke-fit output is allowed alongside released FFs.
        forcebalance_torsion_optimisation_ff = ['result/optimize/bespoke.offxml']
        openff_forcefields = ([ff.lower() for ff in get_available_force_fields()] + forcebalance_torsion_optimisation_ff)
        if (force_field in openff_forcefields):
            return force_field.lower()
        else:
            raise ValueError(f'The force field {force_field} was not found by the openff-toolkit please chose from {openff_forcefields}.')

    def _build_system(self, molecule: 'Ligand', input_files: Optional[List[str]]=None) -> System:
        """Create an OpenMM System for ``molecule``, falling back to generic
        zero-valued parameters when typing fails so a system is always built."""
        off_molecule = Molecule.from_rdkit(molecule.to_rdkit(), allow_undefined_stereo=True, hydrogens_are_explicit=True)
        off_topology = off_molecule.to_topology()
        forcefield = ForceField(self.force_field, allow_cosmetic_attributes=True)
        # Constraints are removed so bonds stay parameterised explicitly.
        if ('Constraints' in forcefield._parameter_handlers):
            del forcefield._parameter_handlers['Constraints']
        try:
            system = forcefield.create_openmm_system(off_topology)
        except (UnassignedValenceParameterException, UnassignedBondParameterException, UnassignedProperTorsionParameterException, UnassignedAngleParameterException, UnassignedMoleculeChargeException, TypeError):
            # Typing failed for some atoms: insert catch-all generic parameters
            # with zero force constants so every valence term gets matched.
            new_bond = BondHandler.BondType(smirks='[*:1]~[*:2]', length='0 * angstrom', k='0.0 * angstrom**-2 * mole**-1 * kilocalorie')
            new_angle = AngleHandler.AngleType(smirks='[*:1]~[*:2]~[*:3]', angle='0.0 * degree', k='0.0 * mole**-1 * radian**-2 * kilocalorie')
            new_torsion = ProperTorsionHandler.ProperTorsionType(smirks='[*:1]~[*:2]~[*:3]~[*:4]', periodicity1='1', phase1='0.0 * degree', k1='0.0 * mole**-1 * kilocalorie', periodicity2='2', phase2='180.0 * degree', k2='0.0 * mole**-1 * kilocalorie', periodicity3='3', phase3='0.0 * degree', k3='0.0 * mole**-1 * kilocalorie', periodicity4='4', phase4='180.0 * degree', k4='0.0 * mole**-1 * kilocalorie', idivf1='1.0', idivf2='1.0', idivf3='1.0', idivf4='1.0')
            new_vdw = vdWHandler.vdWType(smirks='[*:1]', epsilon=(0 * unit.kilocalories_per_mole), sigma=(0 * unit.angstroms))
            new_generics = {'Bonds': new_bond, 'Angles': new_angle, 'ProperTorsions': new_torsion, 'vdW': new_vdw}
            # Insert at position 0 so specific parameters still take precedence.
            for (key, val) in new_generics.items():
                forcefield.get_parameter_handler(key).parameters.insert(0, val)
            # Charges cannot be assigned generically; drop the charge handlers.
            del forcefield._parameter_handlers['ToolkitAM1BCC']
            del forcefield._parameter_handlers['Electrostatics']
            system = forcefield.create_openmm_system(off_topology)
        return system
def test_context_formatting(hatch, helpers, temp_dir, config_file):
    """`hatch env show` must expand context fields ({env:...}) but leave
    {root:uri} placeholders verbatim in the dependency table."""
    # Disable the tests template so only the default environment is created.
    config_file.model.template.plugins['default']['tests'] = False
    config_file.save()
    project_name = 'My.App'
    with temp_dir.as_cwd():
        result = hatch('new', project_name)
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir / 'my-app')
    data_path = (temp_dir / 'data')
    data_path.mkdir()
    project = Project(project_path)
    # 'default' gets a version/py matrix plus a {root:uri} dependency;
    # 'foo' reads BAR from the FOO_BAZ environment variable.
    helpers.update_project_environment(project, 'default', {'matrix': [{'version': ['9000', '3.14'], 'py': ['39', '310']}], 'dependencies': ['foo {root:uri}/../foo']})
    helpers.update_project_environment(project, 'foo', {'env-vars': {'BAR': '{env:FOO_BAZ}'}, 'dependencies': ['pydantic']})
    with project_path.as_cwd(env_vars={'FOO_BAZ': 'FOO_BAR'}):
        result = hatch('env', 'show', '--ascii')
    assert (result.exit_code == 0), result.output
    # The expected ASCII tables: BAR must show the expanded value FOO_BAR.
    assert (helpers.remove_trailing_spaces(result.output) == helpers.dedent('\n Standalone\n +------++++\n | Name | Type | Dependencies | Environment variables |\n +======++++\n | foo | virtual | pydantic | BAR=FOO_BAR |\n +------++++\n Matrices\n +++++\n | Name | Type | Envs | Dependencies |\n +++++\n | default | virtual | py39-9000 | {root:uri}/../foo |\n | | | py39-3.14 | |\n | | | py310-9000 | |\n | | | py310-3.14 | |\n +++++\n '))
class BoolAsk(Bool):
    """A boolean setting that additionally accepts the literal value 'ask'."""

    def __init__(self, *, none_ok: bool=False, completions: _Completions=None) -> None:
        super().__init__(none_ok=none_ok, completions=completions)
        self.valid_values = ValidValues('true', 'false', 'ask')

    def to_py(self, value: Union[(bool, str)]) -> Union[(bool, str, None)]:
        """Convert, handling the case-insensitive string 'ask' specially."""
        is_ask = isinstance(value, str) and value.lower() == 'ask'
        return 'ask' if is_ask else super().to_py(value)

    def from_str(self, value: str) -> Union[(bool, str, None)]:
        """Parse a user-supplied string; 'ask' bypasses boolean parsing."""
        if value.lower() != 'ask':
            return super().from_str(value)
        return 'ask'

    def to_str(self, value: Union[(bool, str, None)]) -> str:
        """Serialise back to the canonical string form."""
        lookup = {None: '', True: 'true', False: 'false', 'ask': 'ask'}
        return lookup[value]
class TestTextInput(unittest.TestCase):
    """Rendering checks for gui.TextInput in single- and multi-line modes."""

    def test_init(self):
        # The hint text must appear in the rendered HTML, and the HTML must
        # be well-formed, regardless of the single_line mode.
        for single_line in (True, False):
            widget = gui.TextInput(single_line=single_line, hint='test text input')
            rendered = widget.repr()
            self.assertIn('test text input', rendered)
            assertValidHTML(rendered)
class TrainWithLogger():
    """Mixin that accumulates length-weighted loss components and flushes
    their running means to wandb (and optionally a tqdm-style iterator)."""

    def reset_log(self):
        # Accumulator: key -> (total_sample_count, weighted_sum).
        self.log_components = OrderedDict()

    def log_append(self, log_key, length, loss_components):
        """Accumulate each component, weighted by the batch length."""
        for component_name, tensor_value in loss_components.items():
            full_key = f'{log_key}/{component_name}'
            count_so_far, sum_so_far = self.log_components.get(full_key, (0, 0.0))
            weighted = length * tensor_value.detach().cpu().item()
            self.log_components[full_key] = (count_so_far + length, sum_so_far + weighted)

    def flush_log(self, epoch, iterator=None):
        """Emit per-key means to wandb, reset the accumulator, and attach a
        compact postfix (two-letter keys) to `iterator` when given."""
        means = OrderedDict()
        compact = OrderedDict()
        for full_key, (count, weighted_sum) in self.log_components.items():
            mean_value = weighted_sum / count
            means[full_key] = mean_value
            group, component = full_key.split('/')
            # e.g. 'train/loss' -> 'TL'
            compact[f'{group[0]}{component[0]}'.upper()] = mean_value
        postfix = ','.join('{}:{:.2e}'.format(key, compact[key]) for key in compact.keys())
        if iterator is not None:
            iterator.set_postfix_str(postfix)
        wandb.log(means, step=epoch)
        self.log_components = OrderedDict()
class HeightCompression(nn.Module):
    """Collapse the depth axis of a dense sparse-conv output into channels,
    producing a bird's-eye-view (BEV) feature map."""

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES

    def forward(self, batch_dict):
        """Densify the sparse tensor and fold depth into the channel dim:
        (N, C, D, H, W) -> (N, C*D, H, W)."""
        sparse_out = batch_dict['encoded_spconv_tensor']
        dense = sparse_out.dense()
        batch, channels, depth, height, width = dense.shape
        batch_dict['spatial_features'] = dense.view(batch, channels * depth, height, width)
        batch_dict['spatial_features_stride'] = batch_dict['encoded_spconv_tensor_stride']
        return batch_dict
class _ExtractMethodParts(ast.RopeNodeVisitor):
    """Build the pieces of an extract-method/function refactoring: the new
    definition text, the replacement call, and matching patterns/checks.

    NOTE(review): `_get_globals_in_body` and `_get_nonlocals_in_body` take no
    `self` and `_append_decorators` appends bare newlines — upstream rope has
    `@staticmethod` decorators and decorator text here; they appear stripped
    by the flattening of this file. Code left byte-identical.
    """

    def __init__(self, info):
        self.info = info
        self.info_collector = self._create_info_collector()
        # Kind may be forced by the scope we are extracting from.
        self.info.kind = self._get_kind_by_scope()
        self._check_constraints()

    def _get_kind_by_scope(self):
        # Extracting from a static/class method keeps the same method kind.
        if self._extacting_from_staticmethod():
            return 'staticmethod'
        elif self._extracting_from_classmethod():
            return 'classmethod'
        return self.info.kind

    def _check_constraints(self):
        if (self._extracting_staticmethod() or self._extracting_classmethod()):
            if (not self.info.method):
                raise RefactoringError('Cannot extract to staticmethod/classmethod outside class')

    def _extacting_from_staticmethod(self):
        # (sic: "extacting" — name kept as-is to stay byte-identical)
        return (self.info.method and (_get_function_kind(self.info.scope) == 'staticmethod'))

    def _extracting_from_classmethod(self):
        return (self.info.method and (_get_function_kind(self.info.scope) == 'classmethod'))

    def get_definition(self):
        """Return the full text of the new function/method definition."""
        if self.info.global_:
            return ('\n%s\n' % self._get_function_definition())
        else:
            return ('\n%s' % self._get_function_definition())

    def get_replacement_pattern(self):
        """Pattern that matches the call that replaces the extracted code."""
        variables = []
        variables.extend(self._find_function_arguments())
        variables.extend(self._find_function_returns())
        return similarfinder.make_pattern(self._get_call(), variables)

    def get_body_pattern(self):
        """Pattern that matches other occurrences of the extracted body."""
        variables = []
        variables.extend(self._find_function_arguments())
        variables.extend(self._find_function_returns())
        variables.extend(self._find_temps())
        return similarfinder.make_pattern(self._get_body(), variables)

    def _get_body(self):
        result = sourceutils.fix_indentation(self.info.extracted, 0)
        # One-liners are wrapped in parens so they parse as an expression.
        if self.info.one_line:
            result = ('(%s)' % result)
        return result

    def _find_temps(self):
        return usefunction.find_temps(self.info.project, self._get_body())

    def get_checks(self):
        """For plain methods, constrain `self` matches to the defining class."""
        if (self.info.method and (not self.info.make_global)):
            if (_get_function_kind(self.info.scope) == 'method'):
                class_name = similarfinder._pydefined_to_str(self.info.scope.parent.pyobject)
                return {self._get_self_name(): ('type=' + class_name)}
        return {}

    def _create_info_collector(self):
        # Line numbers are made relative to the enclosing scope's start.
        zero = (self.info.scope.get_start() - 1)
        start_line = (self.info.region_lines[0] - zero)
        end_line = (self.info.region_lines[1] - zero)
        info_collector = _FunctionInformationCollector(start_line, end_line, self.info.global_)
        body = self.info.source[self.info.scope_region[0]:self.info.scope_region[1]]
        node = _parse_text(body)
        info_collector.visit(node)
        return info_collector

    def _get_function_definition(self):
        args = self._find_function_arguments()
        returns = self._find_function_returns()
        result = []
        self._append_decorators(result)
        result.append(('def %s:\n' % self._get_function_signature(args)))
        unindented_body = self._get_unindented_function_body(returns)
        indents = sourceutils.get_indent(self.info.project)
        function_body = sourceutils.indent_lines(unindented_body, indents)
        result.append(function_body)
        definition = ''.join(result)
        return (definition + '\n')

    def _append_decorators(self, result):
        # NOTE(review): upstream appends '@staticmethod\n'/'@classmethod\n';
        # only '\n' survives here — presumably mangled text, confirm upstream.
        if self._extracting_staticmethod():
            result.append('\n')
        elif self._extracting_classmethod():
            result.append('\n')

    def _extracting_classmethod(self):
        return (self.info.kind == 'classmethod')

    def _extracting_staticmethod(self):
        return (self.info.kind == 'staticmethod')

    def _get_function_signature(self, args):
        args = list(args)
        prefix = ''
        # Methods/classmethods get self/cls forced to the front of the args.
        if (self._extracting_method() or self._extracting_classmethod()):
            self_name = self._get_self_name()
            if (self_name is None):
                raise RefactoringError('Extracting a method from a function with no self argument.')
            if (self_name in args):
                args.remove(self_name)
            args.insert(0, self_name)
        return ((prefix + self.info.new_name) + ('(%s)' % self._get_comma_form(args)))

    def _extracting_method(self):
        return ((not self._extracting_staticmethod()) and (self.info.method and (not self.info.make_global) and (_get_function_kind(self.info.scope) == 'method')))

    def _get_self_name(self):
        if self._extracting_classmethod():
            return 'cls'
        return self._get_scope_self_name()

    def _get_scope_self_name(self):
        # Static methods have no implicit first parameter.
        if (self.info.scope.pyobject.get_kind() == 'staticmethod'):
            return
        param_names = self.info.scope.pyobject.get_param_names()
        if param_names:
            return param_names[0]

    def _get_function_call(self, args):
        return '{prefix}{name}({args})'.format(prefix=self._get_function_call_prefix(args), name=self.info.new_name, args=self._get_comma_form(args))

    def _get_function_call_prefix(self, args):
        prefix = ''
        if (self.info.method and (not self.info.make_global)):
            # static/class methods are called via the class name; plain
            # methods via the self parameter (which is removed from args).
            if (self._extracting_staticmethod() or self._extracting_classmethod()):
                prefix = (self.info.scope.parent.pyobject.get_name() + '.')
            else:
                self_name = self._get_self_name()
                if (self_name in args):
                    args.remove(self_name)
                prefix = (self_name + '.')
        return prefix

    def _get_comma_form(self, names):
        return ', '.join(names)

    def _get_call(self):
        args = self._find_function_arguments()
        returns = self._find_function_returns()
        call_prefix = ''
        # Multi-line extracts assign returns; one-liners use := when the
        # extracted expression contained a named expression.
        if (returns and ((not self.info.one_line) or self.info.returning_named_expr)):
            assignment_operator = (' := ' if self.info.one_line else ' = ')
            call_prefix = (self._get_comma_form(returns) + assignment_operator)
        if self.info.returned:
            call_prefix = 'return '
        return (call_prefix + self._get_function_call(args))

    def _find_function_arguments(self):
        # Top-level extract-to-method: only names read after and written in.
        if (self.info.global_ and (not self.info.make_global)):
            return list(((self.info_collector.read & self.info_collector.postread) & self.info_collector.written))
        if (not self.info.one_line):
            result = (self.info_collector.prewritten & self.info_collector.read)
            # Conditionally-written names read later must also be passed in.
            result |= ((self.info_collector.prewritten & self.info_collector.postread) & (self.info_collector.maybe_written - self.info_collector.written))
            return list(result)
        start = self.info.region[0]
        if (start == self.info.lines_region[0]):
            start = (start + re.search('\\S', self.info.extracted).start())
        function_definition = self.info.source[start:self.info.region[1]]
        read = _VariableReadsAndWritesFinder.find_reads_for_one_liners(function_definition)
        return list(self.info_collector.prewritten.intersection(read))

    def _find_function_returns(self):
        if self.info.one_line:
            written = (self.info_collector.written | self.info_collector.maybe_written)
            return list((written & self.info_collector.postread))
        # An explicit `return` in the region means nothing extra to return.
        if self.info.returned:
            return []
        written = (self.info_collector.written | self.info_collector.maybe_written)
        return list((written & self.info_collector.postread))

    def _get_unindented_function_body(self, returns):
        if self.info.one_line:
            return self._get_single_expression_function_body()
        return self._get_multiline_function_body(returns)

    def _get_multiline_function_body(self, returns):
        unindented_body = sourceutils.fix_indentation(self.info.extracted, 0)
        unindented_body = self._insert_globals(unindented_body)
        unindented_body = self._insert_nonlocals(unindented_body)
        if returns:
            unindented_body += ('\nreturn %s' % self._get_comma_form(returns))
        return unindented_body

    def _get_single_expression_function_body(self):
        extracted = _get_single_expression_body(self.info.extracted, info=self.info)
        body = ('return ' + extracted)
        body = self._insert_globals(body)
        body = self._insert_nonlocals(body)
        return body

    def _insert_globals(self, unindented_body):
        # Add a `global` statement for written globals not already declared.
        globals_in_body = self._get_globals_in_body(unindented_body)
        globals_ = (self.info_collector.globals_ & (self.info_collector.written | self.info_collector.maybe_written))
        globals_ = (globals_ - globals_in_body)
        if globals_:
            unindented_body = 'global {}\n{}'.format(', '.join(globals_), unindented_body)
        return unindented_body

    def _insert_nonlocals(self, unindented_body):
        # Same for `nonlocal` declarations.
        nonlocals_in_body = self._get_nonlocals_in_body(unindented_body)
        nonlocals_ = (self.info_collector.nonlocals_ & (self.info_collector.written | self.info_collector.maybe_written))
        nonlocals_ = (nonlocals_ - nonlocals_in_body)
        if nonlocals_:
            unindented_body = 'nonlocal {}\n{}'.format(', '.join(nonlocals_), unindented_body)
        return unindented_body

    def _get_globals_in_body(unindented_body):
        # NOTE(review): no `self`/@staticmethod — see class docstring.
        node = _parse_text(unindented_body)
        visitor = _GlobalFinder()
        visitor.visit(node)
        return visitor.globals_

    def _get_nonlocals_in_body(unindented_body):
        # NOTE(review): no `self`/@staticmethod — see class docstring.
        node = _parse_text(unindented_body)
        visitor = _NonlocalFinder()
        visitor.visit(node)
        return visitor.nonlocals_
def create_ctth_alti_pal_variable_with_fill_value_color(nc_file, var_name):
    """Create a CTTH palette variable that also carries a fill-value colour.

    The variable is (pal_colors_250, pal_rgb) uint8, filled from PAL_ARRAY,
    with black ([0, 0, 0]) as the fill-value colour.
    """
    palette = nc_file.create_variable(var_name, ('pal_colors_250', 'pal_rgb'), np.uint8)
    palette[:] = PAL_ARRAY
    attributes = {
        'palette_meanings': CTTH_PALETTE_MEANINGS,
        'fill_value_color': [0, 0, 0],
        'scale_factor': COT_SCALE,
        'add_offset': COT_OFFSET,
        '_FillValue': 65535,
    }
    for key, value in attributes.items():
        palette.attrs[key] = value
def test_Anything():
    """Anything compares equal (and not unequal) to arbitrary values from
    either side, and has a fixed repr."""
    for other in (None, []):
        assert_eq(Anything, other)
        assert_eq(other, Anything)
        assert not (Anything != other)
        assert not (other != Anything)
    assert_eq('<Anything>', repr(Anything))
class AllDatasetBatchesIterator(MultiIterator):
    """Iterate several dataloaders in lockstep, yielding one batch per
    dataloader each step as a ``{name: batch}`` dict.

    Stopping behaviour is governed by the strategy's stopping_mechanism:
    stop when the smallest dataset runs out, or restart exhausted loaders
    until every loader has been exhausted at least once.
    """

    def __init__(self, individual_dataloaders: Mapping[(str, Union[(DataLoader, Iterable)])], iteration_strategy: AllDatasetBatches) -> None:
        super().__init__(individual_dataloaders, iteration_strategy)
        self.iteration_strategy = iteration_strategy
        # This strategy never terminates on its own, which this iterator
        # cannot express — reject it up front.
        if (self.iteration_strategy.stopping_mechanism == StoppingMechanism.WRAP_AROUND_UNTIL_KILLED):
            raise NotImplementedError('WRAP_AROUND_UNTIL_KILLED is not implemented for AllDatasetBatches')
        # One live iterator per named dataloader.
        self.individual_iterators: Dict[(str, Iterator[DataLoader])] = {name: iter(dl) for (name, dl) in individual_dataloaders.items()}
        # Names of loaders that have been exhausted at least once
        # (used by RESTART_UNTIL_ALL_DATASETS_EXHAUSTED).
        self.iterators_finished: List[str] = []

    def __next__(self) -> Dict[(str, Any)]:
        batch_dict = {}
        for iterator in self.individual_iterators:
            try:
                batch_dict[iterator] = next(self.individual_iterators[iterator])
            except StopIteration:
                if (self.iteration_strategy.stopping_mechanism == StoppingMechanism.SMALLEST_DATASET_EXHAUSTED):
                    # First exhausted loader ends the whole iteration.
                    raise StopIteration
                elif (self.iteration_strategy.stopping_mechanism == StoppingMechanism.RESTART_UNTIL_ALL_DATASETS_EXHAUSTED):
                    # Record the exhaustion once per loader.
                    if (iterator not in self.iterators_finished):
                        self.iterators_finished.append(iterator)
                    if (len(self.iterators_finished) == len(self.individual_iterators)):
                        # Every loader has completed at least one pass.
                        raise StopIteration
                    else:
                        # Restart this loader and take its first batch.
                        self.individual_iterators[iterator] = iter(self.individual_dataloaders[iterator])
                        batch_dict[iterator] = next(self.individual_iterators[iterator])
        if (len(batch_dict) == 0):
            raise StopIteration
        return batch_dict
class Gmetric():
    """Send metrics to Ganglia's gmond over the gmetric wire protocol (UDP)."""

    # Metric value types accepted by the gmetric protocol.
    type = ('', 'string', 'uint16', 'int16', 'uint32', 'int32', 'float', 'double', 'timestamp')
    # Supported transports.
    protocol = ('udp', 'multicast')

    def __init__(self, host, port, protocol):
        """Open a UDP socket to (host, port).

        Raises ValueError when protocol is not 'udp' or 'multicast'.
        """
        if protocol not in self.protocol:
            raise ValueError('Protocol must be one of: ' + str(self.protocol))
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        if protocol == 'multicast':
            # TTL 20 keeps multicast announcements within the local network.
            self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 20)
        self.hostport = (host, int(port))

    def send(self, NAME, VAL, TYPE='', UNITS='', SLOPE='both', TMAX=60, DMAX=0, GROUP=''):
        """Encode and transmit one metric (metadata packet + value packet).

        Raises ValueError for an unknown SLOPE or TYPE, or an empty NAME.
        """
        if SLOPE not in slope_str2int:
            # BUG FIX: previously referenced the nonexistent ``self.slope``,
            # so a bad slope raised AttributeError instead of ValueError.
            raise ValueError('Slope must be one of: ' + str(slope_str2int.keys()))
        if TYPE not in self.type:
            raise ValueError('Type must be one of: ' + str(self.type))
        if len(NAME) == 0:
            raise ValueError('Name must be non-empty')
        meta_msg, data_msg = gmetric_write(NAME, VAL, TYPE, UNITS, SLOPE, TMAX, DMAX, GROUP)
        self.socket.sendto(meta_msg, self.hostport)
        self.socket.sendto(data_msg, self.hostport)
class FormTagFieldTest(TagTestManager, TestCase):
    """Tests for the tagulous form TagField: parsing, options, and the exact
    HTML attributes rendered onto the tag input widget."""

    def test_required(self):
        # The required flag must propagate from the field to its widget.
        self.assertTrue(tag_forms.TagField(required=True).required)
        self.assertTrue(tag_forms.TagField(required=True).widget.is_required)
        self.assertFalse(tag_forms.TagField(required=False).required)
        self.assertFalse(tag_forms.TagField(required=False).widget.is_required)
        self.assertTrue(tag_forms.TagField().required)
        self.assertTrue(tag_forms.TagField().widget.is_required)

    def test_input(self):
        # Tag strings parse into sorted lists; quoted tags keep their commas.
        self.assertFieldOutput(tag_forms.TagField, valid={'red': ['red'], 'Red, Blue': ['Blue', 'Red'], '"red, blue", yellow': ['red, blue', 'yellow']}, invalid={}, empty_value=[])

    def test_force_lowercase(self):
        # With force_lowercase, parsed tags come back lowercased.
        self.assertFieldOutput(tag_forms.TagField, field_kwargs={'tag_options': tag_models.TagOptions(force_lowercase=True)}, valid={'red': ['red'], 'Red, Blue': ['blue', 'red'], '"Red, Blue", Yellow': ['red, blue', 'yellow']}, invalid={}, empty_value=[])

    def test_response(self):
        # Submitted form data cleans to the sorted tag list.
        form = test_forms.TagFieldForm(data={'tags': 'red, blue'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['tags'], ['blue', 'red'])

    def test_render_basics(self):
        # Baseline widget HTML; {{required}} is the test-manager placeholder
        # for the version-dependent required attribute.
        class LocalTestForm(forms.Form):
            tag = tag_forms.TagField()
        form = LocalTestForm()
        self.assertHTMLEqual(str(form['tag']), '<input autocomplete="off" data-tag-options="{&quot;required&quot;: true}" data-tagulous="true" id="id_tag" name="tag" {{required}}type="text" />')

    def test_render_tag_optional(self):
        # required=False must surface in data-tag-options and drop the attr.
        class LocalTestForm(forms.Form):
            tag = tag_forms.TagField(required=False)
        form = LocalTestForm()
        self.assertHTMLEqual(str(form['tag']), '<input autocomplete="off" data-tag-options="{&quot;required&quot;: false}" data-tagulous="true" id="id_tag" name="tag" type="text" />')

    def test_render_tag_list(self):
        # Inline autocomplete tags render as a JSON data-tag-list attribute.
        class LocalTestForm(forms.Form):
            tag = tag_forms.TagField(autocomplete_tags=['one', 'two', 'three'])
        form = LocalTestForm()
        self.assertHTMLEqual(str(form['tag']), '<input autocomplete="off" data-tag-options="{&quot;required&quot;: true}" data-tagulous="true" data-tag-list="[&quot;one&quot;, &quot;two&quot;, &quot;three&quot;]" id="id_tag" name="tag" {{required}}type="text" />')

    def test_render_tag_url(self):
        # An autocomplete_view option renders a resolved data-tag-url instead.
        autocomplete_view = 'tagulous_tests_app-unlimited'
        class LocalTestForm(forms.Form):
            tag = tag_forms.TagField(tag_options=tag_models.TagOptions(autocomplete_view=autocomplete_view))
        form = LocalTestForm()
        self.assertHTMLEqual(str(form['tag']), '<input autocomplete="off" data-tag-options="{&quot;required&quot;: true}" data-tagulous="true" data-tag-url="/tagulous_tests_app/autocomplete/unlimited/" id="id_tag" name="tag" {{required}}type="text" />')

    def test_render_value(self):
        # Bound data is echoed back in the value attribute.
        form = test_forms.TagFieldForm(data={'tags': 'run, walk'})
        self.assertHTMLEqual(str(form['tags']), '<input autocomplete="off" data-tag-options="{&quot;required&quot;: true}" data-tagulous="true" id="id_tags" name="tags" {{required}}type="text" value="run, walk" />')

    def test_render_invalid_tag_url(self):
        # An unresolvable autocomplete view fails loudly at render time.
        autocomplete_view = 'tagulous_tests_app-view_does_not_exist'
        class LocalTestForm(forms.Form):
            tag = tag_forms.TagField(tag_options=tag_models.TagOptions(autocomplete_view=autocomplete_view))
        form = LocalTestForm()
        with self.assertRaises(ValueError) as cm:
            str(form['tag'])
        self.assertTrue(str(cm.exception).startswith("Invalid autocomplete view: Reverse for '{0}' not found. '{0}' is not a valid view function or pattern name.".format(autocomplete_view)))

    def test_render_autocomplete_settings(self):
        # Field-level autocomplete_settings merge over widget defaults,
        # with the field's values winning on key collisions.
        class LocalTestForm(forms.Form):
            tags = tag_forms.TagField(tag_options=tag_models.TagOptions(autocomplete_settings={'cats': 'purr', 'cows': 'moo'}))
        form = LocalTestForm()
        self.assertEqual(form['tags'].field.widget.default_autocomplete_settings, None)
        form['tags'].field.widget.default_autocomplete_settings = {'bees': 'buzz', 'cats': 'mew'}
        self.assertHTMLEqual(str(form['tags']), '<input autocomplete="off" data-tag-options="{&quot;autocomplete_settings&quot;: {&quot;cows&quot;: &quot;moo&quot;, &quot;bees&quot;: &quot;buzz&quot;, &quot;cats&quot;: &quot;purr&quot;}, &quot;required&quot;: true}" data-tagulous="true" id="id_tags" name="tags" {{required}}type="text" />')

    def test_invalid_prepare_value(self):
        # prepare_value only accepts strings/tag objects, not arbitrary lists.
        form = test_forms.TagFieldForm()
        with self.assertRaises(ValueError) as cm:
            form['tags'].field.prepare_value([1, 2])
        self.assertEqual(str(cm.exception), 'Tag field could not prepare unexpected value')
class Migration(migrations.Migration):
    """Auto-generated migration: creates the HistoricalUser table used by
    django-simple-history to snapshot every change to the User model.

    NOTE: generated code — do not hand-edit field definitions.
    """

    dependencies = [('adserver_auth', '0003_allow-blank')]

    operations = [migrations.CreateModel(name='HistoricalUser', fields=[('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('email', models.EmailField(db_index=True, max_length=254, null=True)), ('name', models.CharField(blank=True, default='', max_length=255, verbose_name='name')), ('is_staff', models.BooleanField(default=False, help_text='Is the user allowed to have access to the admin', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('updated_date', models.DateTimeField(blank=True, editable=False, verbose_name='update date')), ('created_date', models.DateTimeField(blank=True, editable=False, verbose_name='create date')), ('history_id', models.AutoField(primary_key=True, serialize=False)), ('history_date', models.DateTimeField()), ('history_change_reason', models.CharField(max_length=100, null=True)), ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)), ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL))], options={'verbose_name': 'historical user', 'ordering': ('-history_date', '-history_id'), 'get_latest_by': 'history_date'}, bases=(simple_history.models.HistoricalChanges, models.Model))]
def test_pipeline_runner_main_all(pipeline_cache_reset):
    """Run main-all with every run() argument set and check the NOTIFY-level
    echo output emitted by the pipeline's groups and success handler."""
    expected = ['sg1', 'sg1.2', 'success_handler']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as mock_log:
        pipelinerunner.run(
            pipeline_name='pipelines/api/main-all',
            args_in=['A', 'B', 'C'],
            groups=['sg1'],
            success_group='sh',
            failure_group='fh',
            loader='arbpack.naivefileloader',
            py_dir=working_dir_tests,
        )
        assert mock_log.mock_calls == [call(message) for message in expected]
def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
    """Run each network-chaos scenario config: render tc egress jobs on the
    selected nodes, wait for them, and record per-scenario telemetry.

    Returns (failed_scenario_paths, scenario_telemetries).

    NOTE(review): failed_post_scenarios is initialised twice and never
    populated here — it is only passed through to cerberus. Confirm intent.
    """
    failed_post_scenarios = ''
    logging.info('Runing the Network Chaos tests')
    failed_post_scenarios = ''
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_scenarios = []
    for net_config in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = net_config
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, net_config)
        try:
            with open(net_config, 'r') as file:
                # Supported egress impairment kinds.
                param_lst = ['latency', 'loss', 'bandwidth']
                test_config = yaml.safe_load(file)
                test_dict = test_config['network_chaos']
                # Scenario parameters, all with defaults.
                test_duration = int(get_yaml_item_value(test_dict, 'duration', 300))
                test_interface = get_yaml_item_value(test_dict, 'interfaces', [])
                test_node = get_yaml_item_value(test_dict, 'node_name', '')
                test_node_label = get_yaml_item_value(test_dict, 'label_selector', 'node-role.kubernetes.io/master')
                test_execution = get_yaml_item_value(test_dict, 'execution', 'serial')
                test_instance_count = get_yaml_item_value(test_dict, 'instance_count', 1)
                test_egress = get_yaml_item_value(test_dict, 'egress', {'bandwidth': '100mbit'})
                # Explicit node names win over the label selector.
                if test_node:
                    node_name_list = test_node.split(',')
                else:
                    node_name_list = [test_node]
                nodelst = []
                for single_node_name in node_name_list:
                    nodelst.extend(common_node_functions.get_node(single_node_name, test_node_label, test_instance_count, kubecli))
                file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
                env = Environment(loader=file_loader, autoescape=True)
                pod_template = env.get_template('pod.j2')
                # Resolve/validate the target interfaces on the chosen nodes.
                test_interface = verify_interface(test_interface, nodelst, pod_template, kubecli)
                joblst = []
                # Only egress kinds actually present in the scenario config.
                egress_lst = [i for i in param_lst if (i in test_egress)]
                chaos_config = {'network_chaos': {'duration': test_duration, 'interfaces': test_interface, 'node_name': ','.join(nodelst), 'execution': test_execution, 'instance_count': test_instance_count, 'egress': test_egress}}
                logging.info(('Executing network chaos with config \n %s' % yaml.dump(chaos_config)))
                job_template = env.get_template('job.j2')
                try:
                    # One job per (egress kind, node). Serial mode waits after
                    # each job; parallel mode creates them all, then waits once.
                    for i in egress_lst:
                        for node in nodelst:
                            exec_cmd = get_egress_cmd(test_execution, test_interface, i, test_dict['egress'], duration=test_duration)
                            logging.info(('Executing %s on node %s' % (exec_cmd, node)))
                            job_body = yaml.safe_load(job_template.render(jobname=(i + str(hash(node))[:5]), nodename=node, cmd=exec_cmd))
                            joblst.append(job_body['metadata']['name'])
                            api_response = kubecli.create_job(job_body)
                            if (api_response is None):
                                raise Exception('Error creating job')
                            if (test_execution == 'serial'):
                                logging.info('Waiting for serial job to finish')
                                start_time = int(time.time())
                                # 300s grace on top of the scenario duration.
                                wait_for_job(joblst[:], kubecli, (test_duration + 300))
                                logging.info(('Waiting for wait_duration %s' % wait_duration))
                                time.sleep(wait_duration)
                                end_time = int(time.time())
                                cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
                            if (test_execution == 'parallel'):
                                break
                    if (test_execution == 'parallel'):
                        logging.info('Waiting for parallel job to finish')
                        start_time = int(time.time())
                        wait_for_job(joblst[:], kubecli, (test_duration + 300))
                        logging.info(('Waiting for wait_duration %s' % wait_duration))
                        time.sleep(wait_duration)
                        end_time = int(time.time())
                        cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
                except Exception as e:
                    logging.error(('Network Chaos exiting due to Exception %s' % e))
                    raise RuntimeError()
                finally:
                    # Jobs are always cleaned up, even on failure.
                    logging.info('Deleting jobs')
                    delete_job(joblst[:], kubecli)
        except (RuntimeError, Exception):
            scenario_telemetry.exitStatus = 1
            failed_scenarios.append(net_config)
            log_exception(net_config)
        else:
            scenario_telemetry.exitStatus = 0
        scenario_telemetries.append(scenario_telemetry)
    return (failed_scenarios, scenario_telemetries)
class Effect7013(BaseEffect):
    """Passive effect: Assault Frigates bonus to kinetic damage of charges
    requiring Missile Launcher Operation, scaled by eliteBonusGunship1."""

    type = 'passive'

    # NOTE(review): no `self` parameter — the eos effect framework appears to
    # call handlers unbound; confirm against BaseEffect's dispatch.
    def handler(fit, src, context, projectionRange, **kwargs):
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Missile Launcher Operation')), 'kineticDamage', src.getModifiedItemAttr('eliteBonusGunship1'), skill='Assault Frigates', **kwargs)
class FGM():
    """Fast Gradient Method: adversarial perturbation of embedding weights.

    attack() nudges matching parameters along their gradient direction;
    restore() puts the saved originals back.
    """

    def __init__(self, model):
        self.model = model
        # Saved parameter tensors keyed by parameter name, filled by attack().
        self.backup = {}

    def attack(self, epsilon=1.0, emb_name='word_embeddings'):
        # Perturb every trainable parameter whose name contains `emb_name`
        # by epsilon times its normalised gradient.
        for name, param in self.model.named_parameters():
            if not (param.requires_grad and emb_name in name):
                continue
            self.backup[name] = param.data.clone()
            grad_norm = torch.norm(param.grad)
            if grad_norm != 0:
                param.data.add_(epsilon * param.grad / grad_norm)

    def restore(self, emb_name='word_embeddings'):
        # Undo attack(): restore saved tensors, then clear the backup map.
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                assert name in self.backup
                param.data = self.backup[name]
        self.backup = {}
class S3Path(_S3Path):
    """S3-backed path that marks "directories" with an empty keep file."""

    # Sentinel object created so an otherwise keyless "directory" appears
    # in listings.
    keep_file = '.s3keep'

    def mkdir(self, mode: int = 511, parents: bool = False, exist_ok: bool = False) -> None:
        # S3 has no real directories: creating the keep object is enough.
        # NOTE(review): `mode`, `parents` and `exist_ok` are accepted for
        # pathlib signature compatibility but intentionally ignored.
        self.joinpath(self.keep_file).touch()

    def glob(self, pattern: str) -> Iterator[S3Path]:
        """Yield objects under this path whose key (relative to self)
        matches `pattern`, paging through list_objects_v2 results."""
        bucket_name = self.bucket
        (resource, _) = self._accessor.configuration_map.get_configuration(self)
        bucket = resource.Bucket(bucket_name)
        kwargs = {'Bucket': bucket_name,
                  'Prefix': self._accessor.generate_prefix(self),
                  'Delimiter': ''}
        continuation_token = None
        while True:
            if continuation_token:
                kwargs['ContinuationToken'] = continuation_token
            response = bucket.meta.client.list_objects_v2(**kwargs)
            # Fix: 'Contents' is absent when a page holds no objects (e.g.
            # empty prefix); indexing it directly raised KeyError.
            for file in response.get('Contents', []):
                file_path = S3Path(f"/{bucket_name}/{file['Key']}")
                if fnmatch(str(file_path.relative_to(self)), pattern):
                    yield file_path
            if not response.get('IsTruncated'):
                break
            continuation_token = response.get('NextContinuationToken')
class Solution():
    """LeetCode "Two Sum"."""

    def twoSum(self, nums, target):
        """Return indices (i, j) with i < j and nums[i] + nums[j] == target.

        Returns None when no such pair exists.  Single pass with a
        value -> index map: O(n) instead of the original O(n^2)
        `list.index` scan; also handles duplicate values correctly.
        """
        seen = {}  # value -> earliest index where it occurred
        for j, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return (seen[complement], j)
            seen[value] = j
        return None
def test_annotation_long(testdir):
    # Regression test: for a failure preceded by a long function body, the
    # plugin must emit a single GitHub Actions ::error annotation pointing at
    # the assert line (line 17 of the generated file), and must NOT echo the
    # intermediate "x += 1" statements into the annotation text.
    testdir.makepyfile("\n import pytest\n pytest_plugins = 'pytest_github_actions_annotate_failures'\n\n def f(x):\n return x\n\n def test_fail():\n x = 1\n x += 1\n x += 1\n x += 1\n x += 1\n x += 1\n x += 1\n x += 1\n\n assert f(x) == 3\n ")
    # The plugin only activates when it believes it runs inside GitHub Actions.
    testdir.monkeypatch.setenv('GITHUB_ACTIONS', 'true')
    # Run in a subprocess so the ::error line is written to real stderr.
    result = testdir.runpytest_subprocess()
    result.stderr.fnmatch_lines(['::error file=test_annotation_long.py,line=17::test_fail*assert 8 == 3*where 8 = f(8)*'])
    no_fnmatch_line(result, '::*assert x += 1*')
class TestRegisteringSubclass(BaseTestCase):
    """Duplicate resource-name subclass registrations must be rejected."""

    def test_handling_duplicate(self):
        # Registering a class that is already registered raises ValueError.
        with pytest.raises(ValueError) as excinfo:
            rname.register_subclass(rname.GPIBInstr)
        assert 'Class already registered for' in excinfo.exconly()

    def test_handling_duplicate_default(self) -> None:
        # Registering a second default subclass for the same interface /
        # resource-class pair raises ValueError as well.
        with pytest.raises(ValueError) as excinfo:

            class R(rname.ResourceName):
                interface_type: ClassVar[str] = 'TCPIP'
                resource_class: ClassVar[str] = 'HISLIP'
                is_rc_optional: ClassVar[bool] = True

            rname.register_subclass(R)

        assert 'Default already specified for' in excinfo.exconly()
def _standardize_weights(y, sample_weight=None, class_weight=None, sample_weight_mode=None): if (sample_weight_mode is not None): if (sample_weight_mode != 'temporal'): raise ValueError(('"sample_weight_mode should be None or "temporal". Found: ' + str(sample_weight_mode))) if (len(y.shape) < 3): raise ValueError((('Found a sample_weight array for an input with shape ' + str(y.shape)) + '. Timestep-wise sample weighting (use of sample_weight_mode="temporal") is restricted to outputs that are at least 3D, i.e. that have a time dimension.')) if ((sample_weight is not None) and (len(sample_weight.shape) != 2)): raise ValueError((('Found a sample_weight array with shape ' + str(sample_weight.shape)) + '. In order to use timestep-wise sample weighting, you should pass a 2D sample_weight array.')) elif ((sample_weight is not None) and (len(sample_weight.shape) != 1)): raise ValueError((('Found a sample_weight array with shape ' + str(sample_weight.shape)) + '. In order to use timestep-wise sample weights, you should specify sample_weight_mode="temporal" in compile(). If you just mean to use sample-wise weights, make sure your sample_weight array is 1D.')) if (sample_weight is not None): if (len(sample_weight.shape) > len(y.shape)): raise ValueError(((('Found a sample_weight with shape' + str(sample_weight.shape)) + '.Expected sample_weight with rank less than or equal to ') + str(len(y.shape)))) if (y.shape[:sample_weight.ndim] != sample_weight.shape): raise ValueError((((('Found a sample_weight array with shape ' + str(sample_weight.shape)) + ' for an input with shape ') + str(y.shape)) + '. 
sample_weight cannot be broadcast.')) return sample_weight elif isinstance(class_weight, dict): if (len(y.shape) > 2): raise ValueError('`class_weight` not supported for 3+ dimensional targets.') if (y.shape[1] > 1): y_classes = y.argmax(axis=1) elif (y.shape[1] == 1): y_classes = np.reshape(y, y.shape[0]) else: y_classes = y weights = np.asarray([class_weight[cls] for cls in y_classes if (cls in class_weight)]) if (len(weights) != len(y_classes)): existing_classes = set(y_classes) existing_class_weight = set(class_weight.keys()) raise ValueError(('`class_weight` must contain all classes in the data. The classes %s exist in the data but not in `class_weight`.' % (existing_classes - existing_class_weight))) return weights elif (sample_weight_mode is None): return np.ones((y.shape[0],), dtype=K.floatx()) else: return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
def _expval_with_stddev(coeffs: np.ndarray, probs: np.ndarray, shots: int) -> Tuple[(float, float)]: expval = coeffs.dot(probs) sq_expval = (coeffs ** 2).dot(probs) variance = ((sq_expval - (expval ** 2)) / shots) if ((variance < 0) and (not np.isclose(variance, 0))): logger.warning('Encountered a negative variance in expectation value calculation.(%f). Setting standard deviation of result to 0.', variance) stddev = (np.sqrt(variance) if (variance > 0) else 0.0) return (expval, stddev)
def test_infer_generated_setter() -> None:
    """The synthesized setter of a property-style attribute infers as a
    FunctionDef with proper Arguments and no literal constants in its body."""
    code = '\n class A:\n \n def test(self):\n pass\n A.test.setter\n '
    setter_node = extract_node(code)
    result = next(setter_node.infer())
    assert isinstance(result, nodes.FunctionDef)
    assert isinstance(result.args, nodes.Arguments)
    # The generated function body must contain no Const nodes.
    assert not list(result.nodes_of_class(nodes.Const))
class AccreditationFitter():
    # Accumulates single-shot results of accreditation-protocol runs
    # (one target circuit plus several trap circuits per run) and computes
    # bounds on the variation distance of the target's output distribution.

    def __init__(self):
        self._counts_all = {}        # outcome -> count over all runs
        self._counts_accepted = {}   # outcome -> count over accepted runs only
        self._Ntraps = None          # traps per run; fixed after first run
        self._Nrejects = []          # per-run number of rejected traps
        self._Nruns = 0
        self._Nacc = 0               # runs where every trap returned all-zeros
        self._g = 1.0
        # Attributes below serve the deprecated single_protocol_run /
        # bound_variation_distance API.
        self.flag = None
        self.outputs = None
        self.num_runs = None
        self.N_acc = None
        self.bound = None
        self.confidence = None

    def Reset(self):
        # Restore the pristine state created by __init__.
        self._counts_all = {}
        self._counts_accepted = {}
        self._Ntraps = None
        self._Nrejects = []
        self._Nruns = 0
        self._Nacc = 0
        self._g = 1.0
        self.flag = None
        self.outputs = None
        self.num_runs = None
        self.N_acc = None
        self.bound = None
        self.confidence = None

    def AppendResults(self, results, postp_list, v_zero):
        # Extract the single-shot outcome string of each circuit in the run,
        # then delegate to _AppendData.
        strings = []
        for (ind, _) in enumerate(postp_list):
            counts = results.get_counts(ind)
            shots = 0
            countstring = None
            for (countstring, val) in counts.items():
                shots += val
            # The protocol requires exactly one shot per circuit.
            if ((shots != 1) or (countstring is None)):
                raise QiskitError('ERROR: not single shot data')
            strings.append(countstring)
        self._AppendData(strings, postp_list, v_zero)

    def AppendStrings(self, strings, postp_list, v_zero):
        # Same as AppendResults, but for pre-extracted outcome strings.
        self._AppendData(strings, postp_list, v_zero)

    def FullAccreditation(self, confidence):
        # Bound based only on accepted runs; requires at least one.
        if (self._Nacc == 0):
            raise QiskitError('ERROR: Variation distance requiresat least one accepted run')
        if ((confidence > 1) or (confidence < 0)):
            raise QiskitError('ERROR: Confidence must bebetween 0 and 1')
        # Hoeffding-style deviation term for the acceptance ratio.
        theta = np.sqrt((np.log((2 / (1 - confidence))) / (2 * self._Nruns)))
        if ((self._Nacc / self._Nruns) > theta):
            bound = ((self._g * 1.7) / (self._Ntraps + 1))
            bound = (bound / ((self._Nacc / self._Nruns) - theta))
            bound = ((bound + 1) - self._g)
        else:
            bound = 1
        bound = min(bound, 1)
        return (self._counts_accepted, bound, confidence)

    def MeanAccreditation(self, confidence):
        # Bound based on the mean trap-rejection rate over all runs.
        theta = np.sqrt((np.log((2 / (1 - confidence))) / (2 * self._Nruns)))
        bound = ((((2 * np.sum(self._Nrejects)) / self._Nruns) / self._Ntraps) + theta)
        bound = min(bound, 1)
        return (self._counts_all, bound, confidence)

    def _AppendData(self, strings, postp_list, v_zero):
        # Record one run: traps (ind != v_zero) must decode to all-zeros;
        # the target circuit (ind == v_zero) contributes an output count.
        if (self._Ntraps is None):
            self._Ntraps = (len(postp_list) - 1)
        elif ((len(postp_list) - 1) != self._Ntraps):
            raise QiskitError('ERROR: Run protocol with thesame number of traps')
        if (self._Ntraps < 3):
            raise QiskitError('ERROR: run the protocol with at least 3 traps')
        self._Nruns += 1
        self._Nrejects.append(0)
        flag = True
        for (ind, (s, p)) in enumerate(zip(strings, postp_list)):
            if (ind != v_zero):
                # Undo the one-time-pad post-processing before checking.
                meas = QOTPCorrectString(s, p)
                if (meas != ('0' * len(meas))):
                    flag = False
                    self._Nrejects[(- 1)] += 1
            else:
                target_count = QOTPCorrectString(s, p)
                if (target_count in self._counts_all.keys()):
                    self._counts_all[target_count] += 1
                else:
                    self._counts_all[target_count] = 1
        if flag:
            # All traps passed: the run is accepted.
            self._Nacc += 1
            if (target_count in self._counts_accepted.keys()):
                self._counts_accepted[target_count] += 1
            else:
                self._counts_accepted[target_count] = 1

    # NOTE(review): the bare `_function(...)` calls below look like mangled
    # deprecation decorators (e.g. `@deprecate_function(...)`) — confirm
    # against the upstream source.
    _function('single_protocol_run is being deprecated. Use AppendResult or AppendString')

    def single_protocol_run(self, results, postp_list, v_zero):
        # Deprecated one-run ingestion path; superseded by AppendResults.
        self._Nruns = (self._Nruns + 1)
        self.flag = 'accepted'
        if (self._Nruns == 1):
            self._Ntraps = (len(postp_list) - 1)
        elif ((len(postp_list) - 1) != self._Ntraps):
            raise QiskitError('ERROR: Run protocol with thesame number of traps')
        if (self._Ntraps < 3):
            raise QiskitError('ERROR: run the protocol with at least 3 traps')
        allcounts = []
        for (ind, postp) in enumerate(postp_list):
            counts = results.get_counts(ind)
            counts = QOTPCorrectCounts(counts, postp)
            shots = 0
            countstring = None
            for (countstring, val) in counts.items():
                shots += val
            if ((shots != 1) or (countstring is None)):
                raise QiskitError('ERROR: not single shot data')
            allcounts.append(countstring)
        for (k, count) in enumerate(allcounts):
            if (k != v_zero):
                # Any non-zero trap outcome rejects the whole run.
                if (count != ('0' * len(count))):
                    self.flag = 'rejected'
            else:
                output_target = count
        if (self.flag == 'accepted'):
            self._Nacc += 1
            if (output_target in self._counts_accepted.keys()):
                self._counts_accepted[output_target] += 1
            else:
                self._counts_accepted[output_target] = 1
        self.outputs = self._counts_accepted
        self.num_runs = self._Nruns
        self.N_acc = self._Nacc

    _function('bound_variation_distance is being deprecated. Use FullAccreditation or MeanAccreditation')

    def bound_variation_distance(self, theta):
        # Deprecated bound computation; superseded by FullAccreditation.
        if (self._Nacc == 0):
            raise QiskitError('ERROR: Variation distance requiresat least one accepted run')
        if ((self._Nacc / self._Nruns) > theta):
            self.bound = ((self._g * 1.7) / (self._Ntraps + 1))
            self.bound = (self.bound / ((self._Nacc / self._Nruns) - theta))
            self.bound = ((self.bound + 1) - self._g)
        self.confidence = (1 - (2 * np.exp(((((- 2) * theta) * self._Nruns) * self._Nruns))))
        self.bound = min(self.bound, 1)
def test_cc_head():
    """Smoke-test CCHead: structural checks, then a CUDA forward pass."""
    head = CCHead(in_channels=32, channels=16, num_classes=19)
    # Expected structure: two conv layers plus the criss-cross attention.
    assert len(head.convs) == 2
    assert hasattr(head, 'cca')
    if not torch.cuda.is_available():
        pytest.skip('CCHead requires CUDA')
    feature_maps = [torch.randn(1, 32, 45, 45)]
    head, feature_maps = to_cuda(head, feature_maps)
    seg_logits = head(feature_maps)
    assert seg_logits.shape == (1, head.num_classes, 45, 45)
class Imagefolder_modified(DatasetFolder):
    """ImageFolder-style dataset forwarding `cached`/`number` options to
    DatasetFolder."""

    def __init__(self, root, transform=None, target_transform=None,
                 loader=default_loader, is_valid_file=None, cached=False,
                 number=None):
        # When a validity predicate is supplied, extension filtering is
        # disabled (mutually exclusive, as in torchvision).
        extensions = IMG_EXTENSIONS if is_valid_file is None else None
        super(Imagefolder_modified, self).__init__(
            root,
            loader,
            extensions,
            transform=transform,
            target_transform=target_transform,
            is_valid_file=is_valid_file,
            cached=cached,
            number=number)
        # Alias kept for torchvision ImageFolder compatibility.
        self.imgs = self.samples
def _on_raw(func_name):
    """Build a method that forwards `func_name` to self's raw string.

    An ANSI-aware first argument is replaced by its raw form before the
    call, and a plain-string result is re-wrapped as an ANSIString so
    chained operations stay ANSI-aware.
    """
    def wrapped(self, *args, **kwargs):
        call_args = list(args)
        if call_args and hasattr(call_args[0], '_raw_string'):
            call_args[0] = call_args[0].raw()
        result = getattr(self._raw_string, func_name)(*call_args, **kwargs)
        return ANSIString(result, decoded=True) if isinstance(result, str) else result
    return wrapped
class TestNormalDistribution(QiskitTestCase):
    # Tests for the NormalDistribution circuit-library element.

    def assertDistributionIsCorrect(self, circuit, num_qubits, mu, sigma, bounds, upto_diag):
        # Compare the circuit's statevector against the square roots of the
        # normalised multivariate-normal pdf sampled on the qubit grid.
        if (not isinstance(num_qubits, (list, np.ndarray))):
            num_qubits = [num_qubits]
        if (not isinstance(mu, (list, np.ndarray))):
            mu = [mu]
        if (not isinstance(sigma, (list, np.ndarray))):
            sigma = [[sigma]]
        if (not isinstance(bounds[0], tuple)):
            bounds = [bounds]
        # One grid axis per dimension, 2**n points along each.
        meshgrid = np.meshgrid(*[np.linspace(bound[0], bound[1], num=(2 ** num_qubits[i])) for (i, bound) in enumerate(bounds)], indexing='ij')
        x = list(zip(*[grid.flatten() for grid in meshgrid]))
        probabilities = multivariate_normal.pdf(x, mu, sigma)
        normalized_probabilities = (probabilities / np.sum(probabilities))
        expected = np.sqrt(normalized_probabilities)
        actual = Statevector.from_instruction(circuit)
        if upto_diag:
            # In this mode equivalence up to a diagonal (phases) suffices.
            self.assertTrue(actual.equiv(expected))
        else:
            np.testing.assert_array_almost_equal(expected, actual.data)

    # NOTE(review): the bare tuples preceding the test methods below look
    # like argument lists of mangled parameterisation decorators
    # (e.g. ddt @data(...)); kept verbatim — confirm against upstream.
    ([2, None, None, None, False], [3, 1.75, 2.5, None, True], [2, 1.75, 2.5, (0, 3), False], [[1, 2, 2], None, None, None, True], [[1, 2, 1], [0, 1, 1], [[1.2, 0, 0], [0, 0.5, 0], [0, 0, 0.1]], [(0, 2), ((- 1), 1), ((- 3), 3)], False])

    def test_normal(self, num_qubits, mu, sigma, bounds, upto_diag):
        # Pass only explicitly-given parameters so defaults are exercised
        # too, then validate the produced distribution.
        kwargs = {'num_qubits': num_qubits, 'upto_diag': upto_diag}
        if (mu is None):
            mu = (np.zeros(len(num_qubits)) if isinstance(num_qubits, list) else 0)
        else:
            kwargs['mu'] = mu
        if (sigma is None):
            sigma = (np.eye(len(num_qubits)).tolist() if isinstance(num_qubits, list) else 1)
        else:
            kwargs['sigma'] = sigma
        if (bounds is None):
            bounds = ([((- 1), 1)] * (len(num_qubits) if isinstance(num_qubits, list) else 1))
        else:
            kwargs['bounds'] = bounds
        normal = NormalDistribution(**kwargs)
        self.assertDistributionIsCorrect(normal, num_qubits, mu, sigma, bounds, upto_diag)

    ([2, [1, 1], 2, (0, 1)], [2, 1.2, [[1, 0], [0, 1]], (0, 1)], [2, 1.2, 1, [(0, 1), (0, 1)]], [[1, 2], 1, [[1, 0], [0, 1]], [(0, 1), (0, 1)]], [[1, 2], [0, 0], [[2]], [(0, 1), (0, 1)]], [[1, 2], [0, 0], [[1, 0], [0, 1]], [0, 1]])

    def test_mismatching_dimensions(self, num_qubits, mu, sigma, bounds):
        # Inconsistent shapes of mu / sigma / bounds must be rejected.
        with self.assertRaises(ValueError):
            _ = NormalDistribution(num_qubits, mu, sigma, bounds)

    ([(0, 0), (0, 1)], [((- 2), (- 1)), (1, 0)])

    def test_bounds_invalid(self, bounds):
        # Bounds with lower >= upper must be rejected.
        with self.assertRaises(ValueError):
            _ = NormalDistribution([1, 1], [0, 0], [[1, 0], [0, 1]], bounds)
class PizzaTestDrive():
    """Drives both concrete pizza stores through all four menu items."""

    def main(*args) -> None:
        # NOTE(review): declared without `self` in the original; kept as-is.
        nyStore: PizzaStore = NYPizzaStore()
        chicagoStore: PizzaStore = ChicagoPizzaStore()
        # Same output order as the original: for each style, Ethan orders
        # from New York, then Joel orders from Chicago.
        for style in ('cheese', 'clam', 'pepperoni', 'veggie'):
            pizza: Pizza = nyStore.orderPizza(style)
            print(f'''Ethan ordered a {pizza.getName()}
''')
            pizza = chicagoStore.orderPizza(style)
            print(f'''Joel ordered a {pizza.getName()}
''')
def test_logins_fails_with_invalid_email(graphql_client):
    """A malformed email yields a LoginErrors payload with an email
    validation message and no password errors."""
    UserFactory(email='', password='test')
    document = 'mutation($input: LoginInput!) {\n login(input: $input) {\n __typename\n ... on LoginErrors {\n errors {\n email\n password\n }\n }\n }\n }'
    variables = {'input': {'email': 'not.an.email', 'password': 'incorrect'}}
    response = graphql_client.query(document, variables=variables)
    login_payload = response['data']['login']
    assert login_payload['__typename'] == 'LoginErrors'
    assert login_payload['errors']['email'] == ['Email is not valid']
    assert login_payload['errors']['password'] == []
class JavaLexer(RegexLexer):
    # Pygments lexer for Java source files.
    name = 'Java'
    # NOTE(review): the URL string literal was truncated in this copy;
    # restored to the language homepage — confirm against upstream.
    url = 'https://www.java.com/'
    aliases = ['java']
    filenames = ['*.java']
    mimetypes = ['text/x-java']
    version_added = ''
    flags = (re.MULTILINE | re.DOTALL)
    tokens = {
        'root': [
            # `record` declarations, with optional access/static modifiers.
            ('(^\\s*)((?:(?:public|private|protected|static|strictfp)(?:\\s+))*)(record)\\b', bygroups(Whitespace, using(this), Keyword.Declaration), 'class'),
            ('[^\\S\\n]+', Whitespace),
            # Line and block comments.
            ('(//.*?)(\\n)', bygroups(Comment.Single, Whitespace)),
            ('/\\*.*?\\*/', Comment.Multiline),
            # Statement keywords.
            ('(assert|break|case|catch|continue|default|do|else|finally|for|if|goto|instanceof|new|return|switch|this|throw|try|while)\\b', Keyword),
            # Method declarations: return type, name, then '('.
            ('((?:(?:[^\\W\\d]|\\$)[\\w.\\[\\]$<>]*\\s+)+?)((?:[^\\W\\d]|\\$)[\\w$]*)(\\s*)(\\()', bygroups(using(this), Name.Function, Whitespace, Punctuation)),
            # Annotation-like dotted names.
            ('[^\\W\\d][\\w.]*', Name.Decorator),
            # Declaration modifiers.
            ('(abstract|const|enum|extends|final|implements|native|private|protected|public|sealed|static|strictfp|super|synchronized|throws|transient|volatile|yield)\\b', Keyword.Declaration),
            # Primitive types.
            ('(boolean|byte|char|double|float|int|long|short|void)\\b', Keyword.Type),
            ('(package)(\\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
            ('(true|false|null)\\b', Keyword.Constant),
            ('(class|interface)\\b', Keyword.Declaration, 'class'),
            ('(var)(\\s+)', bygroups(Keyword.Declaration, Whitespace), 'var'),
            ('(import(?:\\s+static)?)(\\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'),
            # Text blocks and ordinary string/char literals.
            ('"""\\n', String, 'multiline_string'),
            ('"', String, 'string'),
            ("'\\\\.'|'[^\\\\]'|'\\\\u[0-9a-fA-F]{4}'", String.Char),
            # Member access after a dot.
            ('(\\.)((?:[^\\W\\d]|\\$)[\\w$]*)', bygroups(Punctuation, Name.Attribute)),
            # Switch 'default:' and generic labels.
            ('^(\\s*)(default)(:)', bygroups(Whitespace, Keyword, Punctuation)),
            ('^(\\s*)((?:[^\\W\\d]|\\$)[\\w$]*)(:)', bygroups(Whitespace, Name.Label, Punctuation)),
            ('([^\\W\\d]|\\$)[\\w$]*', Name),
            # Numeric literals: float (incl. hex float), hex, binary,
            # octal, then decimal integers.
            ('([0-9][0-9_]*\\.([0-9][0-9_]*)?|\\.[0-9][0-9_]*)([eE][+\\-]?[0-9][0-9_]*)?[fFdD]?|[0-9][eE][+\\-]?[0-9][0-9_]*[fFdD]?|[0-9]([eE][+\\-]?[0-9][0-9_]*)?[fFdD]|0[xX]([0-9a-fA-F][0-9a-fA-F_]*\\.?|([0-9a-fA-F][0-9a-fA-F_]*)?\\.[0-9a-fA-F][0-9a-fA-F_]*)[pP][+\\-]?[0-9][0-9_]*[fFdD]?', Number.Float),
            ('0[xX][0-9a-fA-F][0-9a-fA-F_]*[lL]?', Number.Hex),
            ('0[bB][01][01_]*[lL]?', Number.Bin),
            ('0[0-7_]+[lL]?', Number.Oct),
            ('0|[1-9][0-9_]*[lL]?', Number.Integer),
            ('[~^*!%&\\[\\]<>|+=/?-]', Operator),
            ('[{}();:.,]', Punctuation),
            ('\\n', Whitespace)
        ],
        'class': [
            ('\\s+', Text),
            ('([^\\W\\d]|\\$)[\\w$]*', Name.Class, '#pop')
        ],
        'var': [
            ('([^\\W\\d]|\\$)[\\w$]*', Name, '#pop')
        ],
        'import': [
            ('[\\w.]+\\*?', Name.Namespace, '#pop')
        ],
        'multiline_string': [
            ('"""', String, '#pop'),
            ('"', String),
            include('string')
        ],
        'string': [
            ('[^\\\\"]+', String),
            # NOTE(review): empty pattern kept verbatim from this copy;
            # it looks like a mangled backslash-escape rule — confirm
            # against the upstream lexer before relying on it.
            ('', String),
            ('\\\\"', String),
            ('\\\\', String),
            ('"', String, '#pop')
        ]
    }
def find_paths(path_patterns: Sequence[str], exclude_name_patterns: Sequence[str] = (), cwd: Optional[Union[Path, str]] = None) -> Generator[Path, None, None]:
    """Yield paths under `cwd` matching any glob in `path_patterns`.

    A path is skipped when its final component (`path.name`) matches any
    fnmatch pattern in `exclude_name_patterns`.  `cwd` defaults to the
    current working directory and may be a string or a Path.

    Fix: the exclusion list default was a mutable `[]`; replaced with an
    immutable tuple (same behaviour, no shared-default hazard).
    """
    if cwd is None:
        cwd = Path.cwd()
    elif isinstance(cwd, str):
        cwd = Path(cwd)
    for pattern in path_patterns:
        for path in cwd.glob(pattern):
            # Exclusions match only the name, not the whole relative path.
            if not any(fnmatch(path.name, ex) for ex in exclude_name_patterns):
                yield path
class GANLoss(nn.Module):
    """GAN objective supporting 'lsgan', 'vanilla' and 'wgangp' modes.

    lsgan/vanilla compare predictions against broadcast real/fake label
    tensors via MSE/BCE; wgangp returns signed critic means directly.
    """

    def __init__(self, use_lsgan=True, gan_mode='lsgan', target_real_label=1.0, target_fake_label=0.0):
        super(GANLoss, self).__init__()
        # Buffers so the label tensors follow the module across devices.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()
        assert gan_mode in ['lsgan', 'vanilla', 'wgangp']
        if gan_mode in ['wgangp']:
            self.loss = None  # WGAN-GP uses raw critic means, no criterion
        self.gan_mode = gan_mode

    def get_target_tensor(self, input, target_is_real):
        # Broadcast the scalar label buffer to the prediction's shape.
        label = self.real_label if target_is_real else self.fake_label
        return label.expand_as(input)

    def __call__(self, prediction, target_is_real, add_gradient=False):
        if self.gan_mode in ['lsgan', 'vanilla']:
            target = self.get_target_tensor(prediction, target_is_real)
            return self.loss(prediction, target)
        elif self.gan_mode == 'wgangp':
            if target_is_real:
                if add_gradient:
                    # Small drift penalty keeps critic outputs near zero.
                    return (- prediction.mean()) + (0.001 * (prediction ** 2).mean())
                return - prediction.mean()
            return prediction.mean()
class FunctionTest(unittest.TestCase):
    # Exercises the public functions of the asizeof module (asized,
    # asizesof, asizeof, Asizer, basicsize, itemsize, leng, refs, adict).

    def test_asized(self):
        # No arguments -> empty result; unknown keyword -> KeyError.
        self.assertEqual(list(asizeof.asized(detail=2)), [])
        self.assertRaises(KeyError, asizeof.asized, **{'all': True})
        # A sized Foo reports its __dict__ and the 42 stored in it.
        sized = asizeof.asized(Foo(42), detail=2)
        self.assertEqual(sized.name, 'Foo')
        refs = [ref for ref in sized.refs if (ref.name == '__dict__')]
        self.assertEqual(len(refs), 1)
        self.assertEqual(refs[0], sized.get('__dict__'))
        refs = [ref for ref in refs[0].refs if (ref.name == '[V] data: 42')]
        self.assertEqual(len(refs), 1, refs)
        i = 42
        self.assertEqual(refs[0].size, asizeof.asizeof(i), refs[0].size)
        # Asizer.asized accepts multiple objects.
        sizer = asizeof.Asizer()
        sized_objs = sizer.asized(Foo(3), Foo(4), detail=2)
        self.assertEqual(len(sized_objs), 2)

    def test_asized_detail(self):
        # The detail level changes the breakdown, not the total size.
        foo = Foo(42)
        size1 = asizeof.asized(foo, detail=1)
        size2 = asizeof.asized(foo, detail=2)
        self.assertEqual(size1.size, size2.size)

    def test_asized_format(self):
        # format() output depends on the requested depth only up to the
        # detail the Asized object was created with.
        foo = Foo(42)
        sized1 = asizeof.asized(foo, detail=1)
        sized2 = asizeof.asized(foo, detail=2)
        sized1_no = sized1.format('%(name)s', order_by='name')
        sized1_d1 = sized1.format('%(name)s', depth=1, order_by='name')
        sized1_d2 = sized1.format('%(name)s', depth=2, order_by='name')
        sized2_d1 = sized2.format('%(name)s', depth=1, order_by='name')
        sized2_d2 = sized2.format('%(name)s', depth=2, order_by='name')
        self.assertEqual(sized1_no, 'Foo\n __class__\n __dict__')
        self.assertEqual(sized1_no, sized1_d1)
        self.assertEqual(sized1_no, sized1_d2)
        self.assertEqual(sized1_d1, sized2_d1)
        self.assertNotEqual(sized2_d1, sized2_d2)

    def test_asizesof(self):
        # Per-object sizes are independent of argument order and agree
        # with individual asizeof() calls.
        self.assertEqual(list(asizeof.asizesof()), [])
        self.assertRaises(KeyError, asizeof.asizesof, **{'all': True})
        objs = [Foo(42), ThinFoo('spam'), OldFoo(67)]
        sizes = list(asizeof.asizesof(*objs))
        objs.reverse()
        rsizes = list(asizeof.asizesof(*objs))
        self.assertEqual(len(sizes), 3)
        rsizes.reverse()
        self.assertEqual(sizes, rsizes, (sizes, rsizes))
        objs.reverse()
        isizes = [asizeof.asizeof(obj) for obj in objs]
        self.assertEqual(sizes, isizes)
        # code=True must change the reported sizes.
        sizer = asizeof.Asizer()
        asizer_sizes = sizer.asizesof(*objs)
        self.assertEqual(list(asizer_sizes), sizes)
        code_sizes = sizer.asizesof(*objs, **dict(code=True))
        self.assertNotEqual(list(code_sizes), sizes)

    def test_asizeof(self):
        # The combined total equals the sum of the individual sizes.
        self.assertEqual(asizeof.asizeof(), 0)
        objs = [Foo(42), ThinFoo('spam'), OldFoo(67)]
        total = asizeof.asizeof(*objs)
        sizes = list(asizeof.asizesof(*objs))
        sum = 0
        for sz in sizes:
            sum += sz
        self.assertEqual(total, sum, (total, sum))

    def test_asizer_limit(self):
        # A higher recursion limit must include strictly more referents.
        objs = [Foo(42), ThinFoo('spam'), OldFoo(67)]
        sizer = [asizeof.Asizer() for _ in range(4)]
        for (limit, asizer) in enumerate(sizer):
            asizer.asizeof(objs, limit=limit)
        limit_sizes = [asizer.total for asizer in sizer]
        self.assertTrue((limit_sizes[0] < limit_sizes[1]), limit_sizes)
        self.assertTrue((limit_sizes[1] < limit_sizes[2]), limit_sizes)
        self.assertTrue((limit_sizes[2] < limit_sizes[3]), limit_sizes)

    def test_basicsize(self):
        # basicsize() matches type.__basicsize__ (GC-tracked containers
        # additionally carry the CPython GC header).
        objects = [1, '', 'a', True, None]
        for o in objects:
            self.assertEqual(asizeof.basicsize(o), type(o).__basicsize__)
        objects = [[], (), {}]
        for o in objects:
            self.assertEqual((asizeof.basicsize(o) - asizeof._sizeof_CPyGC_Head), type(o).__basicsize__)
        # basicsize is per-type, independent of the contents.
        l1 = [1, 2, 3, 4]
        l2 = ['spam', 2, 3, 4, 'eggs', 6, 7, 8]
        self.assertEqual(asizeof.basicsize(l1), asizeof.basicsize(l2))

    def test_itemsize(self):
        # itemsize() matches type.__itemsize__, with known CPython entry
        # sizes for dict and set.
        objects = [1, True, None, ()]
        for o in objects:
            self.assertEqual(asizeof.itemsize(o), type(o).__itemsize__)
        itemsizes = [({}, asizeof._sizeof_CPyDictEntry), (set(), asizeof._sizeof_Csetentry)]
        for (o, itemsize) in itemsizes:
            self.assertEqual(asizeof.itemsize(o), itemsize)

    def test_leng(self):
        # leng() is at least the logical length (over-allocation allowed).
        l = [1, 2, 3, 4]
        s = 'spam'
        self.assertTrue((asizeof.leng(l) >= len(l)), asizeof.leng(l))
        self.assertEqual(asizeof.leng(tuple(l)), len(l))
        self.assertTrue((asizeof.leng(set(l)) >= len(set(l))))
        self.assertTrue((asizeof.leng(s) >= len(s)))
        self.assertTrue((asizeof.leng(42) in [None, 1]), asizeof.leng(42))
        # For ints, leng counts internal digits; `long(base)` is a Python 2
        # compatibility shim, a no-op on Python 3.
        base = 2
        try:
            base = long(base)
        except NameError:
            pass
        self.assertEqual(asizeof.leng(((base ** 8) - 1)), 1)
        self.assertEqual(asizeof.leng(((base ** 16) - 1)), 1)
        self.assertTrue((asizeof.leng(((base ** 32) - 1)) >= 1))
        self.assertTrue((asizeof.leng(((base ** 64) - 1)) >= 2))

    def test_refs(self):
        # refs() yields the instance attribute holders for each class style
        # (new-style dict, old-style dict, __slots__ values).
        f = Foo(42)
        refs = list(asizeof.refs(f))
        self.assertTrue((len(refs) >= 1), len(refs))
        self.assertTrue(({'data': 42} in refs), refs)
        f = OldFoo(42)
        refs = list(asizeof.refs(f))
        self.assertTrue((len(refs) >= 1), len(refs))
        self.assertTrue(({'odata': 42} in refs), refs)
        f = ThinFoo(42)
        refs = list(asizeof.refs(f))
        self.assertTrue((len(refs) >= 2), len(refs))
        self.assertTrue((42 in refs), refs)

    def test_exclude_types(self):
        # Excluded types size to zero.
        sizer = asizeof.Asizer()
        sizer.exclude_types(Foo)
        self.assertEqual(sizer.asizeof(Foo('ignored')), 0)

    def test_asizer(self):
        # Asizer accumulates totals and counts duplicate objects.
        sizer = asizeof.Asizer()
        obj = 'unladen swallow'
        mutable = [obj]
        sizer.asizeof(obj)
        self.assertEqual(sizer.total, asizeof.asizeof(obj))
        sizer.asizeof(mutable, mutable)
        self.assertEqual(sizer.duplicate, 2)
        self.assertEqual(sizer.total, asizeof.asizeof(obj, mutable))

    def test_adict(self):
        # Registering a dict-like type via adict() must not change sizes
        # already computed for its instances.
        pdict = PseudoDict()
        size1 = asizeof.asizeof(pdict)
        asizeof.adict(PseudoDict)
        size2 = asizeof.asizeof(pdict)
        self.assertEqual(size1, size2)

    def test_private_slots(self):
        # Name-mangled __slots__ attributes are still followed: the
        # container must size larger than its payload alone.
        class PrivateSlot(object):
            __slots__ = ('__data',)

            def __init__(self, data):
                self.__data = data
        data = ([42] * 100)
        container = PrivateSlot(data)
        size1 = asizeof.asizeof(container)
        size2 = asizeof.asizeof(data)
        self.assertTrue((size1 > size2), (size1, size2))
class AttentionBlock(nn.Module):
    """Residual self-attention over the flattened spatial positions of a
    feature map, with optional gradient checkpointing."""

    def __init__(self, channels, num_heads=1, use_checkpoint=False):
        super().__init__()
        self.channels = channels
        self.num_heads = num_heads
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        # A single 1-D conv emits q, k and v stacked along channels.
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        self.attention = QKVAttention()
        # zero_module presumably zero-initialises the projection so the
        # residual starts as identity — confirm against its definition.
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        # checkpoint() trades compute for memory when use_checkpoint is set.
        return checkpoint(self._forward, (x,), self.parameters(), self.use_checkpoint)

    def _forward(self, x):
        batch, channels, *spatial = x.shape
        flat = x.reshape(batch, channels, -1)
        qkv = self.qkv(self.norm(flat))
        # Split channel dim across heads before attending.
        qkv = qkv.reshape(batch * self.num_heads, -1, qkv.shape[2])
        attended = self.attention(qkv)
        attended = attended.reshape(batch, -1, attended.shape[-1])
        projected = self.proj_out(attended)
        # Residual connection, then restore the original spatial shape.
        return (flat + projected).reshape(batch, channels, *spatial)
def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:
    """Early-stopping check driven by cfg.checkpoint.patience.

    The best metric seen so far and a no-improvement counter are stored as
    attributes on this function, so state persists across calls.  Returns
    True once the metric has failed to improve for `patience` calls.
    """
    if valid_loss is None:
        return False
    patience = cfg.checkpoint.patience
    if patience <= 0:
        return False

    def improved(current, best):
        # Comparison direction depends on whether the metric is maximised.
        if cfg.checkpoint.maximize_best_checkpoint_metric:
            return current > best
        return current < best

    best_so_far = getattr(should_stop_early, 'best', None)
    if best_so_far is None or improved(valid_loss, best_so_far):
        should_stop_early.best = valid_loss
        should_stop_early.num_runs = 0
        return False
    should_stop_early.num_runs += 1
    if should_stop_early.num_runs < patience:
        return False
    logger.info("early stop since valid performance hasn't improved for last {} runs".format(patience))
    return True
def module_name_from_dir(dirname, err=True, files=None):
    """Return the path of the single compiled module (.so/.pyd) in `dirname`.

    When `err` is False, a missing directory or missing module yields None
    instead of an exception.  `files` may pre-supply the directory listing.

    Raises OSError for listing failures not covered by `err`, and
    ValueError when the module count is ambiguous.
    """
    if files is None:
        try:
            files = os.listdir(dirname)
        except OSError as e:
            if e.errno == 2 and not err:  # ENOENT: directory does not exist
                return None
            # Fix: the original swallowed other OSErrors here and fell
            # through with `files` unbound, raising a confusing NameError;
            # propagate the real error instead.
            raise
    names = [file for file in files if file.endswith(('.so', '.pyd'))]
    if len(names) == 0 and not err:
        return None
    elif len(names) == 1:
        return os.path.join(dirname, names[0])
    else:
        # Also reached for zero modules with err=True (historical behaviour).
        raise ValueError('More than 1 compiled module in this directory:' + dirname)
def get_gml_graph(location):
    """Load a DOT file as an undirected networkx graph with normalised
    node names.

    Normalisation per node: names containing a comma are wrapped in double
    quotes, a trailing dot is stripped, and the first letter is upper-cased
    (single-letter names are fully upper-cased).

    Raises Exception('Could not open file!') on any failure, chained to
    the underlying error.
    """
    try:
        graph = nx_pydot.read_dot(location).to_undirected()
        mapping = {}
        for node in sorted(graph):
            name = node
            if ',' in name:
                name = '"' + name + '"'
            if name[-1] == '.':
                name = name[:-1]
            if not name[0].isupper():
                name = name.upper() if len(name) == 1 else name[0].upper() + name[1:]
            mapping[node] = name
        return nx.relabel_nodes(graph, mapping)
    except Exception as exc:
        # Fix: the original bare `except:` discarded the real cause; keep
        # the legacy message/type but chain the underlying error.
        raise Exception('Could not open file!') from exc
def run_data_migration(apps, schema_editor):
    """Copy verbose-name translations (and, for questions, range limits)
    from the domain models onto question sets and questions."""
    QuestionSet = apps.get_model('questions', 'QuestionSet')
    Question = apps.get_model('questions', 'Question')
    VerboseName = apps.get_model('domain', 'VerboseName')
    Range = apps.get_model('domain', 'Range')

    def copy_verbose_name(obj):
        # Copy EN/DE singular and plural names from the attribute entity's
        # VerboseName onto obj; silently skip when none exists.
        try:
            verbose_name = obj.attribute_entity.verbosename
        except VerboseName.DoesNotExist:
            return
        obj.verbose_name_en = verbose_name.name_en
        obj.verbose_name_de = verbose_name.name_de
        obj.verbose_name_plural_en = verbose_name.name_plural_en
        obj.verbose_name_plural_de = verbose_name.name_plural_de
        obj.save()

    for questionset in QuestionSet.objects.exclude(attribute_entity=None):
        copy_verbose_name(questionset)

    for question in Question.objects.all():
        copy_verbose_name(question)
        # Questions additionally inherit min/max/step from the attribute's
        # Range, when present.
        try:
            value_range = question.attribute_entity.attribute.range
        except Range.DoesNotExist:
            pass
        else:
            question.minimum = value_range.minimum
            question.maximum = value_range.maximum
            question.step = value_range.step
            question.save()