repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
reiinakano/xcessiv
xcessiv/automatedruns.py
start_tpot
def start_tpot(automated_run, session, path): """Starts a TPOT automated run that exports directly to base learner setup Args: automated_run (xcessiv.models.AutomatedRun): Automated run object session: Valid SQLAlchemy session path (str, unicode): Path to project folder """ module = functions.import_string_code_as_module(automated_run.source) extraction = session.query(models.Extraction).first() X, y = extraction.return_train_dataset() tpot_learner = module.tpot_learner tpot_learner.fit(X, y) temp_filename = os.path.join(path, 'tpot-temp-export-{}'.format(os.getpid())) tpot_learner.export(temp_filename) with open(temp_filename) as f: base_learner_source = f.read() base_learner_source = constants.tpot_learner_docstring + base_learner_source try: os.remove(temp_filename) except OSError: pass blo = models.BaseLearnerOrigin( source=base_learner_source, name='TPOT Learner', meta_feature_generator='predict' ) session.add(blo) session.commit()
python
def start_tpot(automated_run, session, path):
    """Starts a TPOT automated run that exports directly to base learner setup

    Args:
        automated_run (xcessiv.models.AutomatedRun): Automated run object

        session: Valid SQLAlchemy session

        path (str, unicode): Path to project folder
    """
    module = functions.import_string_code_as_module(automated_run.source)
    extraction = session.query(models.Extraction).first()
    X, y = extraction.return_train_dataset()

    tpot_learner = module.tpot_learner

    tpot_learner.fit(X, y)

    # PID-suffixed name so concurrent runs in the same project folder
    # don't clobber each other's export files
    temp_filename = os.path.join(path, 'tpot-temp-export-{}'.format(os.getpid()))
    tpot_learner.export(temp_filename)

    # BUGFIX: previously the temp file was removed only after a successful
    # read, so a failing open()/read() leaked the export file. try/finally
    # guarantees cleanup either way.
    try:
        with open(temp_filename) as f:
            base_learner_source = f.read()
    finally:
        try:
            os.remove(temp_filename)
        except OSError:
            # Best-effort cleanup; a missing/unremovable temp file is not fatal
            pass

    # Prepend the canned docstring so the generated source is self-describing
    base_learner_source = constants.tpot_learner_docstring + base_learner_source

    blo = models.BaseLearnerOrigin(
        source=base_learner_source,
        name='TPOT Learner',
        meta_feature_generator='predict'
    )

    session.add(blo)
    session.commit()
[ "def", "start_tpot", "(", "automated_run", ",", "session", ",", "path", ")", ":", "module", "=", "functions", ".", "import_string_code_as_module", "(", "automated_run", ".", "source", ")", "extraction", "=", "session", ".", "query", "(", "models", ".", "Extraction", ")", ".", "first", "(", ")", "X", ",", "y", "=", "extraction", ".", "return_train_dataset", "(", ")", "tpot_learner", "=", "module", ".", "tpot_learner", "tpot_learner", ".", "fit", "(", "X", ",", "y", ")", "temp_filename", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'tpot-temp-export-{}'", ".", "format", "(", "os", ".", "getpid", "(", ")", ")", ")", "tpot_learner", ".", "export", "(", "temp_filename", ")", "with", "open", "(", "temp_filename", ")", "as", "f", ":", "base_learner_source", "=", "f", ".", "read", "(", ")", "base_learner_source", "=", "constants", ".", "tpot_learner_docstring", "+", "base_learner_source", "try", ":", "os", ".", "remove", "(", "temp_filename", ")", "except", "OSError", ":", "pass", "blo", "=", "models", ".", "BaseLearnerOrigin", "(", "source", "=", "base_learner_source", ",", "name", "=", "'TPOT Learner'", ",", "meta_feature_generator", "=", "'predict'", ")", "session", ".", "add", "(", "blo", ")", "session", ".", "commit", "(", ")" ]
Starts a TPOT automated run that exports directly to base learner setup Args: automated_run (xcessiv.models.AutomatedRun): Automated run object session: Valid SQLAlchemy session path (str, unicode): Path to project folder
[ "Starts", "a", "TPOT", "automated", "run", "that", "exports", "directly", "to", "base", "learner", "setup" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/automatedruns.py#L210-L248
train
reiinakano/xcessiv
xcessiv/automatedruns.py
start_greedy_ensemble_search
def start_greedy_ensemble_search(automated_run, session, path): """Starts an automated ensemble search using greedy forward model selection. The steps for this search are adapted from "Ensemble Selection from Libraries of Models" by Caruana. 1. Start with the empty ensemble 2. Add to the ensemble the model in the library that maximizes the ensemmble's performance on the error metric. 3. Repeat step 2 for a fixed number of iterations or until all models have been used. Args: automated_run (xcessiv.models.AutomatedRun): Automated run object session: Valid SQLAlchemy session path (str, unicode): Path to project folder """ module = functions.import_string_code_as_module(automated_run.source) assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators best_ensemble = [] # List containing IDs of best performing ensemble for the last round secondary_learner = automated_run.base_learner_origin.return_estimator() secondary_learner.set_params(**module.secondary_learner_hyperparameters) for i in range(module.max_num_base_learners): best_score = -float('inf') # Best metric for this round (not in total!) 
current_ensemble = best_ensemble[:] # Shallow copy of best ensemble for base_learner in session.query(models.BaseLearner).filter_by(job_status='finished').all(): if base_learner in current_ensemble: # Don't append when learner is already in continue current_ensemble.append(base_learner) # Check if our "best ensemble" already exists existing_ensemble = session.query(models.StackedEnsemble).\ filter_by(base_learner_origin_id=automated_run.base_learner_origin.id, secondary_learner_hyperparameters=secondary_learner.get_params(), base_learner_ids=sorted([bl.id for bl in current_ensemble])).first() if existing_ensemble and existing_ensemble.job_status == 'finished': score = existing_ensemble.individual_score[module.metric_to_optimize] elif existing_ensemble and existing_ensemble.job_status != 'finished': eval_stacked_ensemble(existing_ensemble, session, path) score = existing_ensemble.individual_score[module.metric_to_optimize] else: stacked_ensemble = models.StackedEnsemble( secondary_learner_hyperparameters=secondary_learner.get_params(), base_learners=current_ensemble, base_learner_origin=automated_run.base_learner_origin, job_status='started' ) session.add(stacked_ensemble) session.commit() eval_stacked_ensemble(stacked_ensemble, session, path) score = stacked_ensemble.individual_score[module.metric_to_optimize] score = -score if module.invert_metric else score if best_score < score: best_score = score best_ensemble = current_ensemble[:] current_ensemble.pop()
python
def start_greedy_ensemble_search(automated_run, session, path):
    """Starts an automated ensemble search using greedy forward model selection.

    The steps for this search are adapted from "Ensemble Selection from
    Libraries of Models" by Caruana.

    1. Start with the empty ensemble
    2. Add to the ensemble the model in the library that maximizes the
       ensemble's performance on the error metric.
    3. Repeat step 2 for a fixed number of iterations or until all models
       have been used.

    Args:
        automated_run (xcessiv.models.AutomatedRun): Automated run object

        session: Valid SQLAlchemy session

        path (str, unicode): Path to project folder
    """
    # The run's user-supplied source must define metric_to_optimize,
    # invert_metric, secondary_learner_hyperparameters, max_num_base_learners
    module = functions.import_string_code_as_module(automated_run.source)
    assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators

    best_ensemble = []  # List containing IDs of best performing ensemble for the last round
    secondary_learner = automated_run.base_learner_origin.return_estimator()
    secondary_learner.set_params(**module.secondary_learner_hyperparameters)

    # Each outer iteration grows the best ensemble by at most one learner
    for i in range(module.max_num_base_learners):
        best_score = -float('inf')  # Best metric for this round (not in total!)
        current_ensemble = best_ensemble[:]  # Shallow copy of best ensemble
        for base_learner in session.query(models.BaseLearner).filter_by(job_status='finished').all():
            if base_learner in current_ensemble:  # Don't append when learner is already in
                continue
            # Tentatively extend the ensemble with this candidate; the
            # matching pop() at the end of the loop body undoes it
            current_ensemble.append(base_learner)

            # Check if our "best ensemble" already exists
            existing_ensemble = session.query(models.StackedEnsemble).\
                filter_by(base_learner_origin_id=automated_run.base_learner_origin.id,
                          secondary_learner_hyperparameters=secondary_learner.get_params(),
                          base_learner_ids=sorted([bl.id for bl in current_ensemble])).first()

            if existing_ensemble and existing_ensemble.job_status == 'finished':
                # Reuse the already-computed score
                score = existing_ensemble.individual_score[module.metric_to_optimize]
            elif existing_ensemble and existing_ensemble.job_status != 'finished':
                # Ensemble row exists but was never evaluated — evaluate it now
                eval_stacked_ensemble(existing_ensemble, session, path)
                score = existing_ensemble.individual_score[module.metric_to_optimize]
            else:
                # Brand new combination — persist then evaluate
                stacked_ensemble = models.StackedEnsemble(
                    secondary_learner_hyperparameters=secondary_learner.get_params(),
                    base_learners=current_ensemble,
                    base_learner_origin=automated_run.base_learner_origin,
                    job_status='started'
                )
                session.add(stacked_ensemble)
                session.commit()
                eval_stacked_ensemble(stacked_ensemble, session, path)
                score = stacked_ensemble.individual_score[module.metric_to_optimize]

            # Negate "lower is better" metrics so the comparison below is
            # always maximize
            score = -score if module.invert_metric else score

            if best_score < score:
                best_score = score
                best_ensemble = current_ensemble[:]

            # Undo the tentative append before trying the next candidate
            current_ensemble.pop()
[ "def", "start_greedy_ensemble_search", "(", "automated_run", ",", "session", ",", "path", ")", ":", "module", "=", "functions", ".", "import_string_code_as_module", "(", "automated_run", ".", "source", ")", "assert", "module", ".", "metric_to_optimize", "in", "automated_run", ".", "base_learner_origin", ".", "metric_generators", "best_ensemble", "=", "[", "]", "# List containing IDs of best performing ensemble for the last round", "secondary_learner", "=", "automated_run", ".", "base_learner_origin", ".", "return_estimator", "(", ")", "secondary_learner", ".", "set_params", "(", "*", "*", "module", ".", "secondary_learner_hyperparameters", ")", "for", "i", "in", "range", "(", "module", ".", "max_num_base_learners", ")", ":", "best_score", "=", "-", "float", "(", "'inf'", ")", "# Best metric for this round (not in total!)", "current_ensemble", "=", "best_ensemble", "[", ":", "]", "# Shallow copy of best ensemble", "for", "base_learner", "in", "session", ".", "query", "(", "models", ".", "BaseLearner", ")", ".", "filter_by", "(", "job_status", "=", "'finished'", ")", ".", "all", "(", ")", ":", "if", "base_learner", "in", "current_ensemble", ":", "# Don't append when learner is already in", "continue", "current_ensemble", ".", "append", "(", "base_learner", ")", "# Check if our \"best ensemble\" already exists", "existing_ensemble", "=", "session", ".", "query", "(", "models", ".", "StackedEnsemble", ")", ".", "filter_by", "(", "base_learner_origin_id", "=", "automated_run", ".", "base_learner_origin", ".", "id", ",", "secondary_learner_hyperparameters", "=", "secondary_learner", ".", "get_params", "(", ")", ",", "base_learner_ids", "=", "sorted", "(", "[", "bl", ".", "id", "for", "bl", "in", "current_ensemble", "]", ")", ")", ".", "first", "(", ")", "if", "existing_ensemble", "and", "existing_ensemble", ".", "job_status", "==", "'finished'", ":", "score", "=", "existing_ensemble", ".", "individual_score", "[", "module", ".", "metric_to_optimize", "]", "elif", 
"existing_ensemble", "and", "existing_ensemble", ".", "job_status", "!=", "'finished'", ":", "eval_stacked_ensemble", "(", "existing_ensemble", ",", "session", ",", "path", ")", "score", "=", "existing_ensemble", ".", "individual_score", "[", "module", ".", "metric_to_optimize", "]", "else", ":", "stacked_ensemble", "=", "models", ".", "StackedEnsemble", "(", "secondary_learner_hyperparameters", "=", "secondary_learner", ".", "get_params", "(", ")", ",", "base_learners", "=", "current_ensemble", ",", "base_learner_origin", "=", "automated_run", ".", "base_learner_origin", ",", "job_status", "=", "'started'", ")", "session", ".", "add", "(", "stacked_ensemble", ")", "session", ".", "commit", "(", ")", "eval_stacked_ensemble", "(", "stacked_ensemble", ",", "session", ",", "path", ")", "score", "=", "stacked_ensemble", ".", "individual_score", "[", "module", ".", "metric_to_optimize", "]", "score", "=", "-", "score", "if", "module", ".", "invert_metric", "else", "score", "if", "best_score", "<", "score", ":", "best_score", "=", "score", "best_ensemble", "=", "current_ensemble", "[", ":", "]", "current_ensemble", ".", "pop", "(", ")" ]
Starts an automated ensemble search using greedy forward model selection. The steps for this search are adapted from "Ensemble Selection from Libraries of Models" by Caruana. 1. Start with the empty ensemble 2. Add to the ensemble the model in the library that maximizes the ensemble's performance on the error metric. 3. Repeat step 2 for a fixed number of iterations or until all models have been used. Args: automated_run (xcessiv.models.AutomatedRun): Automated run object session: Valid SQLAlchemy session path (str, unicode): Path to project folder
[ "Starts", "an", "automated", "ensemble", "search", "using", "greedy", "forward", "model", "selection", "." ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/automatedruns.py#L331-L398
train
reiinakano/xcessiv
xcessiv/rqtasks.py
extraction_data_statistics
def extraction_data_statistics(path): """ Generates data statistics for the given data extraction setup stored in Xcessiv notebook. This is in rqtasks.py but not as a job yet. Temporarily call this directly while I'm figuring out Javascript lel. Args: path (str, unicode): Path to xcessiv notebook """ with functions.DBContextManager(path) as session: extraction = session.query(models.Extraction).first() X, y = extraction.return_main_dataset() functions.verify_dataset(X, y) if extraction.test_dataset['method'] == 'split_from_main': X, X_test, y, y_test = train_test_split( X, y, test_size=extraction.test_dataset['split_ratio'], random_state=extraction.test_dataset['split_seed'], stratify=y ) elif extraction.test_dataset['method'] == 'source': if 'source' not in extraction.test_dataset or not extraction.test_dataset['source']: raise exceptions.UserError('Source is empty') extraction_code = extraction.test_dataset["source"] extraction_function = functions.\ import_object_from_string_code(extraction_code, "extract_test_dataset") X_test, y_test = extraction_function() else: X_test, y_test = None, None # test base learner cross-validation extraction_code = extraction.meta_feature_generation['source'] return_splits_iterable = functions.import_object_from_string_code( extraction_code, 'return_splits_iterable' ) number_of_splits = 0 test_indices = [] try: for train_idx, test_idx in return_splits_iterable(X, y): number_of_splits += 1 test_indices.append(test_idx) except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) # preparation before testing stacked ensemble cross-validation test_indices = np.concatenate(test_indices) X, y = X[test_indices], y[test_indices] # test stacked ensemble cross-validation extraction_code = extraction.stacked_ensemble_cv['source'] return_splits_iterable = functions.import_object_from_string_code( extraction_code, 'return_splits_iterable' ) number_of_splits_stacked_cv = 0 try: for train_idx, test_idx in 
return_splits_iterable(X, y): number_of_splits_stacked_cv += 1 except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) data_stats = dict() data_stats['train_data_stats'] = functions.verify_dataset(X, y) if X_test is not None: data_stats['test_data_stats'] = functions.verify_dataset(X_test, y_test) else: data_stats['test_data_stats'] = None data_stats['holdout_data_stats'] = {'number_of_splits': number_of_splits} data_stats['stacked_ensemble_cv_stats'] = {'number_of_splits': number_of_splits_stacked_cv} extraction.data_statistics = data_stats session.add(extraction) session.commit()
python
def extraction_data_statistics(path):
    """ Generates data statistics for the given data extraction setup stored
    in Xcessiv notebook.

    This is in rqtasks.py but not as a job yet. Temporarily call this directly
    while I'm figuring out Javascript lel.

    Args:
        path (str, unicode): Path to xcessiv notebook
    """
    with functions.DBContextManager(path) as session:
        extraction = session.query(models.Extraction).first()
        X, y = extraction.return_main_dataset()
        functions.verify_dataset(X, y)

        test_cfg = extraction.test_dataset
        if test_cfg['method'] == 'split_from_main':
            X, X_test, y, y_test = train_test_split(
                X,
                y,
                test_size=test_cfg['split_ratio'],
                random_state=test_cfg['split_seed'],
                stratify=y
            )
        elif test_cfg['method'] == 'source':
            if 'source' not in test_cfg or not test_cfg['source']:
                raise exceptions.UserError('Source is empty')
            extract_fn = functions.import_object_from_string_code(
                test_cfg["source"], "extract_test_dataset")
            X_test, y_test = extract_fn()
        else:
            X_test, y_test = None, None

        # test base learner cross-validation
        holdout_splits_fn = functions.import_object_from_string_code(
            extraction.meta_feature_generation['source'],
            'return_splits_iterable'
        )
        holdout_split_count = 0
        collected_test_indices = []
        try:
            for _train_idx, fold_test_idx in holdout_splits_fn(X, y):
                holdout_split_count += 1
                collected_test_indices.append(fold_test_idx)
        except Exception as e:
            raise exceptions.UserError('User code exception', exception_message=str(e))

        # preparation before testing stacked ensemble cross-validation
        all_test_indices = np.concatenate(collected_test_indices)
        X, y = X[all_test_indices], y[all_test_indices]

        # test stacked ensemble cross-validation
        stacked_splits_fn = functions.import_object_from_string_code(
            extraction.stacked_ensemble_cv['source'],
            'return_splits_iterable'
        )
        stacked_cv_split_count = 0
        try:
            for _train_idx, _test_idx in stacked_splits_fn(X, y):
                stacked_cv_split_count += 1
        except Exception as e:
            raise exceptions.UserError('User code exception', exception_message=str(e))

        data_stats = dict()
        data_stats['train_data_stats'] = functions.verify_dataset(X, y)
        if X_test is not None:
            data_stats['test_data_stats'] = functions.verify_dataset(X_test, y_test)
        else:
            data_stats['test_data_stats'] = None
        data_stats['holdout_data_stats'] = {'number_of_splits': holdout_split_count}
        data_stats['stacked_ensemble_cv_stats'] = {'number_of_splits': stacked_cv_split_count}

        extraction.data_statistics = data_stats
        session.add(extraction)
        session.commit()
[ "def", "extraction_data_statistics", "(", "path", ")", ":", "with", "functions", ".", "DBContextManager", "(", "path", ")", "as", "session", ":", "extraction", "=", "session", ".", "query", "(", "models", ".", "Extraction", ")", ".", "first", "(", ")", "X", ",", "y", "=", "extraction", ".", "return_main_dataset", "(", ")", "functions", ".", "verify_dataset", "(", "X", ",", "y", ")", "if", "extraction", ".", "test_dataset", "[", "'method'", "]", "==", "'split_from_main'", ":", "X", ",", "X_test", ",", "y", ",", "y_test", "=", "train_test_split", "(", "X", ",", "y", ",", "test_size", "=", "extraction", ".", "test_dataset", "[", "'split_ratio'", "]", ",", "random_state", "=", "extraction", ".", "test_dataset", "[", "'split_seed'", "]", ",", "stratify", "=", "y", ")", "elif", "extraction", ".", "test_dataset", "[", "'method'", "]", "==", "'source'", ":", "if", "'source'", "not", "in", "extraction", ".", "test_dataset", "or", "not", "extraction", ".", "test_dataset", "[", "'source'", "]", ":", "raise", "exceptions", ".", "UserError", "(", "'Source is empty'", ")", "extraction_code", "=", "extraction", ".", "test_dataset", "[", "\"source\"", "]", "extraction_function", "=", "functions", ".", "import_object_from_string_code", "(", "extraction_code", ",", "\"extract_test_dataset\"", ")", "X_test", ",", "y_test", "=", "extraction_function", "(", ")", "else", ":", "X_test", ",", "y_test", "=", "None", ",", "None", "# test base learner cross-validation", "extraction_code", "=", "extraction", ".", "meta_feature_generation", "[", "'source'", "]", "return_splits_iterable", "=", "functions", ".", "import_object_from_string_code", "(", "extraction_code", ",", "'return_splits_iterable'", ")", "number_of_splits", "=", "0", "test_indices", "=", "[", "]", "try", ":", "for", "train_idx", ",", "test_idx", "in", "return_splits_iterable", "(", "X", ",", "y", ")", ":", "number_of_splits", "+=", "1", "test_indices", ".", "append", "(", "test_idx", ")", "except", "Exception", "as", "e", 
":", "raise", "exceptions", ".", "UserError", "(", "'User code exception'", ",", "exception_message", "=", "str", "(", "e", ")", ")", "# preparation before testing stacked ensemble cross-validation", "test_indices", "=", "np", ".", "concatenate", "(", "test_indices", ")", "X", ",", "y", "=", "X", "[", "test_indices", "]", ",", "y", "[", "test_indices", "]", "# test stacked ensemble cross-validation", "extraction_code", "=", "extraction", ".", "stacked_ensemble_cv", "[", "'source'", "]", "return_splits_iterable", "=", "functions", ".", "import_object_from_string_code", "(", "extraction_code", ",", "'return_splits_iterable'", ")", "number_of_splits_stacked_cv", "=", "0", "try", ":", "for", "train_idx", ",", "test_idx", "in", "return_splits_iterable", "(", "X", ",", "y", ")", ":", "number_of_splits_stacked_cv", "+=", "1", "except", "Exception", "as", "e", ":", "raise", "exceptions", ".", "UserError", "(", "'User code exception'", ",", "exception_message", "=", "str", "(", "e", ")", ")", "data_stats", "=", "dict", "(", ")", "data_stats", "[", "'train_data_stats'", "]", "=", "functions", ".", "verify_dataset", "(", "X", ",", "y", ")", "if", "X_test", "is", "not", "None", ":", "data_stats", "[", "'test_data_stats'", "]", "=", "functions", ".", "verify_dataset", "(", "X_test", ",", "y_test", ")", "else", ":", "data_stats", "[", "'test_data_stats'", "]", "=", "None", "data_stats", "[", "'holdout_data_stats'", "]", "=", "{", "'number_of_splits'", ":", "number_of_splits", "}", "data_stats", "[", "'stacked_ensemble_cv_stats'", "]", "=", "{", "'number_of_splits'", ":", "number_of_splits_stacked_cv", "}", "extraction", ".", "data_statistics", "=", "data_stats", "session", ".", "add", "(", "extraction", ")", "session", ".", "commit", "(", ")" ]
Generates data statistics for the given data extraction setup stored in Xcessiv notebook. This is in rqtasks.py but not as a job yet. Temporarily call this directly while I'm figuring out Javascript lel. Args: path (str, unicode): Path to xcessiv notebook
[ "Generates", "data", "statistics", "for", "the", "given", "data", "extraction", "setup", "stored", "in", "Xcessiv", "notebook", "." ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/rqtasks.py#L17-L95
train
reiinakano/xcessiv
xcessiv/rqtasks.py
generate_meta_features
def generate_meta_features(path, base_learner_id): """Generates meta-features for specified base learner After generation of meta-features, the file is saved into the meta-features folder Args: path (str): Path to Xcessiv notebook base_learner_id (str): Base learner ID """ with functions.DBContextManager(path) as session: base_learner = session.query(models.BaseLearner).filter_by(id=base_learner_id).first() if not base_learner: raise exceptions.UserError('Base learner {} ' 'does not exist'.format(base_learner_id)) base_learner.job_id = get_current_job().id base_learner.job_status = 'started' session.add(base_learner) session.commit() try: est = base_learner.return_estimator() extraction = session.query(models.Extraction).first() X, y = extraction.return_train_dataset() return_splits_iterable = functions.import_object_from_string_code( extraction.meta_feature_generation['source'], 'return_splits_iterable' ) meta_features_list = [] trues_list = [] for train_index, test_index in return_splits_iterable(X, y): X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] est = est.fit(X_train, y_train) meta_features_list.append( getattr(est, base_learner.base_learner_origin. 
meta_feature_generator)(X_test) ) trues_list.append(y_test) meta_features = np.concatenate(meta_features_list, axis=0) y_true = np.concatenate(trues_list) for key in base_learner.base_learner_origin.metric_generators: metric_generator = functions.import_object_from_string_code( base_learner.base_learner_origin.metric_generators[key], 'metric_generator' ) base_learner.individual_score[key] = metric_generator(y_true, meta_features) meta_features_path = base_learner.meta_features_path(path) if not os.path.exists(os.path.dirname(meta_features_path)): os.makedirs(os.path.dirname(meta_features_path)) np.save(meta_features_path, meta_features, allow_pickle=False) base_learner.job_status = 'finished' base_learner.meta_features_exists = True session.add(base_learner) session.commit() except: session.rollback() base_learner.job_status = 'errored' base_learner.description['error_type'] = repr(sys.exc_info()[0]) base_learner.description['error_value'] = repr(sys.exc_info()[1]) base_learner.description['error_traceback'] = \ traceback.format_exception(*sys.exc_info()) session.add(base_learner) session.commit() raise
python
def generate_meta_features(path, base_learner_id):
    """Generates meta-features for specified base learner

    After generation of meta-features, the file is saved into the meta-features folder

    Args:
        path (str): Path to Xcessiv notebook

        base_learner_id (str): Base learner ID
    """
    with functions.DBContextManager(path) as session:
        base_learner = session.query(models.BaseLearner).filter_by(id=base_learner_id).first()
        if not base_learner:
            raise exceptions.UserError('Base learner {} '
                                       'does not exist'.format(base_learner_id))

        # Record the RQ job handling this learner and mark the run in-progress
        base_learner.job_id = get_current_job().id
        base_learner.job_status = 'started'

        session.add(base_learner)
        session.commit()

        try:
            est = base_learner.return_estimator()
            extraction = session.query(models.Extraction).first()
            X, y = extraction.return_train_dataset()
            return_splits_iterable = functions.import_object_from_string_code(
                extraction.meta_feature_generation['source'],
                'return_splits_iterable'
            )

            # Out-of-fold predictions: fit on each train split, predict on
            # the held-out split, then stitch the folds back together
            meta_features_list = []
            trues_list = []
            for train_index, test_index in return_splits_iterable(X, y):
                X_train, X_test = X[train_index], X[test_index]
                y_train, y_test = y[train_index], y[test_index]
                est = est.fit(X_train, y_train)
                # meta_feature_generator names the estimator method to call
                # (e.g. 'predict' or 'predict_proba')
                meta_features_list.append(
                    getattr(est, base_learner.base_learner_origin.
                            meta_feature_generator)(X_test)
                )
                trues_list.append(y_test)

            meta_features = np.concatenate(meta_features_list, axis=0)
            y_true = np.concatenate(trues_list)

            # Score the out-of-fold meta-features with every user-defined metric
            for key in base_learner.base_learner_origin.metric_generators:
                metric_generator = functions.import_object_from_string_code(
                    base_learner.base_learner_origin.metric_generators[key],
                    'metric_generator'
                )
                base_learner.individual_score[key] = metric_generator(y_true, meta_features)

            meta_features_path = base_learner.meta_features_path(path)

            if not os.path.exists(os.path.dirname(meta_features_path)):
                os.makedirs(os.path.dirname(meta_features_path))

            np.save(meta_features_path, meta_features, allow_pickle=False)
            base_learner.job_status = 'finished'
            base_learner.meta_features_exists = True
            session.add(base_learner)
            session.commit()
        except:
            # Bare except is deliberate: any failure (including BaseException)
            # must be recorded on the row before the exception is re-raised
            session.rollback()
            base_learner.job_status = 'errored'
            base_learner.description['error_type'] = repr(sys.exc_info()[0])
            base_learner.description['error_value'] = repr(sys.exc_info()[1])
            base_learner.description['error_traceback'] = \
                traceback.format_exception(*sys.exc_info())
            session.add(base_learner)
            session.commit()
            raise
[ "def", "generate_meta_features", "(", "path", ",", "base_learner_id", ")", ":", "with", "functions", ".", "DBContextManager", "(", "path", ")", "as", "session", ":", "base_learner", "=", "session", ".", "query", "(", "models", ".", "BaseLearner", ")", ".", "filter_by", "(", "id", "=", "base_learner_id", ")", ".", "first", "(", ")", "if", "not", "base_learner", ":", "raise", "exceptions", ".", "UserError", "(", "'Base learner {} '", "'does not exist'", ".", "format", "(", "base_learner_id", ")", ")", "base_learner", ".", "job_id", "=", "get_current_job", "(", ")", ".", "id", "base_learner", ".", "job_status", "=", "'started'", "session", ".", "add", "(", "base_learner", ")", "session", ".", "commit", "(", ")", "try", ":", "est", "=", "base_learner", ".", "return_estimator", "(", ")", "extraction", "=", "session", ".", "query", "(", "models", ".", "Extraction", ")", ".", "first", "(", ")", "X", ",", "y", "=", "extraction", ".", "return_train_dataset", "(", ")", "return_splits_iterable", "=", "functions", ".", "import_object_from_string_code", "(", "extraction", ".", "meta_feature_generation", "[", "'source'", "]", ",", "'return_splits_iterable'", ")", "meta_features_list", "=", "[", "]", "trues_list", "=", "[", "]", "for", "train_index", ",", "test_index", "in", "return_splits_iterable", "(", "X", ",", "y", ")", ":", "X_train", ",", "X_test", "=", "X", "[", "train_index", "]", ",", "X", "[", "test_index", "]", "y_train", ",", "y_test", "=", "y", "[", "train_index", "]", ",", "y", "[", "test_index", "]", "est", "=", "est", ".", "fit", "(", "X_train", ",", "y_train", ")", "meta_features_list", ".", "append", "(", "getattr", "(", "est", ",", "base_learner", ".", "base_learner_origin", ".", "meta_feature_generator", ")", "(", "X_test", ")", ")", "trues_list", ".", "append", "(", "y_test", ")", "meta_features", "=", "np", ".", "concatenate", "(", "meta_features_list", ",", "axis", "=", "0", ")", "y_true", "=", "np", ".", "concatenate", "(", "trues_list", ")", "for", 
"key", "in", "base_learner", ".", "base_learner_origin", ".", "metric_generators", ":", "metric_generator", "=", "functions", ".", "import_object_from_string_code", "(", "base_learner", ".", "base_learner_origin", ".", "metric_generators", "[", "key", "]", ",", "'metric_generator'", ")", "base_learner", ".", "individual_score", "[", "key", "]", "=", "metric_generator", "(", "y_true", ",", "meta_features", ")", "meta_features_path", "=", "base_learner", ".", "meta_features_path", "(", "path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "meta_features_path", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "meta_features_path", ")", ")", "np", ".", "save", "(", "meta_features_path", ",", "meta_features", ",", "allow_pickle", "=", "False", ")", "base_learner", ".", "job_status", "=", "'finished'", "base_learner", ".", "meta_features_exists", "=", "True", "session", ".", "add", "(", "base_learner", ")", "session", ".", "commit", "(", ")", "except", ":", "session", ".", "rollback", "(", ")", "base_learner", ".", "job_status", "=", "'errored'", "base_learner", ".", "description", "[", "'error_type'", "]", "=", "repr", "(", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ")", "base_learner", ".", "description", "[", "'error_value'", "]", "=", "repr", "(", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ")", "base_learner", ".", "description", "[", "'error_traceback'", "]", "=", "traceback", ".", "format_exception", "(", "*", "sys", ".", "exc_info", "(", ")", ")", "session", ".", "add", "(", "base_learner", ")", "session", ".", "commit", "(", ")", "raise" ]
Generates meta-features for specified base learner After generation of meta-features, the file is saved into the meta-features folder Args: path (str): Path to Xcessiv notebook base_learner_id (str): Base learner ID
[ "Generates", "meta", "-", "features", "for", "specified", "base", "learner" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/rqtasks.py#L99-L171
train
reiinakano/xcessiv
xcessiv/rqtasks.py
start_automated_run
def start_automated_run(path, automated_run_id): """Starts automated run. This will automatically create base learners until the run finishes or errors out. Args: path (str): Path to Xcessiv notebook automated_run_id (str): Automated Run ID """ with functions.DBContextManager(path) as session: automated_run = session.query(models.AutomatedRun).filter_by(id=automated_run_id).first() if not automated_run: raise exceptions.UserError('Automated run {} ' 'does not exist'.format(automated_run_id)) automated_run.job_id = get_current_job().id automated_run.job_status = 'started' session.add(automated_run) session.commit() try: if automated_run.category == 'bayes': automatedruns.start_naive_bayes(automated_run, session, path) elif automated_run.category == 'tpot': automatedruns.start_tpot(automated_run, session, path) elif automated_run.category == 'greedy_ensemble_search': automatedruns.start_greedy_ensemble_search(automated_run, session, path) else: raise Exception('Something went wrong. Invalid category for automated run') automated_run.job_status = 'finished' session.add(automated_run) session.commit() except: session.rollback() automated_run.job_status = 'errored' automated_run.description['error_type'] = repr(sys.exc_info()[0]) automated_run.description['error_value'] = repr(sys.exc_info()[1]) automated_run.description['error_traceback'] = \ traceback.format_exception(*sys.exc_info()) session.add(automated_run) session.commit() raise
python
def start_automated_run(path, automated_run_id): """Starts automated run. This will automatically create base learners until the run finishes or errors out. Args: path (str): Path to Xcessiv notebook automated_run_id (str): Automated Run ID """ with functions.DBContextManager(path) as session: automated_run = session.query(models.AutomatedRun).filter_by(id=automated_run_id).first() if not automated_run: raise exceptions.UserError('Automated run {} ' 'does not exist'.format(automated_run_id)) automated_run.job_id = get_current_job().id automated_run.job_status = 'started' session.add(automated_run) session.commit() try: if automated_run.category == 'bayes': automatedruns.start_naive_bayes(automated_run, session, path) elif automated_run.category == 'tpot': automatedruns.start_tpot(automated_run, session, path) elif automated_run.category == 'greedy_ensemble_search': automatedruns.start_greedy_ensemble_search(automated_run, session, path) else: raise Exception('Something went wrong. Invalid category for automated run') automated_run.job_status = 'finished' session.add(automated_run) session.commit() except: session.rollback() automated_run.job_status = 'errored' automated_run.description['error_type'] = repr(sys.exc_info()[0]) automated_run.description['error_value'] = repr(sys.exc_info()[1]) automated_run.description['error_traceback'] = \ traceback.format_exception(*sys.exc_info()) session.add(automated_run) session.commit() raise
[ "def", "start_automated_run", "(", "path", ",", "automated_run_id", ")", ":", "with", "functions", ".", "DBContextManager", "(", "path", ")", "as", "session", ":", "automated_run", "=", "session", ".", "query", "(", "models", ".", "AutomatedRun", ")", ".", "filter_by", "(", "id", "=", "automated_run_id", ")", ".", "first", "(", ")", "if", "not", "automated_run", ":", "raise", "exceptions", ".", "UserError", "(", "'Automated run {} '", "'does not exist'", ".", "format", "(", "automated_run_id", ")", ")", "automated_run", ".", "job_id", "=", "get_current_job", "(", ")", ".", "id", "automated_run", ".", "job_status", "=", "'started'", "session", ".", "add", "(", "automated_run", ")", "session", ".", "commit", "(", ")", "try", ":", "if", "automated_run", ".", "category", "==", "'bayes'", ":", "automatedruns", ".", "start_naive_bayes", "(", "automated_run", ",", "session", ",", "path", ")", "elif", "automated_run", ".", "category", "==", "'tpot'", ":", "automatedruns", ".", "start_tpot", "(", "automated_run", ",", "session", ",", "path", ")", "elif", "automated_run", ".", "category", "==", "'greedy_ensemble_search'", ":", "automatedruns", ".", "start_greedy_ensemble_search", "(", "automated_run", ",", "session", ",", "path", ")", "else", ":", "raise", "Exception", "(", "'Something went wrong. 
Invalid category for automated run'", ")", "automated_run", ".", "job_status", "=", "'finished'", "session", ".", "add", "(", "automated_run", ")", "session", ".", "commit", "(", ")", "except", ":", "session", ".", "rollback", "(", ")", "automated_run", ".", "job_status", "=", "'errored'", "automated_run", ".", "description", "[", "'error_type'", "]", "=", "repr", "(", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ")", "automated_run", ".", "description", "[", "'error_value'", "]", "=", "repr", "(", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ")", "automated_run", ".", "description", "[", "'error_traceback'", "]", "=", "traceback", ".", "format_exception", "(", "*", "sys", ".", "exc_info", "(", ")", ")", "session", ".", "add", "(", "automated_run", ")", "session", ".", "commit", "(", ")", "raise" ]
Starts automated run. This will automatically create base learners until the run finishes or errors out. Args: path (str): Path to Xcessiv notebook automated_run_id (str): Automated Run ID
[ "Starts", "automated", "run", ".", "This", "will", "automatically", "create", "base", "learners", "until", "the", "run", "finishes", "or", "errors", "out", "." ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/rqtasks.py#L175-L221
train
reiinakano/xcessiv
xcessiv/functions.py
hash_file
def hash_file(path, block_size=65536): """Returns SHA256 checksum of a file Args: path (string): Absolute file path of file to hash block_size (int, optional): Number of bytes to read per block """ sha256 = hashlib.sha256() with open(path, 'rb') as f: for block in iter(lambda: f.read(block_size), b''): sha256.update(block) return sha256.hexdigest()
python
def hash_file(path, block_size=65536): """Returns SHA256 checksum of a file Args: path (string): Absolute file path of file to hash block_size (int, optional): Number of bytes to read per block """ sha256 = hashlib.sha256() with open(path, 'rb') as f: for block in iter(lambda: f.read(block_size), b''): sha256.update(block) return sha256.hexdigest()
[ "def", "hash_file", "(", "path", ",", "block_size", "=", "65536", ")", ":", "sha256", "=", "hashlib", ".", "sha256", "(", ")", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "for", "block", "in", "iter", "(", "lambda", ":", "f", ".", "read", "(", "block_size", ")", ",", "b''", ")", ":", "sha256", ".", "update", "(", "block", ")", "return", "sha256", ".", "hexdigest", "(", ")" ]
Returns SHA256 checksum of a file Args: path (string): Absolute file path of file to hash block_size (int, optional): Number of bytes to read per block
[ "Returns", "SHA256", "checksum", "of", "a", "file" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L16-L28
train
reiinakano/xcessiv
xcessiv/functions.py
import_object_from_path
def import_object_from_path(path, object): """Used to import an object from an absolute path. This function takes an absolute path and imports it as a Python module. It then returns the object with name `object` from the imported module. Args: path (string): Absolute file path of .py file to import object (string): Name of object to extract from imported module """ with open(path) as f: return import_object_from_string_code(f.read(), object)
python
def import_object_from_path(path, object): """Used to import an object from an absolute path. This function takes an absolute path and imports it as a Python module. It then returns the object with name `object` from the imported module. Args: path (string): Absolute file path of .py file to import object (string): Name of object to extract from imported module """ with open(path) as f: return import_object_from_string_code(f.read(), object)
[ "def", "import_object_from_path", "(", "path", ",", "object", ")", ":", "with", "open", "(", "path", ")", "as", "f", ":", "return", "import_object_from_string_code", "(", "f", ".", "read", "(", ")", ",", "object", ")" ]
Used to import an object from an absolute path. This function takes an absolute path and imports it as a Python module. It then returns the object with name `object` from the imported module. Args: path (string): Absolute file path of .py file to import object (string): Name of object to extract from imported module
[ "Used", "to", "import", "an", "object", "from", "an", "absolute", "path", "." ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L36-L48
train
reiinakano/xcessiv
xcessiv/functions.py
import_object_from_string_code
def import_object_from_string_code(code, object): """Used to import an object from arbitrary passed code. Passed in code is treated as a module and is imported and added to `sys.modules` with its SHA256 hash as key. Args: code (string): Python code to import as module object (string): Name of object to extract from imported module """ sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest() module = imp.new_module(sha256) try: exec_(code, module.__dict__) except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) sys.modules[sha256] = module try: return getattr(module, object) except AttributeError: raise exceptions.UserError("{} not found in code".format(object))
python
def import_object_from_string_code(code, object): """Used to import an object from arbitrary passed code. Passed in code is treated as a module and is imported and added to `sys.modules` with its SHA256 hash as key. Args: code (string): Python code to import as module object (string): Name of object to extract from imported module """ sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest() module = imp.new_module(sha256) try: exec_(code, module.__dict__) except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) sys.modules[sha256] = module try: return getattr(module, object) except AttributeError: raise exceptions.UserError("{} not found in code".format(object))
[ "def", "import_object_from_string_code", "(", "code", ",", "object", ")", ":", "sha256", "=", "hashlib", ".", "sha256", "(", "code", ".", "encode", "(", "'UTF-8'", ")", ")", ".", "hexdigest", "(", ")", "module", "=", "imp", ".", "new_module", "(", "sha256", ")", "try", ":", "exec_", "(", "code", ",", "module", ".", "__dict__", ")", "except", "Exception", "as", "e", ":", "raise", "exceptions", ".", "UserError", "(", "'User code exception'", ",", "exception_message", "=", "str", "(", "e", ")", ")", "sys", ".", "modules", "[", "sha256", "]", "=", "module", "try", ":", "return", "getattr", "(", "module", ",", "object", ")", "except", "AttributeError", ":", "raise", "exceptions", ".", "UserError", "(", "\"{} not found in code\"", ".", "format", "(", "object", ")", ")" ]
Used to import an object from arbitrary passed code. Passed in code is treated as a module and is imported and added to `sys.modules` with its SHA256 hash as key. Args: code (string): Python code to import as module object (string): Name of object to extract from imported module
[ "Used", "to", "import", "an", "object", "from", "arbitrary", "passed", "code", "." ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L51-L72
train
reiinakano/xcessiv
xcessiv/functions.py
import_string_code_as_module
def import_string_code_as_module(code): """Used to run arbitrary passed code as a module Args: code (string): Python code to import as module Returns: module: Python module """ sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest() module = imp.new_module(sha256) try: exec_(code, module.__dict__) except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) sys.modules[sha256] = module return module
python
def import_string_code_as_module(code): """Used to run arbitrary passed code as a module Args: code (string): Python code to import as module Returns: module: Python module """ sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest() module = imp.new_module(sha256) try: exec_(code, module.__dict__) except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) sys.modules[sha256] = module return module
[ "def", "import_string_code_as_module", "(", "code", ")", ":", "sha256", "=", "hashlib", ".", "sha256", "(", "code", ".", "encode", "(", "'UTF-8'", ")", ")", ".", "hexdigest", "(", ")", "module", "=", "imp", ".", "new_module", "(", "sha256", ")", "try", ":", "exec_", "(", "code", ",", "module", ".", "__dict__", ")", "except", "Exception", "as", "e", ":", "raise", "exceptions", ".", "UserError", "(", "'User code exception'", ",", "exception_message", "=", "str", "(", "e", ")", ")", "sys", ".", "modules", "[", "sha256", "]", "=", "module", "return", "module" ]
Used to run arbitrary passed code as a module Args: code (string): Python code to import as module Returns: module: Python module
[ "Used", "to", "run", "arbitrary", "passed", "code", "as", "a", "module" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L75-L91
train
reiinakano/xcessiv
xcessiv/functions.py
verify_dataset
def verify_dataset(X, y): """Verifies if a dataset is valid for use i.e. scikit-learn format Used to verify a dataset by returning shape and basic statistics of returned data. This will also provide quick and dirty check on capability of host machine to process the data. Args: X (array-like): Features array y (array-like): Label array Returns: X_shape (2-tuple of int): Shape of X returned y_shape (1-tuple of int): Shape of y returned Raises: AssertionError: `X_shape` must be of length 2 and `y_shape` must be of length 1. `X` must have the same number of elements as `y` i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met, an AssertionError is raised. """ X_shape, y_shape = np.array(X).shape, np.array(y).shape if len(X_shape) != 2: raise exceptions.UserError("X must be 2-dimensional array") if len(y_shape) != 1: raise exceptions.UserError("y must be 1-dimensional array") if X_shape[0] != y_shape[0]: raise exceptions.UserError("X must have same number of elements as y") return dict( features_shape=X_shape, labels_shape=y_shape )
python
def verify_dataset(X, y): """Verifies if a dataset is valid for use i.e. scikit-learn format Used to verify a dataset by returning shape and basic statistics of returned data. This will also provide quick and dirty check on capability of host machine to process the data. Args: X (array-like): Features array y (array-like): Label array Returns: X_shape (2-tuple of int): Shape of X returned y_shape (1-tuple of int): Shape of y returned Raises: AssertionError: `X_shape` must be of length 2 and `y_shape` must be of length 1. `X` must have the same number of elements as `y` i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met, an AssertionError is raised. """ X_shape, y_shape = np.array(X).shape, np.array(y).shape if len(X_shape) != 2: raise exceptions.UserError("X must be 2-dimensional array") if len(y_shape) != 1: raise exceptions.UserError("y must be 1-dimensional array") if X_shape[0] != y_shape[0]: raise exceptions.UserError("X must have same number of elements as y") return dict( features_shape=X_shape, labels_shape=y_shape )
[ "def", "verify_dataset", "(", "X", ",", "y", ")", ":", "X_shape", ",", "y_shape", "=", "np", ".", "array", "(", "X", ")", ".", "shape", ",", "np", ".", "array", "(", "y", ")", ".", "shape", "if", "len", "(", "X_shape", ")", "!=", "2", ":", "raise", "exceptions", ".", "UserError", "(", "\"X must be 2-dimensional array\"", ")", "if", "len", "(", "y_shape", ")", "!=", "1", ":", "raise", "exceptions", ".", "UserError", "(", "\"y must be 1-dimensional array\"", ")", "if", "X_shape", "[", "0", "]", "!=", "y_shape", "[", "0", "]", ":", "raise", "exceptions", ".", "UserError", "(", "\"X must have same number of elements as y\"", ")", "return", "dict", "(", "features_shape", "=", "X_shape", ",", "labels_shape", "=", "y_shape", ")" ]
Verifies if a dataset is valid for use i.e. scikit-learn format Used to verify a dataset by returning shape and basic statistics of returned data. This will also provide quick and dirty check on capability of host machine to process the data. Args: X (array-like): Features array y (array-like): Label array Returns: X_shape (2-tuple of int): Shape of X returned y_shape (1-tuple of int): Shape of y returned Raises: AssertionError: `X_shape` must be of length 2 and `y_shape` must be of length 1. `X` must have the same number of elements as `y` i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met, an AssertionError is raised.
[ "Verifies", "if", "a", "dataset", "is", "valid", "for", "use", "i", ".", "e", ".", "scikit", "-", "learn", "format" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L94-L127
train
reiinakano/xcessiv
xcessiv/functions.py
make_serializable
def make_serializable(json): """This function ensures that the dictionary is JSON serializable. If not, keys with non-serializable values are removed from the return value. Args: json (dict): Dictionary to convert to serializable Returns: new_dict (dict): New dictionary with non JSON serializable values removed """ new_dict = dict() for key, value in iteritems(json): if is_valid_json(value): new_dict[key] = value return new_dict
python
def make_serializable(json): """This function ensures that the dictionary is JSON serializable. If not, keys with non-serializable values are removed from the return value. Args: json (dict): Dictionary to convert to serializable Returns: new_dict (dict): New dictionary with non JSON serializable values removed """ new_dict = dict() for key, value in iteritems(json): if is_valid_json(value): new_dict[key] = value return new_dict
[ "def", "make_serializable", "(", "json", ")", ":", "new_dict", "=", "dict", "(", ")", "for", "key", ",", "value", "in", "iteritems", "(", "json", ")", ":", "if", "is_valid_json", "(", "value", ")", ":", "new_dict", "[", "key", "]", "=", "value", "return", "new_dict" ]
This function ensures that the dictionary is JSON serializable. If not, keys with non-serializable values are removed from the return value. Args: json (dict): Dictionary to convert to serializable Returns: new_dict (dict): New dictionary with non JSON serializable values removed
[ "This", "function", "ensures", "that", "the", "dictionary", "is", "JSON", "serializable", ".", "If", "not", "keys", "with", "non", "-", "serializable", "values", "are", "removed", "from", "the", "return", "value", "." ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L143-L158
train
reiinakano/xcessiv
xcessiv/functions.py
get_sample_dataset
def get_sample_dataset(dataset_properties): """Returns sample dataset Args: dataset_properties (dict): Dictionary corresponding to the properties of the dataset used to verify the estimator and metric generators. Returns: X (array-like): Features array y (array-like): Labels array splits (iterator): This is an iterator that returns train test splits for cross-validation purposes on ``X`` and ``y``. """ kwargs = dataset_properties.copy() data_type = kwargs.pop('type') if data_type == 'multiclass': try: X, y = datasets.make_classification(random_state=8, **kwargs) splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y) except Exception as e: raise exceptions.UserError(repr(e)) elif data_type == 'iris': X, y = datasets.load_iris(return_X_y=True) splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y) elif data_type == 'mnist': X, y = datasets.load_digits(return_X_y=True) splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y) elif data_type == 'breast_cancer': X, y = datasets.load_breast_cancer(return_X_y=True) splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y) elif data_type == 'boston': X, y = datasets.load_boston(return_X_y=True) splits = model_selection.KFold(n_splits=2, random_state=8).split(X) elif data_type == 'diabetes': X, y = datasets.load_diabetes(return_X_y=True) splits = model_selection.KFold(n_splits=2, random_state=8).split(X) else: raise exceptions.UserError('Unknown dataset type {}'.format(dataset_properties['type'])) return X, y, splits
python
def get_sample_dataset(dataset_properties): """Returns sample dataset Args: dataset_properties (dict): Dictionary corresponding to the properties of the dataset used to verify the estimator and metric generators. Returns: X (array-like): Features array y (array-like): Labels array splits (iterator): This is an iterator that returns train test splits for cross-validation purposes on ``X`` and ``y``. """ kwargs = dataset_properties.copy() data_type = kwargs.pop('type') if data_type == 'multiclass': try: X, y = datasets.make_classification(random_state=8, **kwargs) splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y) except Exception as e: raise exceptions.UserError(repr(e)) elif data_type == 'iris': X, y = datasets.load_iris(return_X_y=True) splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y) elif data_type == 'mnist': X, y = datasets.load_digits(return_X_y=True) splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y) elif data_type == 'breast_cancer': X, y = datasets.load_breast_cancer(return_X_y=True) splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y) elif data_type == 'boston': X, y = datasets.load_boston(return_X_y=True) splits = model_selection.KFold(n_splits=2, random_state=8).split(X) elif data_type == 'diabetes': X, y = datasets.load_diabetes(return_X_y=True) splits = model_selection.KFold(n_splits=2, random_state=8).split(X) else: raise exceptions.UserError('Unknown dataset type {}'.format(dataset_properties['type'])) return X, y, splits
[ "def", "get_sample_dataset", "(", "dataset_properties", ")", ":", "kwargs", "=", "dataset_properties", ".", "copy", "(", ")", "data_type", "=", "kwargs", ".", "pop", "(", "'type'", ")", "if", "data_type", "==", "'multiclass'", ":", "try", ":", "X", ",", "y", "=", "datasets", ".", "make_classification", "(", "random_state", "=", "8", ",", "*", "*", "kwargs", ")", "splits", "=", "model_selection", ".", "StratifiedKFold", "(", "n_splits", "=", "2", ",", "random_state", "=", "8", ")", ".", "split", "(", "X", ",", "y", ")", "except", "Exception", "as", "e", ":", "raise", "exceptions", ".", "UserError", "(", "repr", "(", "e", ")", ")", "elif", "data_type", "==", "'iris'", ":", "X", ",", "y", "=", "datasets", ".", "load_iris", "(", "return_X_y", "=", "True", ")", "splits", "=", "model_selection", ".", "StratifiedKFold", "(", "n_splits", "=", "2", ",", "random_state", "=", "8", ")", ".", "split", "(", "X", ",", "y", ")", "elif", "data_type", "==", "'mnist'", ":", "X", ",", "y", "=", "datasets", ".", "load_digits", "(", "return_X_y", "=", "True", ")", "splits", "=", "model_selection", ".", "StratifiedKFold", "(", "n_splits", "=", "2", ",", "random_state", "=", "8", ")", ".", "split", "(", "X", ",", "y", ")", "elif", "data_type", "==", "'breast_cancer'", ":", "X", ",", "y", "=", "datasets", ".", "load_breast_cancer", "(", "return_X_y", "=", "True", ")", "splits", "=", "model_selection", ".", "StratifiedKFold", "(", "n_splits", "=", "2", ",", "random_state", "=", "8", ")", ".", "split", "(", "X", ",", "y", ")", "elif", "data_type", "==", "'boston'", ":", "X", ",", "y", "=", "datasets", ".", "load_boston", "(", "return_X_y", "=", "True", ")", "splits", "=", "model_selection", ".", "KFold", "(", "n_splits", "=", "2", ",", "random_state", "=", "8", ")", ".", "split", "(", "X", ")", "elif", "data_type", "==", "'diabetes'", ":", "X", ",", "y", "=", "datasets", ".", "load_diabetes", "(", "return_X_y", "=", "True", ")", "splits", "=", "model_selection", ".", "KFold", "(", 
"n_splits", "=", "2", ",", "random_state", "=", "8", ")", ".", "split", "(", "X", ")", "else", ":", "raise", "exceptions", ".", "UserError", "(", "'Unknown dataset type {}'", ".", "format", "(", "dataset_properties", "[", "'type'", "]", ")", ")", "return", "X", ",", "y", ",", "splits" ]
Returns sample dataset Args: dataset_properties (dict): Dictionary corresponding to the properties of the dataset used to verify the estimator and metric generators. Returns: X (array-like): Features array y (array-like): Labels array splits (iterator): This is an iterator that returns train test splits for cross-validation purposes on ``X`` and ``y``.
[ "Returns", "sample", "dataset" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L161-L201
train
reiinakano/xcessiv
xcessiv/functions.py
verify_estimator_class
def verify_estimator_class(est, meta_feature_generator, metric_generators, dataset_properties): """Verify if estimator object is valid for use i.e. scikit-learn format Verifies if an estimator is fit for use by testing for existence of methods such as `get_params` and `set_params`. Must also be able to properly fit on and predict a sample iris dataset. Args: est: Estimator object with `fit`, `predict`/`predict_proba`, `get_params`, and `set_params` methods. meta_feature_generator (str, unicode): Name of the method used by the estimator to generate meta-features on a set of data. metric_generators (dict): Dictionary of key value pairs where the key signifies the name of the metric calculated and the value is a list of strings, when concatenated, form Python code containing the function used to calculate the metric from true values and the meta-features generated. dataset_properties (dict): Dictionary corresponding to the properties of the dataset used to verify the estimator and metric generators. Returns: performance_dict (mapping): Mapping from performance metric name to performance metric value e.g. "Accuracy": 0.963 hyperparameters (mapping): Mapping from the estimator's hyperparameters to their default values e.g. 
"n_estimators": 10 """ X, y, splits = get_sample_dataset(dataset_properties) if not hasattr(est, "get_params"): raise exceptions.UserError('Estimator does not have get_params method') if not hasattr(est, "set_params"): raise exceptions.UserError('Estimator does not have set_params method') if not hasattr(est, meta_feature_generator): raise exceptions.UserError('Estimator does not have meta-feature generator' ' {}'.format(meta_feature_generator)) performance_dict = dict() true_labels = [] preds = [] try: for train_index, test_index in splits: X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] est.fit(X_train, y_train) true_labels.append(y_test) preds.append(getattr(est, meta_feature_generator)(X_test)) true_labels = np.concatenate(true_labels) preds = np.concatenate(preds, axis=0) except Exception as e: raise exceptions.UserError(repr(e)) if preds.shape[0] != true_labels.shape[0]: raise exceptions.UserError('Estimator\'s meta-feature generator ' 'does not produce valid shape') for key in metric_generators: metric_generator = import_object_from_string_code( metric_generators[key], 'metric_generator' ) try: performance_dict[key] = metric_generator(true_labels, preds) except Exception as e: raise exceptions.UserError(repr(e)) return performance_dict, make_serializable(est.get_params())
python
def verify_estimator_class(est, meta_feature_generator, metric_generators, dataset_properties): """Verify if estimator object is valid for use i.e. scikit-learn format Verifies if an estimator is fit for use by testing for existence of methods such as `get_params` and `set_params`. Must also be able to properly fit on and predict a sample iris dataset. Args: est: Estimator object with `fit`, `predict`/`predict_proba`, `get_params`, and `set_params` methods. meta_feature_generator (str, unicode): Name of the method used by the estimator to generate meta-features on a set of data. metric_generators (dict): Dictionary of key value pairs where the key signifies the name of the metric calculated and the value is a list of strings, when concatenated, form Python code containing the function used to calculate the metric from true values and the meta-features generated. dataset_properties (dict): Dictionary corresponding to the properties of the dataset used to verify the estimator and metric generators. Returns: performance_dict (mapping): Mapping from performance metric name to performance metric value e.g. "Accuracy": 0.963 hyperparameters (mapping): Mapping from the estimator's hyperparameters to their default values e.g. 
"n_estimators": 10 """ X, y, splits = get_sample_dataset(dataset_properties) if not hasattr(est, "get_params"): raise exceptions.UserError('Estimator does not have get_params method') if not hasattr(est, "set_params"): raise exceptions.UserError('Estimator does not have set_params method') if not hasattr(est, meta_feature_generator): raise exceptions.UserError('Estimator does not have meta-feature generator' ' {}'.format(meta_feature_generator)) performance_dict = dict() true_labels = [] preds = [] try: for train_index, test_index in splits: X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] est.fit(X_train, y_train) true_labels.append(y_test) preds.append(getattr(est, meta_feature_generator)(X_test)) true_labels = np.concatenate(true_labels) preds = np.concatenate(preds, axis=0) except Exception as e: raise exceptions.UserError(repr(e)) if preds.shape[0] != true_labels.shape[0]: raise exceptions.UserError('Estimator\'s meta-feature generator ' 'does not produce valid shape') for key in metric_generators: metric_generator = import_object_from_string_code( metric_generators[key], 'metric_generator' ) try: performance_dict[key] = metric_generator(true_labels, preds) except Exception as e: raise exceptions.UserError(repr(e)) return performance_dict, make_serializable(est.get_params())
[ "def", "verify_estimator_class", "(", "est", ",", "meta_feature_generator", ",", "metric_generators", ",", "dataset_properties", ")", ":", "X", ",", "y", ",", "splits", "=", "get_sample_dataset", "(", "dataset_properties", ")", "if", "not", "hasattr", "(", "est", ",", "\"get_params\"", ")", ":", "raise", "exceptions", ".", "UserError", "(", "'Estimator does not have get_params method'", ")", "if", "not", "hasattr", "(", "est", ",", "\"set_params\"", ")", ":", "raise", "exceptions", ".", "UserError", "(", "'Estimator does not have set_params method'", ")", "if", "not", "hasattr", "(", "est", ",", "meta_feature_generator", ")", ":", "raise", "exceptions", ".", "UserError", "(", "'Estimator does not have meta-feature generator'", "' {}'", ".", "format", "(", "meta_feature_generator", ")", ")", "performance_dict", "=", "dict", "(", ")", "true_labels", "=", "[", "]", "preds", "=", "[", "]", "try", ":", "for", "train_index", ",", "test_index", "in", "splits", ":", "X_train", ",", "X_test", "=", "X", "[", "train_index", "]", ",", "X", "[", "test_index", "]", "y_train", ",", "y_test", "=", "y", "[", "train_index", "]", ",", "y", "[", "test_index", "]", "est", ".", "fit", "(", "X_train", ",", "y_train", ")", "true_labels", ".", "append", "(", "y_test", ")", "preds", ".", "append", "(", "getattr", "(", "est", ",", "meta_feature_generator", ")", "(", "X_test", ")", ")", "true_labels", "=", "np", ".", "concatenate", "(", "true_labels", ")", "preds", "=", "np", ".", "concatenate", "(", "preds", ",", "axis", "=", "0", ")", "except", "Exception", "as", "e", ":", "raise", "exceptions", ".", "UserError", "(", "repr", "(", "e", ")", ")", "if", "preds", ".", "shape", "[", "0", "]", "!=", "true_labels", ".", "shape", "[", "0", "]", ":", "raise", "exceptions", ".", "UserError", "(", "'Estimator\\'s meta-feature generator '", "'does not produce valid shape'", ")", "for", "key", "in", "metric_generators", ":", "metric_generator", "=", "import_object_from_string_code", "(", 
"metric_generators", "[", "key", "]", ",", "'metric_generator'", ")", "try", ":", "performance_dict", "[", "key", "]", "=", "metric_generator", "(", "true_labels", ",", "preds", ")", "except", "Exception", "as", "e", ":", "raise", "exceptions", ".", "UserError", "(", "repr", "(", "e", ")", ")", "return", "performance_dict", ",", "make_serializable", "(", "est", ".", "get_params", "(", ")", ")" ]
Verify if estimator object is valid for use i.e. scikit-learn format Verifies if an estimator is fit for use by testing for existence of methods such as `get_params` and `set_params`. Must also be able to properly fit on and predict a sample iris dataset. Args: est: Estimator object with `fit`, `predict`/`predict_proba`, `get_params`, and `set_params` methods. meta_feature_generator (str, unicode): Name of the method used by the estimator to generate meta-features on a set of data. metric_generators (dict): Dictionary of key value pairs where the key signifies the name of the metric calculated and the value is a list of strings, when concatenated, form Python code containing the function used to calculate the metric from true values and the meta-features generated. dataset_properties (dict): Dictionary corresponding to the properties of the dataset used to verify the estimator and metric generators. Returns: performance_dict (mapping): Mapping from performance metric name to performance metric value e.g. "Accuracy": 0.963 hyperparameters (mapping): Mapping from the estimator's hyperparameters to their default values e.g. "n_estimators": 10
[ "Verify", "if", "estimator", "object", "is", "valid", "for", "use", "i", ".", "e", ".", "scikit", "-", "learn", "format" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L204-L275
train
reiinakano/xcessiv
xcessiv/functions.py
get_path_from_query_string
def get_path_from_query_string(req): """Gets path from query string Args: req (flask.request): Request object from Flask Returns: path (str): Value of "path" parameter from query string Raises: exceptions.UserError: If "path" is not found in query string """ if req.args.get('path') is None: raise exceptions.UserError('Path not found in query string') return req.args.get('path')
python
def get_path_from_query_string(req): """Gets path from query string Args: req (flask.request): Request object from Flask Returns: path (str): Value of "path" parameter from query string Raises: exceptions.UserError: If "path" is not found in query string """ if req.args.get('path') is None: raise exceptions.UserError('Path not found in query string') return req.args.get('path')
[ "def", "get_path_from_query_string", "(", "req", ")", ":", "if", "req", ".", "args", ".", "get", "(", "'path'", ")", "is", "None", ":", "raise", "exceptions", ".", "UserError", "(", "'Path not found in query string'", ")", "return", "req", ".", "args", ".", "get", "(", "'path'", ")" ]
Gets path from query string Args: req (flask.request): Request object from Flask Returns: path (str): Value of "path" parameter from query string Raises: exceptions.UserError: If "path" is not found in query string
[ "Gets", "path", "from", "query", "string" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L278-L292
train
reiinakano/xcessiv
xcessiv/models.py
Extraction.return_main_dataset
def return_main_dataset(self): """Returns main data set from self Returns: X (numpy.ndarray): Features y (numpy.ndarray): Labels """ if not self.main_dataset['source']: raise exceptions.UserError('Source is empty') extraction_code = self.main_dataset["source"] extraction_function = functions.import_object_from_string_code(extraction_code, "extract_main_dataset") try: X, y = extraction_function() except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) X, y = np.array(X), np.array(y) return X, y
python
def return_main_dataset(self): """Returns main data set from self Returns: X (numpy.ndarray): Features y (numpy.ndarray): Labels """ if not self.main_dataset['source']: raise exceptions.UserError('Source is empty') extraction_code = self.main_dataset["source"] extraction_function = functions.import_object_from_string_code(extraction_code, "extract_main_dataset") try: X, y = extraction_function() except Exception as e: raise exceptions.UserError('User code exception', exception_message=str(e)) X, y = np.array(X), np.array(y) return X, y
[ "def", "return_main_dataset", "(", "self", ")", ":", "if", "not", "self", ".", "main_dataset", "[", "'source'", "]", ":", "raise", "exceptions", ".", "UserError", "(", "'Source is empty'", ")", "extraction_code", "=", "self", ".", "main_dataset", "[", "\"source\"", "]", "extraction_function", "=", "functions", ".", "import_object_from_string_code", "(", "extraction_code", ",", "\"extract_main_dataset\"", ")", "try", ":", "X", ",", "y", "=", "extraction_function", "(", ")", "except", "Exception", "as", "e", ":", "raise", "exceptions", ".", "UserError", "(", "'User code exception'", ",", "exception_message", "=", "str", "(", "e", ")", ")", "X", ",", "y", "=", "np", ".", "array", "(", "X", ")", ",", "np", ".", "array", "(", "y", ")", "return", "X", ",", "y" ]
Returns main data set from self Returns: X (numpy.ndarray): Features y (numpy.ndarray): Labels
[ "Returns", "main", "data", "set", "from", "self" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L70-L92
train
reiinakano/xcessiv
xcessiv/models.py
Extraction.return_train_dataset
def return_train_dataset(self): """Returns train data set Returns: X (numpy.ndarray): Features y (numpy.ndarray): Labels """ X, y = self.return_main_dataset() if self.test_dataset['method'] == 'split_from_main': X, X_test, y, y_test = train_test_split( X, y, test_size=self.test_dataset['split_ratio'], random_state=self.test_dataset['split_seed'], stratify=y ) return X, y
python
def return_train_dataset(self): """Returns train data set Returns: X (numpy.ndarray): Features y (numpy.ndarray): Labels """ X, y = self.return_main_dataset() if self.test_dataset['method'] == 'split_from_main': X, X_test, y, y_test = train_test_split( X, y, test_size=self.test_dataset['split_ratio'], random_state=self.test_dataset['split_seed'], stratify=y ) return X, y
[ "def", "return_train_dataset", "(", "self", ")", ":", "X", ",", "y", "=", "self", ".", "return_main_dataset", "(", ")", "if", "self", ".", "test_dataset", "[", "'method'", "]", "==", "'split_from_main'", ":", "X", ",", "X_test", ",", "y", ",", "y_test", "=", "train_test_split", "(", "X", ",", "y", ",", "test_size", "=", "self", ".", "test_dataset", "[", "'split_ratio'", "]", ",", "random_state", "=", "self", ".", "test_dataset", "[", "'split_seed'", "]", ",", "stratify", "=", "y", ")", "return", "X", ",", "y" ]
Returns train data set Returns: X (numpy.ndarray): Features y (numpy.ndarray): Labels
[ "Returns", "train", "data", "set" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L94-L113
train
reiinakano/xcessiv
xcessiv/models.py
BaseLearnerOrigin.return_estimator
def return_estimator(self): """Returns estimator from base learner origin Returns: est (estimator): Estimator object """ extraction_code = self.source estimator = functions.import_object_from_string_code(extraction_code, "base_learner") return estimator
python
def return_estimator(self): """Returns estimator from base learner origin Returns: est (estimator): Estimator object """ extraction_code = self.source estimator = functions.import_object_from_string_code(extraction_code, "base_learner") return estimator
[ "def", "return_estimator", "(", "self", ")", ":", "extraction_code", "=", "self", ".", "source", "estimator", "=", "functions", ".", "import_object_from_string_code", "(", "extraction_code", ",", "\"base_learner\"", ")", "return", "estimator" ]
Returns estimator from base learner origin Returns: est (estimator): Estimator object
[ "Returns", "estimator", "from", "base", "learner", "origin" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L192-L201
train
reiinakano/xcessiv
xcessiv/models.py
BaseLearnerOrigin.export_as_file
def export_as_file(self, filepath, hyperparameters): """Generates a Python file with the importable base learner set to ``hyperparameters`` This function generates a Python file in the specified file path that contains the base learner as an importable variable stored in ``base_learner``. The base learner will be set to the appropriate hyperparameters through ``set_params``. Args: filepath (str, unicode): File path to save file in hyperparameters (dict): Dictionary to use for ``set_params`` """ if not filepath.endswith('.py'): filepath += '.py' file_contents = '' file_contents += self.source file_contents += '\n\nbase_learner.set_params(**{})\n'.format(hyperparameters) file_contents += '\nmeta_feature_generator = "{}"\n'.format(self.meta_feature_generator) with open(filepath, 'wb') as f: f.write(file_contents.encode('utf8'))
python
def export_as_file(self, filepath, hyperparameters): """Generates a Python file with the importable base learner set to ``hyperparameters`` This function generates a Python file in the specified file path that contains the base learner as an importable variable stored in ``base_learner``. The base learner will be set to the appropriate hyperparameters through ``set_params``. Args: filepath (str, unicode): File path to save file in hyperparameters (dict): Dictionary to use for ``set_params`` """ if not filepath.endswith('.py'): filepath += '.py' file_contents = '' file_contents += self.source file_contents += '\n\nbase_learner.set_params(**{})\n'.format(hyperparameters) file_contents += '\nmeta_feature_generator = "{}"\n'.format(self.meta_feature_generator) with open(filepath, 'wb') as f: f.write(file_contents.encode('utf8'))
[ "def", "export_as_file", "(", "self", ",", "filepath", ",", "hyperparameters", ")", ":", "if", "not", "filepath", ".", "endswith", "(", "'.py'", ")", ":", "filepath", "+=", "'.py'", "file_contents", "=", "''", "file_contents", "+=", "self", ".", "source", "file_contents", "+=", "'\\n\\nbase_learner.set_params(**{})\\n'", ".", "format", "(", "hyperparameters", ")", "file_contents", "+=", "'\\nmeta_feature_generator = \"{}\"\\n'", ".", "format", "(", "self", ".", "meta_feature_generator", ")", "with", "open", "(", "filepath", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "file_contents", ".", "encode", "(", "'utf8'", ")", ")" ]
Generates a Python file with the importable base learner set to ``hyperparameters`` This function generates a Python file in the specified file path that contains the base learner as an importable variable stored in ``base_learner``. The base learner will be set to the appropriate hyperparameters through ``set_params``. Args: filepath (str, unicode): File path to save file in hyperparameters (dict): Dictionary to use for ``set_params``
[ "Generates", "a", "Python", "file", "with", "the", "importable", "base", "learner", "set", "to", "hyperparameters" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L212-L232
train
reiinakano/xcessiv
xcessiv/models.py
BaseLearner.return_estimator
def return_estimator(self): """Returns base learner using its origin and the given hyperparameters Returns: est (estimator): Estimator object """ estimator = self.base_learner_origin.return_estimator() estimator = estimator.set_params(**self.hyperparameters) return estimator
python
def return_estimator(self): """Returns base learner using its origin and the given hyperparameters Returns: est (estimator): Estimator object """ estimator = self.base_learner_origin.return_estimator() estimator = estimator.set_params(**self.hyperparameters) return estimator
[ "def", "return_estimator", "(", "self", ")", ":", "estimator", "=", "self", ".", "base_learner_origin", ".", "return_estimator", "(", ")", "estimator", "=", "estimator", ".", "set_params", "(", "*", "*", "self", ".", "hyperparameters", ")", "return", "estimator" ]
Returns base learner using its origin and the given hyperparameters Returns: est (estimator): Estimator object
[ "Returns", "base", "learner", "using", "its", "origin", "and", "the", "given", "hyperparameters" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L307-L315
train
reiinakano/xcessiv
xcessiv/models.py
BaseLearner.meta_features_path
def meta_features_path(self, path): """Returns path for meta-features Args: path (str): Absolute/local path of xcessiv folder """ return os.path.join( path, app.config['XCESSIV_META_FEATURES_FOLDER'], str(self.id) ) + '.npy'
python
def meta_features_path(self, path): """Returns path for meta-features Args: path (str): Absolute/local path of xcessiv folder """ return os.path.join( path, app.config['XCESSIV_META_FEATURES_FOLDER'], str(self.id) ) + '.npy'
[ "def", "meta_features_path", "(", "self", ",", "path", ")", ":", "return", "os", ".", "path", ".", "join", "(", "path", ",", "app", ".", "config", "[", "'XCESSIV_META_FEATURES_FOLDER'", "]", ",", "str", "(", "self", ".", "id", ")", ")", "+", "'.npy'" ]
Returns path for meta-features Args: path (str): Absolute/local path of xcessiv folder
[ "Returns", "path", "for", "meta", "-", "features" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L317-L327
train
reiinakano/xcessiv
xcessiv/models.py
BaseLearner.delete_meta_features
def delete_meta_features(self, path): """Deletes meta-features of base learner if it exists Args: path (str): Absolute/local path of xcessiv folder """ if os.path.exists(self.meta_features_path(path)): os.remove(self.meta_features_path(path))
python
def delete_meta_features(self, path): """Deletes meta-features of base learner if it exists Args: path (str): Absolute/local path of xcessiv folder """ if os.path.exists(self.meta_features_path(path)): os.remove(self.meta_features_path(path))
[ "def", "delete_meta_features", "(", "self", ",", "path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "meta_features_path", "(", "path", ")", ")", ":", "os", ".", "remove", "(", "self", ".", "meta_features_path", "(", "path", ")", ")" ]
Deletes meta-features of base learner if it exists Args: path (str): Absolute/local path of xcessiv folder
[ "Deletes", "meta", "-", "features", "of", "base", "learner", "if", "it", "exists" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L342-L349
train
reiinakano/xcessiv
xcessiv/models.py
StackedEnsemble.return_secondary_learner
def return_secondary_learner(self): """Returns secondary learner using its origin and the given hyperparameters Returns: est (estimator): Estimator object """ estimator = self.base_learner_origin.return_estimator() estimator = estimator.set_params(**self.secondary_learner_hyperparameters) return estimator
python
def return_secondary_learner(self): """Returns secondary learner using its origin and the given hyperparameters Returns: est (estimator): Estimator object """ estimator = self.base_learner_origin.return_estimator() estimator = estimator.set_params(**self.secondary_learner_hyperparameters) return estimator
[ "def", "return_secondary_learner", "(", "self", ")", ":", "estimator", "=", "self", ".", "base_learner_origin", ".", "return_estimator", "(", ")", "estimator", "=", "estimator", ".", "set_params", "(", "*", "*", "self", ".", "secondary_learner_hyperparameters", ")", "return", "estimator" ]
Returns secondary learner using its origin and the given hyperparameters Returns: est (estimator): Estimator object
[ "Returns", "secondary", "learner", "using", "its", "origin", "and", "the", "given", "hyperparameters" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L402-L410
train
reiinakano/xcessiv
xcessiv/models.py
StackedEnsemble.export_as_code
def export_as_code(self, cv_source): """Returns a string value that contains the Python code for the ensemble Args: cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features. Returns: base_learner_code (str, unicode): String that can be used as Python code """ rand_value = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(25)) base_learner_code = '' base_learner_code += 'base_learner_list_{} = []\n'.format(rand_value) base_learner_code += 'meta_feature_generators_list_{} = []\n\n'.format(rand_value) for idx, base_learner in enumerate(self.base_learners): base_learner_code += '################################################\n' base_learner_code += '###### Code for building base learner {} ########\n'.format(idx+1) base_learner_code += '################################################\n' base_learner_code += base_learner.base_learner_origin.source base_learner_code += '\n\n' base_learner_code += 'base_learner' \ '.set_params(**{})\n'.format(base_learner.hyperparameters) base_learner_code += 'base_learner_list_{}.append(base_learner)\n'.format(rand_value) base_learner_code += 'meta_feature_generators_list_{}.append("{}")\n'.format( rand_value, base_learner.base_learner_origin.meta_feature_generator ) base_learner_code += '\n\n' base_learner_code += '################################################\n' base_learner_code += '##### Code for building secondary learner ######\n' base_learner_code += '################################################\n' base_learner_code += self.base_learner_origin.source base_learner_code += '\n\n' base_learner_code += 'base_learner' \ '.set_params(**{})\n'.format(self.secondary_learner_hyperparameters) base_learner_code += 'secondary_learner_{} = base_learner\n'.format(rand_value) base_learner_code += '\n\n' base_learner_code += '################################################\n' base_learner_code += '############## Code for CV method 
##############\n' base_learner_code += '################################################\n' base_learner_code += cv_source base_learner_code += '\n\n' base_learner_code += '################################################\n' base_learner_code += '######## Code for Xcessiv stacker class ########\n' base_learner_code += '################################################\n' stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py') with open(stacker_file_loc) as f2: base_learner_code += f2.read() base_learner_code += '\n\n' \ ' def {}(self, X):\n' \ ' return self._process_using_' \ 'meta_feature_generator(X, "{}")\n\n'\ .format(self.base_learner_origin.meta_feature_generator, self.base_learner_origin.meta_feature_generator) base_learner_code += '\n\n' base_learner_code += 'base_learner = XcessivStackedEnsemble' \ '(base_learners=base_learner_list_{},' \ ' meta_feature_generators=meta_feature_generators_list_{},' \ ' secondary_learner=secondary_learner_{},' \ ' cv_function=return_splits_iterable)\n'.format( rand_value, rand_value, rand_value ) return base_learner_code
python
def export_as_code(self, cv_source): """Returns a string value that contains the Python code for the ensemble Args: cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features. Returns: base_learner_code (str, unicode): String that can be used as Python code """ rand_value = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(25)) base_learner_code = '' base_learner_code += 'base_learner_list_{} = []\n'.format(rand_value) base_learner_code += 'meta_feature_generators_list_{} = []\n\n'.format(rand_value) for idx, base_learner in enumerate(self.base_learners): base_learner_code += '################################################\n' base_learner_code += '###### Code for building base learner {} ########\n'.format(idx+1) base_learner_code += '################################################\n' base_learner_code += base_learner.base_learner_origin.source base_learner_code += '\n\n' base_learner_code += 'base_learner' \ '.set_params(**{})\n'.format(base_learner.hyperparameters) base_learner_code += 'base_learner_list_{}.append(base_learner)\n'.format(rand_value) base_learner_code += 'meta_feature_generators_list_{}.append("{}")\n'.format( rand_value, base_learner.base_learner_origin.meta_feature_generator ) base_learner_code += '\n\n' base_learner_code += '################################################\n' base_learner_code += '##### Code for building secondary learner ######\n' base_learner_code += '################################################\n' base_learner_code += self.base_learner_origin.source base_learner_code += '\n\n' base_learner_code += 'base_learner' \ '.set_params(**{})\n'.format(self.secondary_learner_hyperparameters) base_learner_code += 'secondary_learner_{} = base_learner\n'.format(rand_value) base_learner_code += '\n\n' base_learner_code += '################################################\n' base_learner_code += '############## Code for CV method 
##############\n' base_learner_code += '################################################\n' base_learner_code += cv_source base_learner_code += '\n\n' base_learner_code += '################################################\n' base_learner_code += '######## Code for Xcessiv stacker class ########\n' base_learner_code += '################################################\n' stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py') with open(stacker_file_loc) as f2: base_learner_code += f2.read() base_learner_code += '\n\n' \ ' def {}(self, X):\n' \ ' return self._process_using_' \ 'meta_feature_generator(X, "{}")\n\n'\ .format(self.base_learner_origin.meta_feature_generator, self.base_learner_origin.meta_feature_generator) base_learner_code += '\n\n' base_learner_code += 'base_learner = XcessivStackedEnsemble' \ '(base_learners=base_learner_list_{},' \ ' meta_feature_generators=meta_feature_generators_list_{},' \ ' secondary_learner=secondary_learner_{},' \ ' cv_function=return_splits_iterable)\n'.format( rand_value, rand_value, rand_value ) return base_learner_code
[ "def", "export_as_code", "(", "self", ",", "cv_source", ")", ":", "rand_value", "=", "''", ".", "join", "(", "random", ".", "choice", "(", "string", ".", "ascii_uppercase", "+", "string", ".", "digits", ")", "for", "_", "in", "range", "(", "25", ")", ")", "base_learner_code", "=", "''", "base_learner_code", "+=", "'base_learner_list_{} = []\\n'", ".", "format", "(", "rand_value", ")", "base_learner_code", "+=", "'meta_feature_generators_list_{} = []\\n\\n'", ".", "format", "(", "rand_value", ")", "for", "idx", ",", "base_learner", "in", "enumerate", "(", "self", ".", "base_learners", ")", ":", "base_learner_code", "+=", "'################################################\\n'", "base_learner_code", "+=", "'###### Code for building base learner {} ########\\n'", ".", "format", "(", "idx", "+", "1", ")", "base_learner_code", "+=", "'################################################\\n'", "base_learner_code", "+=", "base_learner", ".", "base_learner_origin", ".", "source", "base_learner_code", "+=", "'\\n\\n'", "base_learner_code", "+=", "'base_learner'", "'.set_params(**{})\\n'", ".", "format", "(", "base_learner", ".", "hyperparameters", ")", "base_learner_code", "+=", "'base_learner_list_{}.append(base_learner)\\n'", ".", "format", "(", "rand_value", ")", "base_learner_code", "+=", "'meta_feature_generators_list_{}.append(\"{}\")\\n'", ".", "format", "(", "rand_value", ",", "base_learner", ".", "base_learner_origin", ".", "meta_feature_generator", ")", "base_learner_code", "+=", "'\\n\\n'", "base_learner_code", "+=", "'################################################\\n'", "base_learner_code", "+=", "'##### Code for building secondary learner ######\\n'", "base_learner_code", "+=", "'################################################\\n'", "base_learner_code", "+=", "self", ".", "base_learner_origin", ".", "source", "base_learner_code", "+=", "'\\n\\n'", "base_learner_code", "+=", "'base_learner'", "'.set_params(**{})\\n'", ".", "format", "(", "self", ".", 
"secondary_learner_hyperparameters", ")", "base_learner_code", "+=", "'secondary_learner_{} = base_learner\\n'", ".", "format", "(", "rand_value", ")", "base_learner_code", "+=", "'\\n\\n'", "base_learner_code", "+=", "'################################################\\n'", "base_learner_code", "+=", "'############## Code for CV method ##############\\n'", "base_learner_code", "+=", "'################################################\\n'", "base_learner_code", "+=", "cv_source", "base_learner_code", "+=", "'\\n\\n'", "base_learner_code", "+=", "'################################################\\n'", "base_learner_code", "+=", "'######## Code for Xcessiv stacker class ########\\n'", "base_learner_code", "+=", "'################################################\\n'", "stacker_file_loc", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", ",", "'stacker.py'", ")", "with", "open", "(", "stacker_file_loc", ")", "as", "f2", ":", "base_learner_code", "+=", "f2", ".", "read", "(", ")", "base_learner_code", "+=", "'\\n\\n'", "' def {}(self, X):\\n'", "' return self._process_using_'", "'meta_feature_generator(X, \"{}\")\\n\\n'", ".", "format", "(", "self", ".", "base_learner_origin", ".", "meta_feature_generator", ",", "self", ".", "base_learner_origin", ".", "meta_feature_generator", ")", "base_learner_code", "+=", "'\\n\\n'", "base_learner_code", "+=", "'base_learner = XcessivStackedEnsemble'", "'(base_learners=base_learner_list_{},'", "' meta_feature_generators=meta_feature_generators_list_{},'", "' secondary_learner=secondary_learner_{},'", "' cv_function=return_splits_iterable)\\n'", ".", "format", "(", "rand_value", ",", "rand_value", ",", "rand_value", ")", "return", "base_learner_code" ]
Returns a string value that contains the Python code for the ensemble Args: cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features. Returns: base_learner_code (str, unicode): String that can be used as Python code
[ "Returns", "a", "string", "value", "that", "contains", "the", "Python", "code", "for", "the", "ensemble" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L412-L486
train
reiinakano/xcessiv
xcessiv/models.py
StackedEnsemble.export_as_file
def export_as_file(self, file_path, cv_source): """Export the ensemble as a single Python file and saves it to `file_path`. This is EXPERIMENTAL as putting different modules together would probably wreak havoc especially on modules that make heavy use of global variables. Args: file_path (str, unicode): Absolute/local path of place to save file in cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features. """ if os.path.exists(file_path): raise exceptions.UserError('{} already exists'.format(file_path)) with open(file_path, 'wb') as f: f.write(self.export_as_code(cv_source).encode('utf8'))
python
def export_as_file(self, file_path, cv_source): """Export the ensemble as a single Python file and saves it to `file_path`. This is EXPERIMENTAL as putting different modules together would probably wreak havoc especially on modules that make heavy use of global variables. Args: file_path (str, unicode): Absolute/local path of place to save file in cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features. """ if os.path.exists(file_path): raise exceptions.UserError('{} already exists'.format(file_path)) with open(file_path, 'wb') as f: f.write(self.export_as_code(cv_source).encode('utf8'))
[ "def", "export_as_file", "(", "self", ",", "file_path", ",", "cv_source", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "raise", "exceptions", ".", "UserError", "(", "'{} already exists'", ".", "format", "(", "file_path", ")", ")", "with", "open", "(", "file_path", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "self", ".", "export_as_code", "(", "cv_source", ")", ".", "encode", "(", "'utf8'", ")", ")" ]
Export the ensemble as a single Python file and saves it to `file_path`. This is EXPERIMENTAL as putting different modules together would probably wreak havoc especially on modules that make heavy use of global variables. Args: file_path (str, unicode): Absolute/local path of place to save file in cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features.
[ "Export", "the", "ensemble", "as", "a", "single", "Python", "file", "and", "saves", "it", "to", "file_path", "." ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L488-L504
train
reiinakano/xcessiv
xcessiv/models.py
StackedEnsemble.export_as_package
def export_as_package(self, package_path, cv_source): """Exports the ensemble as a Python package and saves it to `package_path`. Args: package_path (str, unicode): Absolute/local path of place to save package in cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features. Raises: exceptions.UserError: If os.path.join(path, name) already exists. """ if os.path.exists(package_path): raise exceptions.UserError('{} already exists'.format(package_path)) package_name = os.path.basename(os.path.normpath(package_path)) os.makedirs(package_path) # Write __init__.py with open(os.path.join(package_path, '__init__.py'), 'wb') as f: f.write('from {}.builder import xcessiv_ensemble'.format(package_name).encode('utf8')) # Create package baselearners with each base learner having its own module os.makedirs(os.path.join(package_path, 'baselearners')) open(os.path.join(package_path, 'baselearners', '__init__.py'), 'a').close() for idx, base_learner in enumerate(self.base_learners): base_learner.export_as_file(os.path.join(package_path, 'baselearners', 'baselearner' + str(idx))) # Create metalearner.py containing secondary learner self.base_learner_origin.export_as_file( os.path.join(package_path, 'metalearner'), self.secondary_learner_hyperparameters ) # Create cv.py containing CV method for getting meta-features with open(os.path.join(package_path, 'cv.py'), 'wb') as f: f.write(cv_source.encode('utf8')) # Create stacker.py containing class for Xcessiv ensemble ensemble_source = '' stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py') with open(stacker_file_loc) as f: ensemble_source += f.read() ensemble_source += '\n\n' \ ' def {}(self, X):\n' \ ' return self._process_using_' \ 'meta_feature_generator(X, "{}")\n\n'\ .format(self.base_learner_origin.meta_feature_generator, self.base_learner_origin.meta_feature_generator) with open(os.path.join(package_path, 'stacker.py'), 
'wb') as f: f.write(ensemble_source.encode('utf8')) # Create builder.py containing file where `xcessiv_ensemble` is instantiated for import builder_source = '' for idx, base_learner in enumerate(self.base_learners): builder_source += 'from {}.baselearners import baselearner{}\n'.format(package_name, idx) builder_source += 'from {}.cv import return_splits_iterable\n'.format(package_name) builder_source += 'from {} import metalearner\n'.format(package_name) builder_source += 'from {}.stacker import XcessivStackedEnsemble\n'.format(package_name) builder_source += '\nbase_learners = [\n' for idx, base_learner in enumerate(self.base_learners): builder_source += ' baselearner{}.base_learner,\n'.format(idx) builder_source += ']\n' builder_source += '\nmeta_feature_generators = [\n' for idx, base_learner in enumerate(self.base_learners): builder_source += ' baselearner{}.meta_feature_generator,\n'.format(idx) builder_source += ']\n' builder_source += '\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners,' \ ' meta_feature_generators=meta_feature_generators,' \ ' secondary_learner=metalearner.base_learner,' \ ' cv_function=return_splits_iterable)\n' with open(os.path.join(package_path, 'builder.py'), 'wb') as f: f.write(builder_source.encode('utf8'))
python
def export_as_package(self, package_path, cv_source): """Exports the ensemble as a Python package and saves it to `package_path`. Args: package_path (str, unicode): Absolute/local path of place to save package in cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features. Raises: exceptions.UserError: If os.path.join(path, name) already exists. """ if os.path.exists(package_path): raise exceptions.UserError('{} already exists'.format(package_path)) package_name = os.path.basename(os.path.normpath(package_path)) os.makedirs(package_path) # Write __init__.py with open(os.path.join(package_path, '__init__.py'), 'wb') as f: f.write('from {}.builder import xcessiv_ensemble'.format(package_name).encode('utf8')) # Create package baselearners with each base learner having its own module os.makedirs(os.path.join(package_path, 'baselearners')) open(os.path.join(package_path, 'baselearners', '__init__.py'), 'a').close() for idx, base_learner in enumerate(self.base_learners): base_learner.export_as_file(os.path.join(package_path, 'baselearners', 'baselearner' + str(idx))) # Create metalearner.py containing secondary learner self.base_learner_origin.export_as_file( os.path.join(package_path, 'metalearner'), self.secondary_learner_hyperparameters ) # Create cv.py containing CV method for getting meta-features with open(os.path.join(package_path, 'cv.py'), 'wb') as f: f.write(cv_source.encode('utf8')) # Create stacker.py containing class for Xcessiv ensemble ensemble_source = '' stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py') with open(stacker_file_loc) as f: ensemble_source += f.read() ensemble_source += '\n\n' \ ' def {}(self, X):\n' \ ' return self._process_using_' \ 'meta_feature_generator(X, "{}")\n\n'\ .format(self.base_learner_origin.meta_feature_generator, self.base_learner_origin.meta_feature_generator) with open(os.path.join(package_path, 'stacker.py'), 
'wb') as f: f.write(ensemble_source.encode('utf8')) # Create builder.py containing file where `xcessiv_ensemble` is instantiated for import builder_source = '' for idx, base_learner in enumerate(self.base_learners): builder_source += 'from {}.baselearners import baselearner{}\n'.format(package_name, idx) builder_source += 'from {}.cv import return_splits_iterable\n'.format(package_name) builder_source += 'from {} import metalearner\n'.format(package_name) builder_source += 'from {}.stacker import XcessivStackedEnsemble\n'.format(package_name) builder_source += '\nbase_learners = [\n' for idx, base_learner in enumerate(self.base_learners): builder_source += ' baselearner{}.base_learner,\n'.format(idx) builder_source += ']\n' builder_source += '\nmeta_feature_generators = [\n' for idx, base_learner in enumerate(self.base_learners): builder_source += ' baselearner{}.meta_feature_generator,\n'.format(idx) builder_source += ']\n' builder_source += '\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners,' \ ' meta_feature_generators=meta_feature_generators,' \ ' secondary_learner=metalearner.base_learner,' \ ' cv_function=return_splits_iterable)\n' with open(os.path.join(package_path, 'builder.py'), 'wb') as f: f.write(builder_source.encode('utf8'))
[ "def", "export_as_package", "(", "self", ",", "package_path", ",", "cv_source", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "package_path", ")", ":", "raise", "exceptions", ".", "UserError", "(", "'{} already exists'", ".", "format", "(", "package_path", ")", ")", "package_name", "=", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "normpath", "(", "package_path", ")", ")", "os", ".", "makedirs", "(", "package_path", ")", "# Write __init__.py", "with", "open", "(", "os", ".", "path", ".", "join", "(", "package_path", ",", "'__init__.py'", ")", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "'from {}.builder import xcessiv_ensemble'", ".", "format", "(", "package_name", ")", ".", "encode", "(", "'utf8'", ")", ")", "# Create package baselearners with each base learner having its own module", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "package_path", ",", "'baselearners'", ")", ")", "open", "(", "os", ".", "path", ".", "join", "(", "package_path", ",", "'baselearners'", ",", "'__init__.py'", ")", ",", "'a'", ")", ".", "close", "(", ")", "for", "idx", ",", "base_learner", "in", "enumerate", "(", "self", ".", "base_learners", ")", ":", "base_learner", ".", "export_as_file", "(", "os", ".", "path", ".", "join", "(", "package_path", ",", "'baselearners'", ",", "'baselearner'", "+", "str", "(", "idx", ")", ")", ")", "# Create metalearner.py containing secondary learner", "self", ".", "base_learner_origin", ".", "export_as_file", "(", "os", ".", "path", ".", "join", "(", "package_path", ",", "'metalearner'", ")", ",", "self", ".", "secondary_learner_hyperparameters", ")", "# Create cv.py containing CV method for getting meta-features", "with", "open", "(", "os", ".", "path", ".", "join", "(", "package_path", ",", "'cv.py'", ")", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "cv_source", ".", "encode", "(", "'utf8'", ")", ")", "# Create stacker.py containing class for Xcessiv ensemble", 
"ensemble_source", "=", "''", "stacker_file_loc", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", ",", "'stacker.py'", ")", "with", "open", "(", "stacker_file_loc", ")", "as", "f", ":", "ensemble_source", "+=", "f", ".", "read", "(", ")", "ensemble_source", "+=", "'\\n\\n'", "' def {}(self, X):\\n'", "' return self._process_using_'", "'meta_feature_generator(X, \"{}\")\\n\\n'", ".", "format", "(", "self", ".", "base_learner_origin", ".", "meta_feature_generator", ",", "self", ".", "base_learner_origin", ".", "meta_feature_generator", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "package_path", ",", "'stacker.py'", ")", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "ensemble_source", ".", "encode", "(", "'utf8'", ")", ")", "# Create builder.py containing file where `xcessiv_ensemble` is instantiated for import", "builder_source", "=", "''", "for", "idx", ",", "base_learner", "in", "enumerate", "(", "self", ".", "base_learners", ")", ":", "builder_source", "+=", "'from {}.baselearners import baselearner{}\\n'", ".", "format", "(", "package_name", ",", "idx", ")", "builder_source", "+=", "'from {}.cv import return_splits_iterable\\n'", ".", "format", "(", "package_name", ")", "builder_source", "+=", "'from {} import metalearner\\n'", ".", "format", "(", "package_name", ")", "builder_source", "+=", "'from {}.stacker import XcessivStackedEnsemble\\n'", ".", "format", "(", "package_name", ")", "builder_source", "+=", "'\\nbase_learners = [\\n'", "for", "idx", ",", "base_learner", "in", "enumerate", "(", "self", ".", "base_learners", ")", ":", "builder_source", "+=", "' baselearner{}.base_learner,\\n'", ".", "format", "(", "idx", ")", "builder_source", "+=", "']\\n'", "builder_source", "+=", "'\\nmeta_feature_generators = [\\n'", "for", "idx", ",", "base_learner", "in", "enumerate", "(", "self", ".", "base_learners", ")", ":", "builder_source", 
"+=", "' baselearner{}.meta_feature_generator,\\n'", ".", "format", "(", "idx", ")", "builder_source", "+=", "']\\n'", "builder_source", "+=", "'\\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners,'", "' meta_feature_generators=meta_feature_generators,'", "' secondary_learner=metalearner.base_learner,'", "' cv_function=return_splits_iterable)\\n'", "with", "open", "(", "os", ".", "path", ".", "join", "(", "package_path", ",", "'builder.py'", ")", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "builder_source", ".", "encode", "(", "'utf8'", ")", ")" ]
Exports the ensemble as a Python package and saves it to `package_path`. Args: package_path (str, unicode): Absolute/local path of place to save package in cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features. Raises: exceptions.UserError: If os.path.join(path, name) already exists.
[ "Exports", "the", "ensemble", "as", "a", "Python", "package", "and", "saves", "it", "to", "package_path", "." ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L506-L591
train
reiinakano/xcessiv
xcessiv/views.py
verify_full_extraction
def verify_full_extraction(): """This is an experimental endpoint to simultaneously verify data statistics and extraction for training, test, and holdout datasets. With this, the other three verification methods will no longer be necessary. """ path = functions.get_path_from_query_string(request) if request.method == 'POST': rqtasks.extraction_data_statistics(path) with functions.DBContextManager(path) as session: extraction = session.query(models.Extraction).first() return jsonify(extraction.data_statistics)
python
def verify_full_extraction(): """This is an experimental endpoint to simultaneously verify data statistics and extraction for training, test, and holdout datasets. With this, the other three verification methods will no longer be necessary. """ path = functions.get_path_from_query_string(request) if request.method == 'POST': rqtasks.extraction_data_statistics(path) with functions.DBContextManager(path) as session: extraction = session.query(models.Extraction).first() return jsonify(extraction.data_statistics)
[ "def", "verify_full_extraction", "(", ")", ":", "path", "=", "functions", ".", "get_path_from_query_string", "(", "request", ")", "if", "request", ".", "method", "==", "'POST'", ":", "rqtasks", ".", "extraction_data_statistics", "(", "path", ")", "with", "functions", ".", "DBContextManager", "(", "path", ")", "as", "session", ":", "extraction", "=", "session", ".", "query", "(", "models", ".", "Extraction", ")", ".", "first", "(", ")", "return", "jsonify", "(", "extraction", ".", "data_statistics", ")" ]
This is an experimental endpoint to simultaneously verify data statistics and extraction for training, test, and holdout datasets. With this, the other three verification methods will no longer be necessary.
[ "This", "is", "an", "experimental", "endpoint", "to", "simultaneously", "verify", "data", "statistics", "and", "extraction", "for", "training", "test", "and", "holdout", "datasets", ".", "With", "this", "the", "other", "three", "verification", "methods", "will", "no", "longer", "be", "necessary", "." ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/views.py#L156-L169
train
reiinakano/xcessiv
xcessiv/views.py
create_base_learner
def create_base_learner(id): """This creates a single base learner from a base learner origin and queues it up""" path = functions.get_path_from_query_string(request) with functions.DBContextManager(path) as session: base_learner_origin = session.query(models.BaseLearnerOrigin).filter_by(id=id).first() if base_learner_origin is None: raise exceptions.UserError('Base learner origin {} not found'.format(id), 404) if not base_learner_origin.final: raise exceptions.UserError('Base learner origin {} is not final'.format(id)) req_body = request.get_json() # Retrieve full hyperparameters est = base_learner_origin.return_estimator() hyperparameters = functions.import_object_from_string_code(req_body['source'], 'params') est.set_params(**hyperparameters) hyperparameters = functions.make_serializable(est.get_params()) base_learners = session.query(models.BaseLearner).\ filter_by(base_learner_origin_id=id, hyperparameters=hyperparameters).all() if base_learners: raise exceptions.UserError('Base learner exists with given hyperparameters') base_learner = models.BaseLearner(hyperparameters, 'queued', base_learner_origin) if 'single_searches' not in base_learner_origin.description: base_learner_origin.description['single_searches'] = [] base_learner_origin.description['single_searches'] += ([req_body['source']]) session.add(base_learner) session.add(base_learner_origin) session.commit() with Connection(get_redis_connection()): rqtasks.generate_meta_features.delay(path, base_learner.id) return jsonify(base_learner.serialize)
python
def create_base_learner(id): """This creates a single base learner from a base learner origin and queues it up""" path = functions.get_path_from_query_string(request) with functions.DBContextManager(path) as session: base_learner_origin = session.query(models.BaseLearnerOrigin).filter_by(id=id).first() if base_learner_origin is None: raise exceptions.UserError('Base learner origin {} not found'.format(id), 404) if not base_learner_origin.final: raise exceptions.UserError('Base learner origin {} is not final'.format(id)) req_body = request.get_json() # Retrieve full hyperparameters est = base_learner_origin.return_estimator() hyperparameters = functions.import_object_from_string_code(req_body['source'], 'params') est.set_params(**hyperparameters) hyperparameters = functions.make_serializable(est.get_params()) base_learners = session.query(models.BaseLearner).\ filter_by(base_learner_origin_id=id, hyperparameters=hyperparameters).all() if base_learners: raise exceptions.UserError('Base learner exists with given hyperparameters') base_learner = models.BaseLearner(hyperparameters, 'queued', base_learner_origin) if 'single_searches' not in base_learner_origin.description: base_learner_origin.description['single_searches'] = [] base_learner_origin.description['single_searches'] += ([req_body['source']]) session.add(base_learner) session.add(base_learner_origin) session.commit() with Connection(get_redis_connection()): rqtasks.generate_meta_features.delay(path, base_learner.id) return jsonify(base_learner.serialize)
[ "def", "create_base_learner", "(", "id", ")", ":", "path", "=", "functions", ".", "get_path_from_query_string", "(", "request", ")", "with", "functions", ".", "DBContextManager", "(", "path", ")", "as", "session", ":", "base_learner_origin", "=", "session", ".", "query", "(", "models", ".", "BaseLearnerOrigin", ")", ".", "filter_by", "(", "id", "=", "id", ")", ".", "first", "(", ")", "if", "base_learner_origin", "is", "None", ":", "raise", "exceptions", ".", "UserError", "(", "'Base learner origin {} not found'", ".", "format", "(", "id", ")", ",", "404", ")", "if", "not", "base_learner_origin", ".", "final", ":", "raise", "exceptions", ".", "UserError", "(", "'Base learner origin {} is not final'", ".", "format", "(", "id", ")", ")", "req_body", "=", "request", ".", "get_json", "(", ")", "# Retrieve full hyperparameters", "est", "=", "base_learner_origin", ".", "return_estimator", "(", ")", "hyperparameters", "=", "functions", ".", "import_object_from_string_code", "(", "req_body", "[", "'source'", "]", ",", "'params'", ")", "est", ".", "set_params", "(", "*", "*", "hyperparameters", ")", "hyperparameters", "=", "functions", ".", "make_serializable", "(", "est", ".", "get_params", "(", ")", ")", "base_learners", "=", "session", ".", "query", "(", "models", ".", "BaseLearner", ")", ".", "filter_by", "(", "base_learner_origin_id", "=", "id", ",", "hyperparameters", "=", "hyperparameters", ")", ".", "all", "(", ")", "if", "base_learners", ":", "raise", "exceptions", ".", "UserError", "(", "'Base learner exists with given hyperparameters'", ")", "base_learner", "=", "models", ".", "BaseLearner", "(", "hyperparameters", ",", "'queued'", ",", "base_learner_origin", ")", "if", "'single_searches'", "not", "in", "base_learner_origin", ".", "description", ":", "base_learner_origin", ".", "description", "[", "'single_searches'", "]", "=", "[", "]", "base_learner_origin", ".", "description", "[", "'single_searches'", "]", "+=", "(", "[", "req_body", "[", "'source'", "]", 
"]", ")", "session", ".", "add", "(", "base_learner", ")", "session", ".", "add", "(", "base_learner_origin", ")", "session", ".", "commit", "(", ")", "with", "Connection", "(", "get_redis_connection", "(", ")", ")", ":", "rqtasks", ".", "generate_meta_features", ".", "delay", "(", "path", ",", "base_learner", ".", "id", ")", "return", "jsonify", "(", "base_learner", ".", "serialize", ")" ]
This creates a single base learner from a base learner origin and queues it up
[ "This", "creates", "a", "single", "base", "learner", "from", "a", "base", "learner", "origin", "and", "queues", "it", "up" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/views.py#L306-L348
train
reiinakano/xcessiv
xcessiv/views.py
search_base_learner
def search_base_learner(id): """Creates a set of base learners from base learner origin using grid search and queues them up """ path = functions.get_path_from_query_string(request) req_body = request.get_json() if req_body['method'] == 'grid': param_grid = functions.import_object_from_string_code( req_body['source'], 'param_grid' ) iterator = ParameterGrid(param_grid) elif req_body['method'] == 'random': param_distributions = functions.import_object_from_string_code( req_body['source'], 'param_distributions' ) iterator = ParameterSampler(param_distributions, n_iter=req_body['n_iter']) else: raise exceptions.UserError('{} not a valid search method'.format(req_body['method'])) with functions.DBContextManager(path) as session: base_learner_origin = session.query(models.BaseLearnerOrigin).filter_by(id=id).first() if base_learner_origin is None: raise exceptions.UserError('Base learner origin {} not found'.format(id), 404) if not base_learner_origin.final: raise exceptions.UserError('Base learner origin {} is not final'.format(id)) learners = [] for params in iterator: est = base_learner_origin.return_estimator() try: est.set_params(**params) except Exception as e: print(repr(e)) continue hyperparameters = functions.make_serializable(est.get_params()) base_learners = session.query(models.BaseLearner).\ filter_by(base_learner_origin_id=id, hyperparameters=hyperparameters).all() if base_learners: # already exists continue base_learner = models.BaseLearner(hyperparameters, 'queued', base_learner_origin) session.add(base_learner) session.commit() with Connection(get_redis_connection()): rqtasks.generate_meta_features.delay(path, base_learner.id) learners.append(base_learner) if not learners: raise exceptions.UserError('Created 0 new base learners') if req_body['method'] == 'grid': if 'grid_searches' not in base_learner_origin.description: base_learner_origin.description['grid_searches'] = [] base_learner_origin.description['grid_searches'] += ([req_body['source']]) elif 
req_body['method'] == 'random': if 'random_searches' not in base_learner_origin.description: base_learner_origin.description['random_searches'] = [] base_learner_origin.description['random_searches'] += ([req_body['source']]) session.add(base_learner_origin) session.commit() return jsonify(list(map(lambda x: x.serialize, learners)))
python
def search_base_learner(id): """Creates a set of base learners from base learner origin using grid search and queues them up """ path = functions.get_path_from_query_string(request) req_body = request.get_json() if req_body['method'] == 'grid': param_grid = functions.import_object_from_string_code( req_body['source'], 'param_grid' ) iterator = ParameterGrid(param_grid) elif req_body['method'] == 'random': param_distributions = functions.import_object_from_string_code( req_body['source'], 'param_distributions' ) iterator = ParameterSampler(param_distributions, n_iter=req_body['n_iter']) else: raise exceptions.UserError('{} not a valid search method'.format(req_body['method'])) with functions.DBContextManager(path) as session: base_learner_origin = session.query(models.BaseLearnerOrigin).filter_by(id=id).first() if base_learner_origin is None: raise exceptions.UserError('Base learner origin {} not found'.format(id), 404) if not base_learner_origin.final: raise exceptions.UserError('Base learner origin {} is not final'.format(id)) learners = [] for params in iterator: est = base_learner_origin.return_estimator() try: est.set_params(**params) except Exception as e: print(repr(e)) continue hyperparameters = functions.make_serializable(est.get_params()) base_learners = session.query(models.BaseLearner).\ filter_by(base_learner_origin_id=id, hyperparameters=hyperparameters).all() if base_learners: # already exists continue base_learner = models.BaseLearner(hyperparameters, 'queued', base_learner_origin) session.add(base_learner) session.commit() with Connection(get_redis_connection()): rqtasks.generate_meta_features.delay(path, base_learner.id) learners.append(base_learner) if not learners: raise exceptions.UserError('Created 0 new base learners') if req_body['method'] == 'grid': if 'grid_searches' not in base_learner_origin.description: base_learner_origin.description['grid_searches'] = [] base_learner_origin.description['grid_searches'] += ([req_body['source']]) elif 
req_body['method'] == 'random': if 'random_searches' not in base_learner_origin.description: base_learner_origin.description['random_searches'] = [] base_learner_origin.description['random_searches'] += ([req_body['source']]) session.add(base_learner_origin) session.commit() return jsonify(list(map(lambda x: x.serialize, learners)))
[ "def", "search_base_learner", "(", "id", ")", ":", "path", "=", "functions", ".", "get_path_from_query_string", "(", "request", ")", "req_body", "=", "request", ".", "get_json", "(", ")", "if", "req_body", "[", "'method'", "]", "==", "'grid'", ":", "param_grid", "=", "functions", ".", "import_object_from_string_code", "(", "req_body", "[", "'source'", "]", ",", "'param_grid'", ")", "iterator", "=", "ParameterGrid", "(", "param_grid", ")", "elif", "req_body", "[", "'method'", "]", "==", "'random'", ":", "param_distributions", "=", "functions", ".", "import_object_from_string_code", "(", "req_body", "[", "'source'", "]", ",", "'param_distributions'", ")", "iterator", "=", "ParameterSampler", "(", "param_distributions", ",", "n_iter", "=", "req_body", "[", "'n_iter'", "]", ")", "else", ":", "raise", "exceptions", ".", "UserError", "(", "'{} not a valid search method'", ".", "format", "(", "req_body", "[", "'method'", "]", ")", ")", "with", "functions", ".", "DBContextManager", "(", "path", ")", "as", "session", ":", "base_learner_origin", "=", "session", ".", "query", "(", "models", ".", "BaseLearnerOrigin", ")", ".", "filter_by", "(", "id", "=", "id", ")", ".", "first", "(", ")", "if", "base_learner_origin", "is", "None", ":", "raise", "exceptions", ".", "UserError", "(", "'Base learner origin {} not found'", ".", "format", "(", "id", ")", ",", "404", ")", "if", "not", "base_learner_origin", ".", "final", ":", "raise", "exceptions", ".", "UserError", "(", "'Base learner origin {} is not final'", ".", "format", "(", "id", ")", ")", "learners", "=", "[", "]", "for", "params", "in", "iterator", ":", "est", "=", "base_learner_origin", ".", "return_estimator", "(", ")", "try", ":", "est", ".", "set_params", "(", "*", "*", "params", ")", "except", "Exception", "as", "e", ":", "print", "(", "repr", "(", "e", ")", ")", "continue", "hyperparameters", "=", "functions", ".", "make_serializable", "(", "est", ".", "get_params", "(", ")", ")", "base_learners", "=", "session", 
".", "query", "(", "models", ".", "BaseLearner", ")", ".", "filter_by", "(", "base_learner_origin_id", "=", "id", ",", "hyperparameters", "=", "hyperparameters", ")", ".", "all", "(", ")", "if", "base_learners", ":", "# already exists", "continue", "base_learner", "=", "models", ".", "BaseLearner", "(", "hyperparameters", ",", "'queued'", ",", "base_learner_origin", ")", "session", ".", "add", "(", "base_learner", ")", "session", ".", "commit", "(", ")", "with", "Connection", "(", "get_redis_connection", "(", ")", ")", ":", "rqtasks", ".", "generate_meta_features", ".", "delay", "(", "path", ",", "base_learner", ".", "id", ")", "learners", ".", "append", "(", "base_learner", ")", "if", "not", "learners", ":", "raise", "exceptions", ".", "UserError", "(", "'Created 0 new base learners'", ")", "if", "req_body", "[", "'method'", "]", "==", "'grid'", ":", "if", "'grid_searches'", "not", "in", "base_learner_origin", ".", "description", ":", "base_learner_origin", ".", "description", "[", "'grid_searches'", "]", "=", "[", "]", "base_learner_origin", ".", "description", "[", "'grid_searches'", "]", "+=", "(", "[", "req_body", "[", "'source'", "]", "]", ")", "elif", "req_body", "[", "'method'", "]", "==", "'random'", ":", "if", "'random_searches'", "not", "in", "base_learner_origin", ".", "description", ":", "base_learner_origin", ".", "description", "[", "'random_searches'", "]", "=", "[", "]", "base_learner_origin", ".", "description", "[", "'random_searches'", "]", "+=", "(", "[", "req_body", "[", "'source'", "]", "]", ")", "session", ".", "add", "(", "base_learner_origin", ")", "session", ".", "commit", "(", ")", "return", "jsonify", "(", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "serialize", ",", "learners", ")", ")", ")" ]
Creates a set of base learners from base learner origin using grid search and queues them up
[ "Creates", "a", "set", "of", "base", "learners", "from", "base", "learner", "origin", "using", "grid", "search", "and", "queues", "them", "up" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/views.py#L352-L424
train
reiinakano/xcessiv
xcessiv/views.py
get_automated_runs
def get_automated_runs(): """Return all automated runs""" path = functions.get_path_from_query_string(request) if request.method == 'GET': with functions.DBContextManager(path) as session: automated_runs = session.query(models.AutomatedRun).all() return jsonify(list(map(lambda x: x.serialize, automated_runs))) if request.method == 'POST': req_body = request.get_json() with functions.DBContextManager(path) as session: base_learner_origin = None if req_body['category'] == 'bayes' or req_body['category'] == 'greedy_ensemble_search': base_learner_origin = session.query(models.BaseLearnerOrigin).\ filter_by(id=req_body['base_learner_origin_id']).first() if base_learner_origin is None: raise exceptions.UserError('Base learner origin {} not found'.format( req_body['base_learner_origin_id'] ), 404) if not base_learner_origin.final: raise exceptions.UserError('Base learner origin {} is not final'.format( req_body['base_learner_origin_id'] )) elif req_body['category'] == 'tpot': pass else: raise exceptions.UserError('Automated run category' ' {} not recognized'.format(req_body['category'])) # Check for any syntax errors module = functions.import_string_code_as_module(req_body['source']) del module automated_run = models.AutomatedRun(req_body['source'], 'queued', req_body['category'], base_learner_origin) session.add(automated_run) session.commit() with Connection(get_redis_connection()): rqtasks.start_automated_run.delay(path, automated_run.id) return jsonify(automated_run.serialize)
python
def get_automated_runs(): """Return all automated runs""" path = functions.get_path_from_query_string(request) if request.method == 'GET': with functions.DBContextManager(path) as session: automated_runs = session.query(models.AutomatedRun).all() return jsonify(list(map(lambda x: x.serialize, automated_runs))) if request.method == 'POST': req_body = request.get_json() with functions.DBContextManager(path) as session: base_learner_origin = None if req_body['category'] == 'bayes' or req_body['category'] == 'greedy_ensemble_search': base_learner_origin = session.query(models.BaseLearnerOrigin).\ filter_by(id=req_body['base_learner_origin_id']).first() if base_learner_origin is None: raise exceptions.UserError('Base learner origin {} not found'.format( req_body['base_learner_origin_id'] ), 404) if not base_learner_origin.final: raise exceptions.UserError('Base learner origin {} is not final'.format( req_body['base_learner_origin_id'] )) elif req_body['category'] == 'tpot': pass else: raise exceptions.UserError('Automated run category' ' {} not recognized'.format(req_body['category'])) # Check for any syntax errors module = functions.import_string_code_as_module(req_body['source']) del module automated_run = models.AutomatedRun(req_body['source'], 'queued', req_body['category'], base_learner_origin) session.add(automated_run) session.commit() with Connection(get_redis_connection()): rqtasks.start_automated_run.delay(path, automated_run.id) return jsonify(automated_run.serialize)
[ "def", "get_automated_runs", "(", ")", ":", "path", "=", "functions", ".", "get_path_from_query_string", "(", "request", ")", "if", "request", ".", "method", "==", "'GET'", ":", "with", "functions", ".", "DBContextManager", "(", "path", ")", "as", "session", ":", "automated_runs", "=", "session", ".", "query", "(", "models", ".", "AutomatedRun", ")", ".", "all", "(", ")", "return", "jsonify", "(", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "serialize", ",", "automated_runs", ")", ")", ")", "if", "request", ".", "method", "==", "'POST'", ":", "req_body", "=", "request", ".", "get_json", "(", ")", "with", "functions", ".", "DBContextManager", "(", "path", ")", "as", "session", ":", "base_learner_origin", "=", "None", "if", "req_body", "[", "'category'", "]", "==", "'bayes'", "or", "req_body", "[", "'category'", "]", "==", "'greedy_ensemble_search'", ":", "base_learner_origin", "=", "session", ".", "query", "(", "models", ".", "BaseLearnerOrigin", ")", ".", "filter_by", "(", "id", "=", "req_body", "[", "'base_learner_origin_id'", "]", ")", ".", "first", "(", ")", "if", "base_learner_origin", "is", "None", ":", "raise", "exceptions", ".", "UserError", "(", "'Base learner origin {} not found'", ".", "format", "(", "req_body", "[", "'base_learner_origin_id'", "]", ")", ",", "404", ")", "if", "not", "base_learner_origin", ".", "final", ":", "raise", "exceptions", ".", "UserError", "(", "'Base learner origin {} is not final'", ".", "format", "(", "req_body", "[", "'base_learner_origin_id'", "]", ")", ")", "elif", "req_body", "[", "'category'", "]", "==", "'tpot'", ":", "pass", "else", ":", "raise", "exceptions", ".", "UserError", "(", "'Automated run category'", "' {} not recognized'", ".", "format", "(", "req_body", "[", "'category'", "]", ")", ")", "# Check for any syntax errors", "module", "=", "functions", ".", "import_string_code_as_module", "(", "req_body", "[", "'source'", "]", ")", "del", "module", "automated_run", "=", "models", ".", "AutomatedRun", 
"(", "req_body", "[", "'source'", "]", ",", "'queued'", ",", "req_body", "[", "'category'", "]", ",", "base_learner_origin", ")", "session", ".", "add", "(", "automated_run", ")", "session", ".", "commit", "(", ")", "with", "Connection", "(", "get_redis_connection", "(", ")", ")", ":", "rqtasks", ".", "start_automated_run", ".", "delay", "(", "path", ",", "automated_run", ".", "id", ")", "return", "jsonify", "(", "automated_run", ".", "serialize", ")" ]
Return all automated runs
[ "Return", "all", "automated", "runs" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/views.py#L428-L476
train
reiinakano/xcessiv
xcessiv/stacker.py
XcessivStackedEnsemble._process_using_meta_feature_generator
def _process_using_meta_feature_generator(self, X, meta_feature_generator): """Process using secondary learner meta-feature generator Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba, this internal method gives the ability to use any string. Just make sure secondary learner has the method. Args: X (array-like): Features array meta_feature_generator (str, unicode): Method for use by secondary learner """ all_learner_meta_features = [] for idx, base_learner in enumerate(self.base_learners): single_learner_meta_features = getattr(base_learner, self.meta_feature_generators[idx])(X) if len(single_learner_meta_features.shape) == 1: single_learner_meta_features = single_learner_meta_features.reshape(-1, 1) all_learner_meta_features.append(single_learner_meta_features) all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1) out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features) return out
python
def _process_using_meta_feature_generator(self, X, meta_feature_generator): """Process using secondary learner meta-feature generator Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba, this internal method gives the ability to use any string. Just make sure secondary learner has the method. Args: X (array-like): Features array meta_feature_generator (str, unicode): Method for use by secondary learner """ all_learner_meta_features = [] for idx, base_learner in enumerate(self.base_learners): single_learner_meta_features = getattr(base_learner, self.meta_feature_generators[idx])(X) if len(single_learner_meta_features.shape) == 1: single_learner_meta_features = single_learner_meta_features.reshape(-1, 1) all_learner_meta_features.append(single_learner_meta_features) all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1) out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features) return out
[ "def", "_process_using_meta_feature_generator", "(", "self", ",", "X", ",", "meta_feature_generator", ")", ":", "all_learner_meta_features", "=", "[", "]", "for", "idx", ",", "base_learner", "in", "enumerate", "(", "self", ".", "base_learners", ")", ":", "single_learner_meta_features", "=", "getattr", "(", "base_learner", ",", "self", ".", "meta_feature_generators", "[", "idx", "]", ")", "(", "X", ")", "if", "len", "(", "single_learner_meta_features", ".", "shape", ")", "==", "1", ":", "single_learner_meta_features", "=", "single_learner_meta_features", ".", "reshape", "(", "-", "1", ",", "1", ")", "all_learner_meta_features", ".", "append", "(", "single_learner_meta_features", ")", "all_learner_meta_features", "=", "np", ".", "concatenate", "(", "all_learner_meta_features", ",", "axis", "=", "1", ")", "out", "=", "getattr", "(", "self", ".", "secondary_learner", ",", "meta_feature_generator", ")", "(", "all_learner_meta_features", ")", "return", "out" ]
Process using secondary learner meta-feature generator Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba, this internal method gives the ability to use any string. Just make sure secondary learner has the method. Args: X (array-like): Features array meta_feature_generator (str, unicode): Method for use by secondary learner
[ "Process", "using", "secondary", "learner", "meta", "-", "feature", "generator" ]
a48dff7d370c84eb5c243bde87164c1f5fd096d5
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/stacker.py#L77-L103
train
madedotcom/photon-pump
photonpump/messages.py
NewEvent
def NewEvent( type: str, id: UUID = None, data: JsonDict = None, metadata: JsonDict = None ) -> NewEventData: """Build the data structure for a new event. Args: type: An event type. id: The uuid identifier for the event. data: A dict containing data for the event. These data must be json serializable. metadata: A dict containing metadata about the event. These must be json serializable. """ return NewEventData(id or uuid4(), type, data, metadata)
python
def NewEvent( type: str, id: UUID = None, data: JsonDict = None, metadata: JsonDict = None ) -> NewEventData: """Build the data structure for a new event. Args: type: An event type. id: The uuid identifier for the event. data: A dict containing data for the event. These data must be json serializable. metadata: A dict containing metadata about the event. These must be json serializable. """ return NewEventData(id or uuid4(), type, data, metadata)
[ "def", "NewEvent", "(", "type", ":", "str", ",", "id", ":", "UUID", "=", "None", ",", "data", ":", "JsonDict", "=", "None", ",", "metadata", ":", "JsonDict", "=", "None", ")", "->", "NewEventData", ":", "return", "NewEventData", "(", "id", "or", "uuid4", "(", ")", ",", "type", ",", "data", ",", "metadata", ")" ]
Build the data structure for a new event. Args: type: An event type. id: The uuid identifier for the event. data: A dict containing data for the event. These data must be json serializable. metadata: A dict containing metadata about the event. These must be json serializable.
[ "Build", "the", "data", "structure", "for", "a", "new", "event", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/messages.py#L439-L453
train
madedotcom/photon-pump
photonpump/messages.py
Credential.from_bytes
def from_bytes(cls, data): """ I am so sorry. """ len_username = int.from_bytes(data[0:2], byteorder="big") offset_username = 2 + len_username username = data[2:offset_username].decode("UTF-8") offset_password = 2 + offset_username len_password = int.from_bytes( data[offset_username:offset_password], byteorder="big" ) pass_begin = offset_password pass_end = offset_password + len_password password = data[pass_begin:pass_end].decode("UTF-8") return cls(username, password)
python
def from_bytes(cls, data): """ I am so sorry. """ len_username = int.from_bytes(data[0:2], byteorder="big") offset_username = 2 + len_username username = data[2:offset_username].decode("UTF-8") offset_password = 2 + offset_username len_password = int.from_bytes( data[offset_username:offset_password], byteorder="big" ) pass_begin = offset_password pass_end = offset_password + len_password password = data[pass_begin:pass_end].decode("UTF-8") return cls(username, password)
[ "def", "from_bytes", "(", "cls", ",", "data", ")", ":", "len_username", "=", "int", ".", "from_bytes", "(", "data", "[", "0", ":", "2", "]", ",", "byteorder", "=", "\"big\"", ")", "offset_username", "=", "2", "+", "len_username", "username", "=", "data", "[", "2", ":", "offset_username", "]", ".", "decode", "(", "\"UTF-8\"", ")", "offset_password", "=", "2", "+", "offset_username", "len_password", "=", "int", ".", "from_bytes", "(", "data", "[", "offset_username", ":", "offset_password", "]", ",", "byteorder", "=", "\"big\"", ")", "pass_begin", "=", "offset_password", "pass_end", "=", "offset_password", "+", "len_password", "password", "=", "data", "[", "pass_begin", ":", "pass_end", "]", ".", "decode", "(", "\"UTF-8\"", ")", "return", "cls", "(", "username", ",", "password", ")" ]
I am so sorry.
[ "I", "am", "so", "sorry", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/messages.py#L155-L170
train
madedotcom/photon-pump
photonpump/connection.py
connect
def connect( host="localhost", port=1113, discovery_host=None, discovery_port=2113, username=None, password=None, loop=None, name=None, selector=select_random, ) -> Client: """ Create a new client. Examples: Since the Client is an async context manager, we can use it in a with block for automatic connect/disconnect semantics. >>> async with connect(host='127.0.0.1', port=1113) as c: >>> await c.ping() Or we can call connect at a more convenient moment >>> c = connect() >>> await c.connect() >>> await c.ping() >>> await c.close() For cluster discovery cases, we can provide a discovery host and port. The host may be an IP or DNS entry. If you provide a DNS entry, discovery will choose randomly from the registered IP addresses for the hostname. >>> async with connect(discovery_host="eventstore.test") as c: >>> await c.ping() The discovery host returns gossip data about the cluster. We use the gossip to select a node at random from the avaialble cluster members. If you're using :meth:`persistent subscriptions <photonpump.connection.Client.create_subscription>` you will always want to connect to the master node of the cluster. The selector parameter is a function that chooses an available node from the gossip result. To select the master node, use the :func:`photonpump.discovery.prefer_master` function. This function will return the master node if there is a live master, and a random replica otherwise. All requests to the server can be made with the require_master flag which will raise an error if the current node is not a master. >>> async with connect( >>> discovery_host="eventstore.test", >>> selector=discovery.prefer_master, >>> ) as c: >>> await c.ping(require_master=True) Conversely, you might want to avoid connecting to the master node for reasons of scalability. For this you can use the :func:`photonpump.discovery.prefer_replica` function. 
>>> async with connect( >>> discovery_host="eventstore.test", >>> selector=discovery.prefer_replica, >>> ) as c: >>> await c.ping() For some operations, you may need to authenticate your requests by providing a username and password to the client. >>> async with connect(username='admin', password='changeit') as c: >>> await c.ping() Ordinarily you will create a single Client per application, but for advanced scenarios you might want multiple connections. In this situation, you can name each connection in order to get better logging. >>> async with connect(name="event-reader"): >>> await c.ping() >>> async with connect(name="event-writer"): >>> await c.ping() Args: host: The IP or DNS entry to connect with, defaults to 'localhost'. port: The port to connect with, defaults to 1113. discovery_host: The IP or DNS entry to use for cluster discovery. discovery_port: The port to use for cluster discovery, defaults to 2113. username: The username to use when communicating with eventstore. password: The password to use when communicating with eventstore. loop:An Asyncio event loop. selector: An optional function that selects one element from a list of :class:`photonpump.disovery.DiscoveredNode` elements. """ discovery = get_discoverer(host, port, discovery_host, discovery_port, selector) dispatcher = MessageDispatcher(name=name, loop=loop) connector = Connector(discovery, dispatcher, name=name) credential = msg.Credential(username, password) if username and password else None return Client(connector, dispatcher, credential=credential)
python
def connect( host="localhost", port=1113, discovery_host=None, discovery_port=2113, username=None, password=None, loop=None, name=None, selector=select_random, ) -> Client: """ Create a new client. Examples: Since the Client is an async context manager, we can use it in a with block for automatic connect/disconnect semantics. >>> async with connect(host='127.0.0.1', port=1113) as c: >>> await c.ping() Or we can call connect at a more convenient moment >>> c = connect() >>> await c.connect() >>> await c.ping() >>> await c.close() For cluster discovery cases, we can provide a discovery host and port. The host may be an IP or DNS entry. If you provide a DNS entry, discovery will choose randomly from the registered IP addresses for the hostname. >>> async with connect(discovery_host="eventstore.test") as c: >>> await c.ping() The discovery host returns gossip data about the cluster. We use the gossip to select a node at random from the avaialble cluster members. If you're using :meth:`persistent subscriptions <photonpump.connection.Client.create_subscription>` you will always want to connect to the master node of the cluster. The selector parameter is a function that chooses an available node from the gossip result. To select the master node, use the :func:`photonpump.discovery.prefer_master` function. This function will return the master node if there is a live master, and a random replica otherwise. All requests to the server can be made with the require_master flag which will raise an error if the current node is not a master. >>> async with connect( >>> discovery_host="eventstore.test", >>> selector=discovery.prefer_master, >>> ) as c: >>> await c.ping(require_master=True) Conversely, you might want to avoid connecting to the master node for reasons of scalability. For this you can use the :func:`photonpump.discovery.prefer_replica` function. 
>>> async with connect( >>> discovery_host="eventstore.test", >>> selector=discovery.prefer_replica, >>> ) as c: >>> await c.ping() For some operations, you may need to authenticate your requests by providing a username and password to the client. >>> async with connect(username='admin', password='changeit') as c: >>> await c.ping() Ordinarily you will create a single Client per application, but for advanced scenarios you might want multiple connections. In this situation, you can name each connection in order to get better logging. >>> async with connect(name="event-reader"): >>> await c.ping() >>> async with connect(name="event-writer"): >>> await c.ping() Args: host: The IP or DNS entry to connect with, defaults to 'localhost'. port: The port to connect with, defaults to 1113. discovery_host: The IP or DNS entry to use for cluster discovery. discovery_port: The port to use for cluster discovery, defaults to 2113. username: The username to use when communicating with eventstore. password: The password to use when communicating with eventstore. loop:An Asyncio event loop. selector: An optional function that selects one element from a list of :class:`photonpump.disovery.DiscoveredNode` elements. """ discovery = get_discoverer(host, port, discovery_host, discovery_port, selector) dispatcher = MessageDispatcher(name=name, loop=loop) connector = Connector(discovery, dispatcher, name=name) credential = msg.Credential(username, password) if username and password else None return Client(connector, dispatcher, credential=credential)
[ "def", "connect", "(", "host", "=", "\"localhost\"", ",", "port", "=", "1113", ",", "discovery_host", "=", "None", ",", "discovery_port", "=", "2113", ",", "username", "=", "None", ",", "password", "=", "None", ",", "loop", "=", "None", ",", "name", "=", "None", ",", "selector", "=", "select_random", ",", ")", "->", "Client", ":", "discovery", "=", "get_discoverer", "(", "host", ",", "port", ",", "discovery_host", ",", "discovery_port", ",", "selector", ")", "dispatcher", "=", "MessageDispatcher", "(", "name", "=", "name", ",", "loop", "=", "loop", ")", "connector", "=", "Connector", "(", "discovery", ",", "dispatcher", ",", "name", "=", "name", ")", "credential", "=", "msg", ".", "Credential", "(", "username", ",", "password", ")", "if", "username", "and", "password", "else", "None", "return", "Client", "(", "connector", ",", "dispatcher", ",", "credential", "=", "credential", ")" ]
Create a new client. Examples: Since the Client is an async context manager, we can use it in a with block for automatic connect/disconnect semantics. >>> async with connect(host='127.0.0.1', port=1113) as c: >>> await c.ping() Or we can call connect at a more convenient moment >>> c = connect() >>> await c.connect() >>> await c.ping() >>> await c.close() For cluster discovery cases, we can provide a discovery host and port. The host may be an IP or DNS entry. If you provide a DNS entry, discovery will choose randomly from the registered IP addresses for the hostname. >>> async with connect(discovery_host="eventstore.test") as c: >>> await c.ping() The discovery host returns gossip data about the cluster. We use the gossip to select a node at random from the avaialble cluster members. If you're using :meth:`persistent subscriptions <photonpump.connection.Client.create_subscription>` you will always want to connect to the master node of the cluster. The selector parameter is a function that chooses an available node from the gossip result. To select the master node, use the :func:`photonpump.discovery.prefer_master` function. This function will return the master node if there is a live master, and a random replica otherwise. All requests to the server can be made with the require_master flag which will raise an error if the current node is not a master. >>> async with connect( >>> discovery_host="eventstore.test", >>> selector=discovery.prefer_master, >>> ) as c: >>> await c.ping(require_master=True) Conversely, you might want to avoid connecting to the master node for reasons of scalability. For this you can use the :func:`photonpump.discovery.prefer_replica` function. >>> async with connect( >>> discovery_host="eventstore.test", >>> selector=discovery.prefer_replica, >>> ) as c: >>> await c.ping() For some operations, you may need to authenticate your requests by providing a username and password to the client. 
>>> async with connect(username='admin', password='changeit') as c: >>> await c.ping() Ordinarily you will create a single Client per application, but for advanced scenarios you might want multiple connections. In this situation, you can name each connection in order to get better logging. >>> async with connect(name="event-reader"): >>> await c.ping() >>> async with connect(name="event-writer"): >>> await c.ping() Args: host: The IP or DNS entry to connect with, defaults to 'localhost'. port: The port to connect with, defaults to 1113. discovery_host: The IP or DNS entry to use for cluster discovery. discovery_port: The port to use for cluster discovery, defaults to 2113. username: The username to use when communicating with eventstore. password: The password to use when communicating with eventstore. loop:An Asyncio event loop. selector: An optional function that selects one element from a list of :class:`photonpump.disovery.DiscoveredNode` elements.
[ "Create", "a", "new", "client", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L1190-L1290
train
madedotcom/photon-pump
photonpump/connection.py
MessageReader.start
async def start(self): """Loop forever reading messages and invoking the operation that caused them""" while True: try: data = await self.reader.read(8192) if self._trace_enabled: self._logger.trace( "Received %d bytes from remote server:\n%s", len(data), msg.dump(data), ) await self.process(data) except asyncio.CancelledError: return except: logging.exception("Unhandled error in Message Reader") raise
python
async def start(self): """Loop forever reading messages and invoking the operation that caused them""" while True: try: data = await self.reader.read(8192) if self._trace_enabled: self._logger.trace( "Received %d bytes from remote server:\n%s", len(data), msg.dump(data), ) await self.process(data) except asyncio.CancelledError: return except: logging.exception("Unhandled error in Message Reader") raise
[ "async", "def", "start", "(", "self", ")", ":", "while", "True", ":", "try", ":", "data", "=", "await", "self", ".", "reader", ".", "read", "(", "8192", ")", "if", "self", ".", "_trace_enabled", ":", "self", ".", "_logger", ".", "trace", "(", "\"Received %d bytes from remote server:\\n%s\"", ",", "len", "(", "data", ")", ",", "msg", ".", "dump", "(", "data", ")", ",", ")", "await", "self", ".", "process", "(", "data", ")", "except", "asyncio", ".", "CancelledError", ":", "return", "except", ":", "logging", ".", "exception", "(", "\"Unhandled error in Message Reader\"", ")", "raise" ]
Loop forever reading messages and invoking the operation that caused them
[ "Loop", "forever", "reading", "messages", "and", "invoking", "the", "operation", "that", "caused", "them" ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L397-L416
train
madedotcom/photon-pump
photonpump/connection.py
Client.ping
async def ping(self, conversation_id: uuid.UUID = None) -> float: """ Send a message to the remote server to check liveness. Returns: The round-trip time to receive a Pong message in fractional seconds Examples: >>> async with connect() as conn: >>> print("Sending a PING to the server") >>> time_secs = await conn.ping() >>> print("Received a PONG after {} secs".format(time_secs)) """ cmd = convo.Ping(conversation_id=conversation_id or uuid.uuid4()) result = await self.dispatcher.start_conversation(cmd) return await result
python
async def ping(self, conversation_id: uuid.UUID = None) -> float: """ Send a message to the remote server to check liveness. Returns: The round-trip time to receive a Pong message in fractional seconds Examples: >>> async with connect() as conn: >>> print("Sending a PING to the server") >>> time_secs = await conn.ping() >>> print("Received a PONG after {} secs".format(time_secs)) """ cmd = convo.Ping(conversation_id=conversation_id or uuid.uuid4()) result = await self.dispatcher.start_conversation(cmd) return await result
[ "async", "def", "ping", "(", "self", ",", "conversation_id", ":", "uuid", ".", "UUID", "=", "None", ")", "->", "float", ":", "cmd", "=", "convo", ".", "Ping", "(", "conversation_id", "=", "conversation_id", "or", "uuid", ".", "uuid4", "(", ")", ")", "result", "=", "await", "self", ".", "dispatcher", ".", "start_conversation", "(", "cmd", ")", "return", "await", "result" ]
Send a message to the remote server to check liveness. Returns: The round-trip time to receive a Pong message in fractional seconds Examples: >>> async with connect() as conn: >>> print("Sending a PING to the server") >>> time_secs = await conn.ping() >>> print("Received a PONG after {} secs".format(time_secs))
[ "Send", "a", "message", "to", "the", "remote", "server", "to", "check", "liveness", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L581-L599
train
madedotcom/photon-pump
photonpump/connection.py
Client.publish_event
async def publish_event( self, stream: str, type: str, body: Optional[Any] = None, id: Optional[uuid.UUID] = None, metadata: Optional[Any] = None, expected_version: int = -2, require_master: bool = False, ) -> None: """ Publish a single event to the EventStore. This method publishes a single event to the remote server and waits for acknowledgement. Args: stream: The stream to publish the event to. type: the event's type. body: a serializable body for the event. id: a unique id for the event. PhotonPump will automatically generate an id if none is provided. metadata: Optional serializable metadata block for the event. expected_version: Used for concurrency control. If a positive integer is provided, EventStore will check that the stream is at that version before accepting a write. There are three magic values: -4: StreamMustExist. Checks that the stream already exists. -2: Any. Disables concurrency checks -1: NoStream. Checks that the stream does not yet exist. 0: EmptyStream. Checks that the stream has been explicitly created but does not yet contain any events. require_master: If true, slave nodes will reject this message. Examples: >>> async with connect() as conn: >>> await conn.publish_event( >>> "inventory_item-1", >>> "item_created", >>> body={ "item-id": 1, "created-date": "2018-08-19" }, >>> expected_version=ExpectedVersion.StreamMustNotExist >>> ) >>> >>> await conn.publish_event( >>> "inventory_item-1", >>> "item_deleted", >>> expected_version=1, >>> metadata={'deleted-by': 'bob' } >>> ) """ event = msg.NewEvent(type, id or uuid.uuid4(), body, metadata) conversation = convo.WriteEvents( stream, [event], expected_version=expected_version, require_master=require_master, ) result = await self.dispatcher.start_conversation(conversation) return await result
python
async def publish_event( self, stream: str, type: str, body: Optional[Any] = None, id: Optional[uuid.UUID] = None, metadata: Optional[Any] = None, expected_version: int = -2, require_master: bool = False, ) -> None: """ Publish a single event to the EventStore. This method publishes a single event to the remote server and waits for acknowledgement. Args: stream: The stream to publish the event to. type: the event's type. body: a serializable body for the event. id: a unique id for the event. PhotonPump will automatically generate an id if none is provided. metadata: Optional serializable metadata block for the event. expected_version: Used for concurrency control. If a positive integer is provided, EventStore will check that the stream is at that version before accepting a write. There are three magic values: -4: StreamMustExist. Checks that the stream already exists. -2: Any. Disables concurrency checks -1: NoStream. Checks that the stream does not yet exist. 0: EmptyStream. Checks that the stream has been explicitly created but does not yet contain any events. require_master: If true, slave nodes will reject this message. Examples: >>> async with connect() as conn: >>> await conn.publish_event( >>> "inventory_item-1", >>> "item_created", >>> body={ "item-id": 1, "created-date": "2018-08-19" }, >>> expected_version=ExpectedVersion.StreamMustNotExist >>> ) >>> >>> await conn.publish_event( >>> "inventory_item-1", >>> "item_deleted", >>> expected_version=1, >>> metadata={'deleted-by': 'bob' } >>> ) """ event = msg.NewEvent(type, id or uuid.uuid4(), body, metadata) conversation = convo.WriteEvents( stream, [event], expected_version=expected_version, require_master=require_master, ) result = await self.dispatcher.start_conversation(conversation) return await result
[ "async", "def", "publish_event", "(", "self", ",", "stream", ":", "str", ",", "type", ":", "str", ",", "body", ":", "Optional", "[", "Any", "]", "=", "None", ",", "id", ":", "Optional", "[", "uuid", ".", "UUID", "]", "=", "None", ",", "metadata", ":", "Optional", "[", "Any", "]", "=", "None", ",", "expected_version", ":", "int", "=", "-", "2", ",", "require_master", ":", "bool", "=", "False", ",", ")", "->", "None", ":", "event", "=", "msg", ".", "NewEvent", "(", "type", ",", "id", "or", "uuid", ".", "uuid4", "(", ")", ",", "body", ",", "metadata", ")", "conversation", "=", "convo", ".", "WriteEvents", "(", "stream", ",", "[", "event", "]", ",", "expected_version", "=", "expected_version", ",", "require_master", "=", "require_master", ",", ")", "result", "=", "await", "self", ".", "dispatcher", ".", "start_conversation", "(", "conversation", ")", "return", "await", "result" ]
Publish a single event to the EventStore. This method publishes a single event to the remote server and waits for acknowledgement. Args: stream: The stream to publish the event to. type: the event's type. body: a serializable body for the event. id: a unique id for the event. PhotonPump will automatically generate an id if none is provided. metadata: Optional serializable metadata block for the event. expected_version: Used for concurrency control. If a positive integer is provided, EventStore will check that the stream is at that version before accepting a write. There are three magic values: -4: StreamMustExist. Checks that the stream already exists. -2: Any. Disables concurrency checks -1: NoStream. Checks that the stream does not yet exist. 0: EmptyStream. Checks that the stream has been explicitly created but does not yet contain any events. require_master: If true, slave nodes will reject this message. Examples: >>> async with connect() as conn: >>> await conn.publish_event( >>> "inventory_item-1", >>> "item_created", >>> body={ "item-id": 1, "created-date": "2018-08-19" }, >>> expected_version=ExpectedVersion.StreamMustNotExist >>> ) >>> >>> await conn.publish_event( >>> "inventory_item-1", >>> "item_deleted", >>> expected_version=1, >>> metadata={'deleted-by': 'bob' } >>> )
[ "Publish", "a", "single", "event", "to", "the", "EventStore", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L601-L663
train
madedotcom/photon-pump
photonpump/connection.py
Client.get_event
async def get_event(
    self,
    stream: str,
    event_number: int,
    resolve_links=True,
    require_master=False,
    correlation_id: uuid.UUID = None,
) -> msg.Event:
    """
    Fetch a single event identified by stream name and sequence number.

    Args:
        stream: The name of the stream containing the event.
        event_number: The sequence number of the event to read.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        correlation_id (optional): A unique identifier for this
            command; generated automatically when omitted.

    Returns:
        The resolved event if found, else None.

    Examples:
        >>> async with connection() as conn:
        >>>     await conn.publish("inventory_item-1", "item_created")
        >>>     event = await conn.get_event("inventory_item-1", 1)
        >>>     print(event)
    """
    # Fall back to a fresh id so every conversation is traceable.
    conversation_id = correlation_id or uuid.uuid4()
    read_convo = convo.ReadEvent(
        stream,
        event_number,
        resolve_links,
        require_master,
        conversation_id=conversation_id,
    )
    pending = await self.dispatcher.start_conversation(read_convo)
    return await pending
python
async def get_event( self, stream: str, event_number: int, resolve_links=True, require_master=False, correlation_id: uuid.UUID = None, ) -> msg.Event: """ Get a single event by stream and event number. Args: stream: The name of the stream containing the event. event_number: The sequence number of the event to read. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Returns: The resolved event if found, else None. Examples: >>> async with connection() as conn: >>> await conn.publish("inventory_item-1", "item_created") >>> event = await conn.get_event("inventory_item-1", 1) >>> print(event) """ correlation_id = correlation_id or uuid.uuid4() cmd = convo.ReadEvent( stream, event_number, resolve_links, require_master, conversation_id=correlation_id, ) result = await self.dispatcher.start_conversation(cmd) return await result
[ "async", "def", "get_event", "(", "self", ",", "stream", ":", "str", ",", "event_number", ":", "int", ",", "resolve_links", "=", "True", ",", "require_master", "=", "False", ",", "correlation_id", ":", "uuid", ".", "UUID", "=", "None", ",", ")", "->", "msg", ".", "Event", ":", "correlation_id", "=", "correlation_id", "or", "uuid", ".", "uuid4", "(", ")", "cmd", "=", "convo", ".", "ReadEvent", "(", "stream", ",", "event_number", ",", "resolve_links", ",", "require_master", ",", "conversation_id", "=", "correlation_id", ",", ")", "result", "=", "await", "self", ".", "dispatcher", ".", "start_conversation", "(", "cmd", ")", "return", "await", "result" ]
Get a single event by stream and event number. Args: stream: The name of the stream containing the event. event_number: The sequence number of the event to read. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Returns: The resolved event if found, else None. Examples: >>> async with connection() as conn: >>> await conn.publish("inventory_item-1", "item_created") >>> event = await conn.get_event("inventory_item-1", 1) >>> print(event)
[ "Get", "a", "single", "event", "by", "stream", "and", "event", "number", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L682-L724
train
madedotcom/photon-pump
photonpump/connection.py
Client.get
async def get(
    self,
    stream: str,
    direction: msg.StreamDirection = msg.StreamDirection.Forward,
    from_event: int = 0,
    max_count: int = 100,
    resolve_links: bool = True,
    require_master: bool = False,
    correlation_id: uuid.UUID = None,
):
    """
    Read a range of events from a stream.

    Args:
        stream: The name of the stream to read
        direction (optional): Controls whether to read events forward or
            backward. defaults to Forward.
        from_event (optional): The first event to read. defaults to the
            beginning of the stream when direction is forward and the end of
            the stream if direction is backward.
        max_count (optional): The maximum number of events to return.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        correlation_id (optional): A unique identifier for this
            command. Currently accepted but not forwarded (see NOTE below).

    Examples:
        Read 5 events from a stream

        >>> async for event in conn.get("my-stream", max_count=5):
        >>>     print(event)

        Read events 21 to 30

        >>> async for event in conn.get("my-stream", max_count=10, from_event=21):
        >>>     print(event)

        Read 10 most recent events in reverse order

        >>> async for event in conn.get(
                "my-stream", max_count=10, direction=StreamDirection.Backward
            ):
        >>>     print(event)
    """
    # FIX: removed the no-op ``correlation_id = correlation_id``
    # self-assignment that previously sat here.
    # NOTE(review): correlation_id is accepted but never passed to the
    # conversation, unlike get_event — confirm whether ReadStreamEvents
    # should receive a conversation_id.
    cmd = convo.ReadStreamEvents(
        stream,
        from_event,
        max_count,
        resolve_links,
        require_master,
        direction=direction,
    )
    result = await self.dispatcher.start_conversation(cmd)
    return await result
python
async def get( self, stream: str, direction: msg.StreamDirection = msg.StreamDirection.Forward, from_event: int = 0, max_count: int = 100, resolve_links: bool = True, require_master: bool = False, correlation_id: uuid.UUID = None, ): """ Read a range of events from a stream. Args: stream: The name of the stream to read direction (optional): Controls whether to read events forward or backward. defaults to Forward. from_event (optional): The first event to read. defaults to the beginning of the stream when direction is forward and the end of the stream if direction is backward. max_count (optional): The maximum number of events to return. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Examples: Read 5 events from a stream >>> async for event in conn.get("my-stream", max_count=5): >>> print(event) Read events 21 to 30 >>> async for event in conn.get("my-stream", max_count=10, from_event=21): >>> print(event) Read 10 most recent events in reverse order >>> async for event in conn.get( "my-stream", max_count=10, direction=StreamDirection.Backward ): >>> print(event) """ correlation_id = correlation_id cmd = convo.ReadStreamEvents( stream, from_event, max_count, resolve_links, require_master, direction=direction, ) result = await self.dispatcher.start_conversation(cmd) return await result
[ "async", "def", "get", "(", "self", ",", "stream", ":", "str", ",", "direction", ":", "msg", ".", "StreamDirection", "=", "msg", ".", "StreamDirection", ".", "Forward", ",", "from_event", ":", "int", "=", "0", ",", "max_count", ":", "int", "=", "100", ",", "resolve_links", ":", "bool", "=", "True", ",", "require_master", ":", "bool", "=", "False", ",", "correlation_id", ":", "uuid", ".", "UUID", "=", "None", ",", ")", ":", "correlation_id", "=", "correlation_id", "cmd", "=", "convo", ".", "ReadStreamEvents", "(", "stream", ",", "from_event", ",", "max_count", ",", "resolve_links", ",", "require_master", ",", "direction", "=", "direction", ",", ")", "result", "=", "await", "self", ".", "dispatcher", ".", "start_conversation", "(", "cmd", ")", "return", "await", "result" ]
Read a range of events from a stream. Args: stream: The name of the stream to read direction (optional): Controls whether to read events forward or backward. defaults to Forward. from_event (optional): The first event to read. defaults to the beginning of the stream when direction is forward and the end of the stream if direction is backward. max_count (optional): The maximum number of events to return. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Examples: Read 5 events from a stream >>> async for event in conn.get("my-stream", max_count=5): >>> print(event) Read events 21 to 30 >>> async for event in conn.get("my-stream", max_count=10, from_event=21): >>> print(event) Read 10 most recent events in reverse order >>> async for event in conn.get( "my-stream", max_count=10, direction=StreamDirection.Backward ): >>> print(event)
[ "Read", "a", "range", "of", "events", "from", "a", "stream", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L726-L786
train
madedotcom/photon-pump
photonpump/connection.py
Client.get_all
async def get_all(
    self,
    direction: msg.StreamDirection = msg.StreamDirection.Forward,
    from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None,
    max_count: int = 100,
    resolve_links: bool = True,
    require_master: bool = False,
    correlation_id: uuid.UUID = None,
):
    """
    Read a range of events from the whole database.

    Args:
        direction (optional): Controls whether to read events forward or
            backward. defaults to Forward.
        from_position (optional): The position to read from. defaults to the
            beginning of the stream when direction is forward and the end of
            the stream if direction is backward.
        max_count (optional): The maximum number of events to return.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        correlation_id (optional): A unique identifier for this
            command. Currently accepted but not forwarded (see NOTE below).

    Examples:
        Read 5 events

        >>> async for event in conn.get_all(max_count=5):
        >>>     print(event)

        Read 10 most recent events in reverse order

        >>> async for event in conn.get_all(
                max_count=10, direction=StreamDirection.Backward
            ):
        >>>     print(event)
    """
    # FIX: removed the no-op ``correlation_id = correlation_id``
    # self-assignment that previously sat here.
    # NOTE(review): correlation_id is accepted but never passed to the
    # conversation — confirm whether ReadAllEvents should receive it.
    cmd = convo.ReadAllEvents(
        msg.Position.for_direction(direction, from_position),
        max_count,
        resolve_links,
        require_master,
        direction=direction,
        credentials=self.credential,
    )
    result = await self.dispatcher.start_conversation(cmd)
    return await result
python
async def get_all( self, direction: msg.StreamDirection = msg.StreamDirection.Forward, from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None, max_count: int = 100, resolve_links: bool = True, require_master: bool = False, correlation_id: uuid.UUID = None, ): """ Read a range of events from the whole database. Args: direction (optional): Controls whether to read events forward or backward. defaults to Forward. from_position (optional): The position to read from. defaults to the beginning of the stream when direction is forward and the end of the stream if direction is backward. max_count (optional): The maximum number of events to return. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Examples: Read 5 events >>> async for event in conn.get_all(max_count=5): >>> print(event) Read 10 most recent events in reverse order >>> async for event in conn.get_all( max_count=10, direction=StreamDirection.Backward ): >>> print(event) """ correlation_id = correlation_id cmd = convo.ReadAllEvents( msg.Position.for_direction(direction, from_position), max_count, resolve_links, require_master, direction=direction, credentials=self.credential, ) result = await self.dispatcher.start_conversation(cmd) return await result
[ "async", "def", "get_all", "(", "self", ",", "direction", ":", "msg", ".", "StreamDirection", "=", "msg", ".", "StreamDirection", ".", "Forward", ",", "from_position", ":", "Optional", "[", "Union", "[", "msg", ".", "Position", ",", "msg", ".", "_PositionSentinel", "]", "]", "=", "None", ",", "max_count", ":", "int", "=", "100", ",", "resolve_links", ":", "bool", "=", "True", ",", "require_master", ":", "bool", "=", "False", ",", "correlation_id", ":", "uuid", ".", "UUID", "=", "None", ",", ")", ":", "correlation_id", "=", "correlation_id", "cmd", "=", "convo", ".", "ReadAllEvents", "(", "msg", ".", "Position", ".", "for_direction", "(", "direction", ",", "from_position", ")", ",", "max_count", ",", "resolve_links", ",", "require_master", ",", "direction", "=", "direction", ",", "credentials", "=", "self", ".", "credential", ",", ")", "result", "=", "await", "self", ".", "dispatcher", ".", "start_conversation", "(", "cmd", ")", "return", "await", "result" ]
Read a range of events from the whole database. Args: direction (optional): Controls whether to read events forward or backward. defaults to Forward. from_position (optional): The position to read from. defaults to the beginning of the stream when direction is forward and the end of the stream if direction is backward. max_count (optional): The maximum number of events to return. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Examples: Read 5 events >>> async for event in conn.get_all(max_count=5): >>> print(event) Read 10 most recent events in reverse order >>> async for event in conn.get_all( max_count=10, direction=StreamDirection.Backward ): >>> print(event)
[ "Read", "a", "range", "of", "events", "from", "the", "whole", "database", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L788-L840
train
madedotcom/photon-pump
photonpump/connection.py
Client.iter
async def iter(
    self,
    stream: str,
    direction: msg.StreamDirection = msg.StreamDirection.Forward,
    from_event: int = None,
    batch_size: int = 100,
    resolve_links: bool = True,
    require_master: bool = False,
    correlation_id: uuid.UUID = None,
):
    """
    Read through a stream of events until the end and then stop.

    Args:
        stream: The name of the stream to read.
        direction: Controls whether to read forward or backward through the
            stream. Defaults to StreamDirection.Forward
        from_event: The sequence number of the first event to read from the
            stream. Reads from the appropriate end of the stream if unset.
        batch_size: The maximum number of events to read at a time.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
            Currently accepted but not forwarded (see NOTE below).
        correlation_id (optional): A unique identifier for this
            command. Currently accepted but not forwarded (see NOTE below).

    Examples:
        Print every event from the stream "my-stream".

        >>> with async.connect() as conn:
        >>>     async for event in conn.iter("my-stream"):
        >>>         print(event)

        Print every event from the stream "my-stream" in reverse order

        >>> with async.connect() as conn:
        >>>     async for event in conn.iter("my-stream", direction=StreamDirection.Backward):
        >>>         print(event)

        Skip the first 10 events of the stream

        >>> with async.connect() as conn:
        >>>     async for event in conn.iter("my-stream", from_event=11):
        >>>         print(event)
    """
    # FIX: removed a dead ``correlation_id = correlation_id or uuid.uuid4()``
    # whose value was never used.
    # NOTE(review): neither require_master nor correlation_id is passed to
    # the conversation — confirm whether IterStreamEvents should accept them.
    cmd = convo.IterStreamEvents(
        stream,
        from_event,
        batch_size,
        resolve_links,
        direction=direction,
        credentials=self.credential,
    )
    result = await self.dispatcher.start_conversation(cmd)
    iterator = await result

    async for event in iterator:
        yield event
python
async def iter( self, stream: str, direction: msg.StreamDirection = msg.StreamDirection.Forward, from_event: int = None, batch_size: int = 100, resolve_links: bool = True, require_master: bool = False, correlation_id: uuid.UUID = None, ): """ Read through a stream of events until the end and then stop. Args: stream: The name of the stream to read. direction: Controls whether to read forward or backward through the stream. Defaults to StreamDirection.Forward from_event: The sequence number of the first event to read from the stream. Reads from the appropriate end of the stream if unset. batch_size: The maximum number of events to read at a time. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Examples: Print every event from the stream "my-stream". >>> with async.connect() as conn: >>> async for event in conn.iter("my-stream"): >>> print(event) Print every event from the stream "my-stream" in reverse order >>> with async.connect() as conn: >>> async for event in conn.iter("my-stream", direction=StreamDirection.Backward): >>> print(event) Skip the first 10 events of the stream >>> with async.connect() as conn: >>> async for event in conn.iter("my-stream", from_event=11): >>> print(event) """ correlation_id = correlation_id or uuid.uuid4() cmd = convo.IterStreamEvents( stream, from_event, batch_size, resolve_links, direction=direction, credentials=self.credential, ) result = await self.dispatcher.start_conversation(cmd) iterator = await result async for event in iterator: yield event
[ "async", "def", "iter", "(", "self", ",", "stream", ":", "str", ",", "direction", ":", "msg", ".", "StreamDirection", "=", "msg", ".", "StreamDirection", ".", "Forward", ",", "from_event", ":", "int", "=", "None", ",", "batch_size", ":", "int", "=", "100", ",", "resolve_links", ":", "bool", "=", "True", ",", "require_master", ":", "bool", "=", "False", ",", "correlation_id", ":", "uuid", ".", "UUID", "=", "None", ",", ")", ":", "correlation_id", "=", "correlation_id", "or", "uuid", ".", "uuid4", "(", ")", "cmd", "=", "convo", ".", "IterStreamEvents", "(", "stream", ",", "from_event", ",", "batch_size", ",", "resolve_links", ",", "direction", "=", "direction", ",", "credentials", "=", "self", ".", "credential", ",", ")", "result", "=", "await", "self", ".", "dispatcher", ".", "start_conversation", "(", "cmd", ")", "iterator", "=", "await", "result", "async", "for", "event", "in", "iterator", ":", "yield", "event" ]
Read through a stream of events until the end and then stop. Args: stream: The name of the stream to read. direction: Controls whether to read forward or backward through the stream. Defaults to StreamDirection.Forward from_event: The sequence number of the first event to read from the stream. Reads from the appropriate end of the stream if unset. batch_size: The maximum number of events to read at a time. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Examples: Print every event from the stream "my-stream". >>> with async.connect() as conn: >>> async for event in conn.iter("my-stream"): >>> print(event) Print every event from the stream "my-stream" in reverse order >>> with async.connect() as conn: >>> async for event in conn.iter("my-stream", direction=StreamDirection.Backward): >>> print(event) Skip the first 10 events of the stream >>> with async.connect() as conn: >>> async for event in conn.iter("my-stream", from_event=11): >>> print(event)
[ "Read", "through", "a", "stream", "of", "events", "until", "the", "end", "and", "then", "stop", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L842-L902
train
madedotcom/photon-pump
photonpump/connection.py
Client.iter_all
async def iter_all(
    self,
    direction: msg.StreamDirection = msg.StreamDirection.Forward,
    from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None,
    batch_size: int = 100,
    resolve_links: bool = True,
    require_master: bool = False,
    correlation_id: Optional[uuid.UUID] = None,
):
    """
    Read through all the events in the database.

    Args:
        direction (optional): Controls whether to read forward or backward
            through the events. Defaults to StreamDirection.Forward
        from_position (optional): The position to start reading from.
            Defaults to photonpump.Beginning when direction is Forward,
            photonpump.End when direction is Backward.
        batch_size (optional): The maximum number of events to read at a time.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        correlation_id (optional): A unique identifier for this
            command.

    Examples:
        Print every event from the database.

        >>> with async.connect() as conn:
        >>>     async for event in conn.iter_all()
        >>>         print(event)

        Print every event from the database in reverse order

        >>> with async.connect() as conn:
        >>>     async for event in conn.iter_all(direction=StreamDirection.Backward):
        >>>         print(event)

        Start reading from a known commit position

        >>> with async.connect() as conn:
        >>>     async for event in conn.iter_all(from_position=Position(12345))
        >>>         print(event)
    """
    # FIX: removed the no-op ``correlation_id = correlation_id``
    # self-assignment; the (possibly None) value is forwarded as-is.
    cmd = convo.IterAllEvents(
        msg.Position.for_direction(direction, from_position),
        batch_size,
        resolve_links,
        require_master,
        direction,
        self.credential,
        correlation_id,
    )
    result = await self.dispatcher.start_conversation(cmd)
    iterator = await result

    async for event in iterator:
        yield event
python
async def iter_all( self, direction: msg.StreamDirection = msg.StreamDirection.Forward, from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None, batch_size: int = 100, resolve_links: bool = True, require_master: bool = False, correlation_id: Optional[uuid.UUID] = None, ): """ Read through all the events in the database. Args: direction (optional): Controls whether to read forward or backward through the events. Defaults to StreamDirection.Forward from_position (optional): The position to start reading from. Defaults to photonpump.Beginning when direction is Forward, photonpump.End when direction is Backward. batch_size (optional): The maximum number of events to read at a time. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Examples: Print every event from the database. >>> with async.connect() as conn: >>> async for event in conn.iter_all() >>> print(event) Print every event from the database in reverse order >>> with async.connect() as conn: >>> async for event in conn.iter_all(direction=StreamDirection.Backward): >>> print(event) Start reading from a known commit position >>> with async.connect() as conn: >>> async for event in conn.iter_all(from_position=Position(12345)) >>> print(event) """ correlation_id = correlation_id cmd = convo.IterAllEvents( msg.Position.for_direction(direction, from_position), batch_size, resolve_links, require_master, direction, self.credential, correlation_id, ) result = await self.dispatcher.start_conversation(cmd) iterator = await result async for event in iterator: yield event
[ "async", "def", "iter_all", "(", "self", ",", "direction", ":", "msg", ".", "StreamDirection", "=", "msg", ".", "StreamDirection", ".", "Forward", ",", "from_position", ":", "Optional", "[", "Union", "[", "msg", ".", "Position", ",", "msg", ".", "_PositionSentinel", "]", "]", "=", "None", ",", "batch_size", ":", "int", "=", "100", ",", "resolve_links", ":", "bool", "=", "True", ",", "require_master", ":", "bool", "=", "False", ",", "correlation_id", ":", "Optional", "[", "uuid", ".", "UUID", "]", "=", "None", ",", ")", ":", "correlation_id", "=", "correlation_id", "cmd", "=", "convo", ".", "IterAllEvents", "(", "msg", ".", "Position", ".", "for_direction", "(", "direction", ",", "from_position", ")", ",", "batch_size", ",", "resolve_links", ",", "require_master", ",", "direction", ",", "self", ".", "credential", ",", "correlation_id", ",", ")", "result", "=", "await", "self", ".", "dispatcher", ".", "start_conversation", "(", "cmd", ")", "iterator", "=", "await", "result", "async", "for", "event", "in", "iterator", ":", "yield", "event" ]
Read through all the events in the database. Args: direction (optional): Controls whether to read forward or backward through the events. Defaults to StreamDirection.Forward from_position (optional): The position to start reading from. Defaults to photonpump.Beginning when direction is Forward, photonpump.End when direction is Backward. batch_size (optional): The maximum number of events to read at a time. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Examples: Print every event from the database. >>> with async.connect() as conn: >>> async for event in conn.iter_all() >>> print(event) Print every event from the database in reverse order >>> with async.connect() as conn: >>> async for event in conn.iter_all(direction=StreamDirection.Backward): >>> print(event) Start reading from a known commit position >>> with async.connect() as conn: >>> async for event in conn.iter_all(from_position=Position(12345)) >>> print(event)
[ "Read", "through", "all", "the", "events", "in", "the", "database", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L904-L965
train
madedotcom/photon-pump
photonpump/connection.py
Client.subscribe_to
async def subscribe_to( self, stream, start_from=-1, resolve_link_tos=True, batch_size: int = 100 ): """ Subscribe to receive notifications when a new event is published to a stream. Args: stream: The name of the stream. start_from (optional): The first event to read. This parameter defaults to the magic value -1 which is treated as meaning "from the end of the stream". IF this value is used, no historical events will be returned. For any other value, photonpump will read all events from start_from until the end of the stream in pages of max_size before subscribing to receive new events as they arrive. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. batch_size (optioal): The number of events to pull down from eventstore in one go. Returns: A VolatileSubscription. Examples: >>> async with connection() as conn: >>> # Subscribe only to NEW events on the cpu-metrics stream >>> subs = await conn.subscribe_to("price-changes") >>> async for event in subs.events: >>> print(event) >>> async with connection() as conn: >>> # Read all historical events and then receive updates as they >>> # arrive. >>> subs = await conn.subscribe_to("price-changes", start_from=0) >>> async for event in subs.events: >>> print(event) """ if start_from == -1: cmd: convo.Conversation = convo.SubscribeToStream( stream, resolve_link_tos, credentials=self.credential ) else: cmd = convo.CatchupSubscription( stream, start_from, batch_size, credential=self.credential ) future = await self.dispatcher.start_conversation(cmd) return await future
python
async def subscribe_to( self, stream, start_from=-1, resolve_link_tos=True, batch_size: int = 100 ): """ Subscribe to receive notifications when a new event is published to a stream. Args: stream: The name of the stream. start_from (optional): The first event to read. This parameter defaults to the magic value -1 which is treated as meaning "from the end of the stream". IF this value is used, no historical events will be returned. For any other value, photonpump will read all events from start_from until the end of the stream in pages of max_size before subscribing to receive new events as they arrive. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. batch_size (optioal): The number of events to pull down from eventstore in one go. Returns: A VolatileSubscription. Examples: >>> async with connection() as conn: >>> # Subscribe only to NEW events on the cpu-metrics stream >>> subs = await conn.subscribe_to("price-changes") >>> async for event in subs.events: >>> print(event) >>> async with connection() as conn: >>> # Read all historical events and then receive updates as they >>> # arrive. >>> subs = await conn.subscribe_to("price-changes", start_from=0) >>> async for event in subs.events: >>> print(event) """ if start_from == -1: cmd: convo.Conversation = convo.SubscribeToStream( stream, resolve_link_tos, credentials=self.credential ) else: cmd = convo.CatchupSubscription( stream, start_from, batch_size, credential=self.credential ) future = await self.dispatcher.start_conversation(cmd) return await future
[ "async", "def", "subscribe_to", "(", "self", ",", "stream", ",", "start_from", "=", "-", "1", ",", "resolve_link_tos", "=", "True", ",", "batch_size", ":", "int", "=", "100", ")", ":", "if", "start_from", "==", "-", "1", ":", "cmd", ":", "convo", ".", "Conversation", "=", "convo", ".", "SubscribeToStream", "(", "stream", ",", "resolve_link_tos", ",", "credentials", "=", "self", ".", "credential", ")", "else", ":", "cmd", "=", "convo", ".", "CatchupSubscription", "(", "stream", ",", "start_from", ",", "batch_size", ",", "credential", "=", "self", ".", "credential", ")", "future", "=", "await", "self", ".", "dispatcher", ".", "start_conversation", "(", "cmd", ")", "return", "await", "future" ]
Subscribe to receive notifications when a new event is published to a stream. Args: stream: The name of the stream. start_from (optional): The first event to read. This parameter defaults to the magic value -1 which is treated as meaning "from the end of the stream". IF this value is used, no historical events will be returned. For any other value, photonpump will read all events from start_from until the end of the stream in pages of max_size before subscribing to receive new events as they arrive. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. batch_size (optioal): The number of events to pull down from eventstore in one go. Returns: A VolatileSubscription. Examples: >>> async with connection() as conn: >>> # Subscribe only to NEW events on the cpu-metrics stream >>> subs = await conn.subscribe_to("price-changes") >>> async for event in subs.events: >>> print(event) >>> async with connection() as conn: >>> # Read all historical events and then receive updates as they >>> # arrive. >>> subs = await conn.subscribe_to("price-changes", start_from=0) >>> async for event in subs.events: >>> print(event)
[ "Subscribe", "to", "receive", "notifications", "when", "a", "new", "event", "is", "published", "to", "a", "stream", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L1029-L1086
train
madedotcom/photon-pump
photonpump/discovery.py
prefer_master
def prefer_master(nodes: List[DiscoveredNode]) -> Optional[DiscoveredNode]: """ Select the master if available, otherwise fall back to a replica. """ return max(nodes, key=attrgetter("state"))
python
def prefer_master(nodes: List[DiscoveredNode]) -> Optional[DiscoveredNode]: """ Select the master if available, otherwise fall back to a replica. """ return max(nodes, key=attrgetter("state"))
[ "def", "prefer_master", "(", "nodes", ":", "List", "[", "DiscoveredNode", "]", ")", "->", "Optional", "[", "DiscoveredNode", "]", ":", "return", "max", "(", "nodes", ",", "key", "=", "attrgetter", "(", "\"state\"", ")", ")" ]
Select the master if available, otherwise fall back to a replica.
[ "Select", "the", "master", "if", "available", "otherwise", "fall", "back", "to", "a", "replica", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/discovery.py#L60-L64
train
madedotcom/photon-pump
photonpump/discovery.py
prefer_replica
def prefer_replica(nodes: List[DiscoveredNode]) -> Optional[DiscoveredNode]: """ Select a random replica if any are available or fall back to the master. """ masters = [node for node in nodes if node.state == NodeState.Master] replicas = [node for node in nodes if node.state != NodeState.Master] if replicas: return random.choice(replicas) else: # if you have more than one master then you're on your own, bud. return masters[0]
python
def prefer_replica(nodes: List[DiscoveredNode]) -> Optional[DiscoveredNode]: """ Select a random replica if any are available or fall back to the master. """ masters = [node for node in nodes if node.state == NodeState.Master] replicas = [node for node in nodes if node.state != NodeState.Master] if replicas: return random.choice(replicas) else: # if you have more than one master then you're on your own, bud. return masters[0]
[ "def", "prefer_replica", "(", "nodes", ":", "List", "[", "DiscoveredNode", "]", ")", "->", "Optional", "[", "DiscoveredNode", "]", ":", "masters", "=", "[", "node", "for", "node", "in", "nodes", "if", "node", ".", "state", "==", "NodeState", ".", "Master", "]", "replicas", "=", "[", "node", "for", "node", "in", "nodes", "if", "node", ".", "state", "!=", "NodeState", ".", "Master", "]", "if", "replicas", ":", "return", "random", ".", "choice", "(", "replicas", ")", "else", ":", "# if you have more than one master then you're on your own, bud.", "return", "masters", "[", "0", "]" ]
Select a random replica if any are available or fall back to the master.
[ "Select", "a", "random", "replica", "if", "any", "are", "available", "or", "fall", "back", "to", "the", "master", "." ]
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/discovery.py#L67-L79
train
nteract/vdom
vdom/core.py
create_event_handler
def create_event_handler(event_type, handler): """Register a comm and return a serializable object with target name""" target_name = '{hash}_{event_type}'.format(hash=hash(handler), event_type=event_type) def handle_comm_opened(comm, msg): @comm.on_msg def _handle_msg(msg): data = msg['content']['data'] event = json.loads(data) return_value = handler(event) if return_value: comm.send(return_value) comm.send('Comm target "{target_name}" registered by vdom'.format(target_name=target_name)) # Register a new comm for this event handler if get_ipython(): get_ipython().kernel.comm_manager.register_target(target_name, handle_comm_opened) # Return a serialized object return target_name
python
def create_event_handler(event_type, handler): """Register a comm and return a serializable object with target name""" target_name = '{hash}_{event_type}'.format(hash=hash(handler), event_type=event_type) def handle_comm_opened(comm, msg): @comm.on_msg def _handle_msg(msg): data = msg['content']['data'] event = json.loads(data) return_value = handler(event) if return_value: comm.send(return_value) comm.send('Comm target "{target_name}" registered by vdom'.format(target_name=target_name)) # Register a new comm for this event handler if get_ipython(): get_ipython().kernel.comm_manager.register_target(target_name, handle_comm_opened) # Return a serialized object return target_name
[ "def", "create_event_handler", "(", "event_type", ",", "handler", ")", ":", "target_name", "=", "'{hash}_{event_type}'", ".", "format", "(", "hash", "=", "hash", "(", "handler", ")", ",", "event_type", "=", "event_type", ")", "def", "handle_comm_opened", "(", "comm", ",", "msg", ")", ":", "@", "comm", ".", "on_msg", "def", "_handle_msg", "(", "msg", ")", ":", "data", "=", "msg", "[", "'content'", "]", "[", "'data'", "]", "event", "=", "json", ".", "loads", "(", "data", ")", "return_value", "=", "handler", "(", "event", ")", "if", "return_value", ":", "comm", ".", "send", "(", "return_value", ")", "comm", ".", "send", "(", "'Comm target \"{target_name}\" registered by vdom'", ".", "format", "(", "target_name", "=", "target_name", ")", ")", "# Register a new comm for this event handler", "if", "get_ipython", "(", ")", ":", "get_ipython", "(", ")", ".", "kernel", ".", "comm_manager", ".", "register_target", "(", "target_name", ",", "handle_comm_opened", ")", "# Return a serialized object", "return", "target_name" ]
Register a comm and return a serializable object with target name
[ "Register", "a", "comm", "and", "return", "a", "serializable", "object", "with", "target", "name" ]
d1ef48dc20d50379b8137a104125c92f64b916e4
https://github.com/nteract/vdom/blob/d1ef48dc20d50379b8137a104125c92f64b916e4/vdom/core.py#L49-L70
train
nteract/vdom
vdom/core.py
to_json
def to_json(el, schema=None): """Convert an element to VDOM JSON If you wish to validate the JSON, pass in a schema via the schema keyword argument. If a schema is provided, this raises a ValidationError if JSON does not match the schema. """ if type(el) is str: json_el = el elif type(el) is list: json_el = list(map(to_json, el)) elif type(el) is dict: assert 'tagName' in el json_el = el.copy() if 'attributes' not in el: json_el['attributes'] = {} if 'children' not in el: json_el['children'] = [] elif isinstance(el, VDOM): json_el = el.to_dict() else: json_el = el if schema: try: validate(instance=json_el, schema=schema, cls=Draft4Validator) except ValidationError as e: raise ValidationError(_validate_err_template.format(schema, e)) return json_el
python
def to_json(el, schema=None): """Convert an element to VDOM JSON If you wish to validate the JSON, pass in a schema via the schema keyword argument. If a schema is provided, this raises a ValidationError if JSON does not match the schema. """ if type(el) is str: json_el = el elif type(el) is list: json_el = list(map(to_json, el)) elif type(el) is dict: assert 'tagName' in el json_el = el.copy() if 'attributes' not in el: json_el['attributes'] = {} if 'children' not in el: json_el['children'] = [] elif isinstance(el, VDOM): json_el = el.to_dict() else: json_el = el if schema: try: validate(instance=json_el, schema=schema, cls=Draft4Validator) except ValidationError as e: raise ValidationError(_validate_err_template.format(schema, e)) return json_el
[ "def", "to_json", "(", "el", ",", "schema", "=", "None", ")", ":", "if", "type", "(", "el", ")", "is", "str", ":", "json_el", "=", "el", "elif", "type", "(", "el", ")", "is", "list", ":", "json_el", "=", "list", "(", "map", "(", "to_json", ",", "el", ")", ")", "elif", "type", "(", "el", ")", "is", "dict", ":", "assert", "'tagName'", "in", "el", "json_el", "=", "el", ".", "copy", "(", ")", "if", "'attributes'", "not", "in", "el", ":", "json_el", "[", "'attributes'", "]", "=", "{", "}", "if", "'children'", "not", "in", "el", ":", "json_el", "[", "'children'", "]", "=", "[", "]", "elif", "isinstance", "(", "el", ",", "VDOM", ")", ":", "json_el", "=", "el", ".", "to_dict", "(", ")", "else", ":", "json_el", "=", "el", "if", "schema", ":", "try", ":", "validate", "(", "instance", "=", "json_el", ",", "schema", "=", "schema", ",", "cls", "=", "Draft4Validator", ")", "except", "ValidationError", "as", "e", ":", "raise", "ValidationError", "(", "_validate_err_template", ".", "format", "(", "schema", ",", "e", ")", ")", "return", "json_el" ]
Convert an element to VDOM JSON If you wish to validate the JSON, pass in a schema via the schema keyword argument. If a schema is provided, this raises a ValidationError if JSON does not match the schema.
[ "Convert", "an", "element", "to", "VDOM", "JSON" ]
d1ef48dc20d50379b8137a104125c92f64b916e4
https://github.com/nteract/vdom/blob/d1ef48dc20d50379b8137a104125c92f64b916e4/vdom/core.py#L73-L102
train
nteract/vdom
vdom/core.py
create_component
def create_component(tag_name, allow_children=True): """ Create a component for an HTML Tag Examples: >>> marquee = create_component('marquee') >>> marquee('woohoo') <marquee>woohoo</marquee> """ def _component(*children, **kwargs): if 'children' in kwargs: children = kwargs.pop('children') else: # Flatten children under specific circumstances # This supports the use case of div([a, b, c]) # And allows users to skip the * operator if len(children) == 1 and isinstance(children[0], list): # We want children to be tuples and not lists, so # they can be immutable children = tuple(children[0]) style = None event_handlers = None attributes = dict(**kwargs) if 'style' in kwargs: style = kwargs.pop('style') if 'attributes' in kwargs: attributes = kwargs['attributes'] for key, value in attributes.items(): if callable(value): attributes = attributes.copy() if event_handlers == None: event_handlers = {key: attributes.pop(key)} else: event_handlers[key] = attributes.pop(key) if not allow_children and children: # We don't allow children, but some were passed in raise ValueError('<{tag_name} /> cannot have children'.format(tag_name=tag_name)) v = VDOM(tag_name, attributes, style, children, None, event_handlers) return v return _component
python
def create_component(tag_name, allow_children=True): """ Create a component for an HTML Tag Examples: >>> marquee = create_component('marquee') >>> marquee('woohoo') <marquee>woohoo</marquee> """ def _component(*children, **kwargs): if 'children' in kwargs: children = kwargs.pop('children') else: # Flatten children under specific circumstances # This supports the use case of div([a, b, c]) # And allows users to skip the * operator if len(children) == 1 and isinstance(children[0], list): # We want children to be tuples and not lists, so # they can be immutable children = tuple(children[0]) style = None event_handlers = None attributes = dict(**kwargs) if 'style' in kwargs: style = kwargs.pop('style') if 'attributes' in kwargs: attributes = kwargs['attributes'] for key, value in attributes.items(): if callable(value): attributes = attributes.copy() if event_handlers == None: event_handlers = {key: attributes.pop(key)} else: event_handlers[key] = attributes.pop(key) if not allow_children and children: # We don't allow children, but some were passed in raise ValueError('<{tag_name} /> cannot have children'.format(tag_name=tag_name)) v = VDOM(tag_name, attributes, style, children, None, event_handlers) return v return _component
[ "def", "create_component", "(", "tag_name", ",", "allow_children", "=", "True", ")", ":", "def", "_component", "(", "*", "children", ",", "*", "*", "kwargs", ")", ":", "if", "'children'", "in", "kwargs", ":", "children", "=", "kwargs", ".", "pop", "(", "'children'", ")", "else", ":", "# Flatten children under specific circumstances", "# This supports the use case of div([a, b, c])", "# And allows users to skip the * operator", "if", "len", "(", "children", ")", "==", "1", "and", "isinstance", "(", "children", "[", "0", "]", ",", "list", ")", ":", "# We want children to be tuples and not lists, so", "# they can be immutable", "children", "=", "tuple", "(", "children", "[", "0", "]", ")", "style", "=", "None", "event_handlers", "=", "None", "attributes", "=", "dict", "(", "*", "*", "kwargs", ")", "if", "'style'", "in", "kwargs", ":", "style", "=", "kwargs", ".", "pop", "(", "'style'", ")", "if", "'attributes'", "in", "kwargs", ":", "attributes", "=", "kwargs", "[", "'attributes'", "]", "for", "key", ",", "value", "in", "attributes", ".", "items", "(", ")", ":", "if", "callable", "(", "value", ")", ":", "attributes", "=", "attributes", ".", "copy", "(", ")", "if", "event_handlers", "==", "None", ":", "event_handlers", "=", "{", "key", ":", "attributes", ".", "pop", "(", "key", ")", "}", "else", ":", "event_handlers", "[", "key", "]", "=", "attributes", ".", "pop", "(", "key", ")", "if", "not", "allow_children", "and", "children", ":", "# We don't allow children, but some were passed in", "raise", "ValueError", "(", "'<{tag_name} /> cannot have children'", ".", "format", "(", "tag_name", "=", "tag_name", ")", ")", "v", "=", "VDOM", "(", "tag_name", ",", "attributes", ",", "style", ",", "children", ",", "None", ",", "event_handlers", ")", "return", "v", "return", "_component" ]
Create a component for an HTML Tag Examples: >>> marquee = create_component('marquee') >>> marquee('woohoo') <marquee>woohoo</marquee>
[ "Create", "a", "component", "for", "an", "HTML", "Tag" ]
d1ef48dc20d50379b8137a104125c92f64b916e4
https://github.com/nteract/vdom/blob/d1ef48dc20d50379b8137a104125c92f64b916e4/vdom/core.py#L301-L343
train
nteract/vdom
vdom/core.py
VDOM.validate
def validate(self, schema): """ Validate VDOM against given JSON Schema Raises ValidationError if schema does not match """ try: validate(instance=self.to_dict(), schema=schema, cls=Draft4Validator) except ValidationError as e: raise ValidationError(_validate_err_template.format(VDOM_SCHEMA, e))
python
def validate(self, schema): """ Validate VDOM against given JSON Schema Raises ValidationError if schema does not match """ try: validate(instance=self.to_dict(), schema=schema, cls=Draft4Validator) except ValidationError as e: raise ValidationError(_validate_err_template.format(VDOM_SCHEMA, e))
[ "def", "validate", "(", "self", ",", "schema", ")", ":", "try", ":", "validate", "(", "instance", "=", "self", ".", "to_dict", "(", ")", ",", "schema", "=", "schema", ",", "cls", "=", "Draft4Validator", ")", "except", "ValidationError", "as", "e", ":", "raise", "ValidationError", "(", "_validate_err_template", ".", "format", "(", "VDOM_SCHEMA", ",", "e", ")", ")" ]
Validate VDOM against given JSON Schema Raises ValidationError if schema does not match
[ "Validate", "VDOM", "against", "given", "JSON", "Schema" ]
d1ef48dc20d50379b8137a104125c92f64b916e4
https://github.com/nteract/vdom/blob/d1ef48dc20d50379b8137a104125c92f64b916e4/vdom/core.py#L174-L183
train
nteract/vdom
vdom/core.py
VDOM.to_dict
def to_dict(self): """Converts VDOM object to a dictionary that passes our schema """ attributes = dict(self.attributes.items()) if self.style: attributes.update({"style": dict(self.style.items())}) vdom_dict = {'tagName': self.tag_name, 'attributes': attributes} if self.event_handlers: event_handlers = dict(self.event_handlers.items()) for key, value in event_handlers.items(): value = create_event_handler(key, value) event_handlers[key] = value vdom_dict['eventHandlers'] = event_handlers if self.key: vdom_dict['key'] = self.key vdom_dict['children'] = [c.to_dict() if isinstance(c, VDOM) else c for c in self.children] return vdom_dict
python
def to_dict(self): """Converts VDOM object to a dictionary that passes our schema """ attributes = dict(self.attributes.items()) if self.style: attributes.update({"style": dict(self.style.items())}) vdom_dict = {'tagName': self.tag_name, 'attributes': attributes} if self.event_handlers: event_handlers = dict(self.event_handlers.items()) for key, value in event_handlers.items(): value = create_event_handler(key, value) event_handlers[key] = value vdom_dict['eventHandlers'] = event_handlers if self.key: vdom_dict['key'] = self.key vdom_dict['children'] = [c.to_dict() if isinstance(c, VDOM) else c for c in self.children] return vdom_dict
[ "def", "to_dict", "(", "self", ")", ":", "attributes", "=", "dict", "(", "self", ".", "attributes", ".", "items", "(", ")", ")", "if", "self", ".", "style", ":", "attributes", ".", "update", "(", "{", "\"style\"", ":", "dict", "(", "self", ".", "style", ".", "items", "(", ")", ")", "}", ")", "vdom_dict", "=", "{", "'tagName'", ":", "self", ".", "tag_name", ",", "'attributes'", ":", "attributes", "}", "if", "self", ".", "event_handlers", ":", "event_handlers", "=", "dict", "(", "self", ".", "event_handlers", ".", "items", "(", ")", ")", "for", "key", ",", "value", "in", "event_handlers", ".", "items", "(", ")", ":", "value", "=", "create_event_handler", "(", "key", ",", "value", ")", "event_handlers", "[", "key", "]", "=", "value", "vdom_dict", "[", "'eventHandlers'", "]", "=", "event_handlers", "if", "self", ".", "key", ":", "vdom_dict", "[", "'key'", "]", "=", "self", ".", "key", "vdom_dict", "[", "'children'", "]", "=", "[", "c", ".", "to_dict", "(", ")", "if", "isinstance", "(", "c", ",", "VDOM", ")", "else", "c", "for", "c", "in", "self", ".", "children", "]", "return", "vdom_dict" ]
Converts VDOM object to a dictionary that passes our schema
[ "Converts", "VDOM", "object", "to", "a", "dictionary", "that", "passes", "our", "schema" ]
d1ef48dc20d50379b8137a104125c92f64b916e4
https://github.com/nteract/vdom/blob/d1ef48dc20d50379b8137a104125c92f64b916e4/vdom/core.py#L185-L201
train
konstantint/PassportEye
passporteye/mrz/text.py
MRZ._guess_type
def _guess_type(mrz_lines): """Guesses the type of the MRZ from given lines. Returns 'TD1', 'TD2', 'TD3', 'MRVA', 'MRVB' or None. The algorithm is basically just counting lines, looking at their length and checking whether the first character is a 'V' >>> MRZ._guess_type([]) is None True >>> MRZ._guess_type([1]) is None True >>> MRZ._guess_type([1,2]) is None # No len() for numbers True >>> MRZ._guess_type(['a','b']) # This way passes 'TD2' >>> MRZ._guess_type(['*'*40, '*'*40]) 'TD3' >>> MRZ._guess_type([1,2,3]) 'TD1' >>> MRZ._guess_type(['V'*40, '*'*40]) 'MRVA' >>> MRZ._guess_type(['V'*36, '*'*36]) 'MRVB' """ try: if len(mrz_lines) == 3: return 'TD1' elif len(mrz_lines) == 2 and len(mrz_lines[0]) < 40 and len(mrz_lines[1]) < 40: return 'MRVB' if mrz_lines[0][0].upper() == 'V' else 'TD2' elif len(mrz_lines) == 2: return 'MRVA' if mrz_lines[0][0].upper() == 'V' else 'TD3' else: return None except Exception: #pylint: disable=broad-except return None
python
def _guess_type(mrz_lines): """Guesses the type of the MRZ from given lines. Returns 'TD1', 'TD2', 'TD3', 'MRVA', 'MRVB' or None. The algorithm is basically just counting lines, looking at their length and checking whether the first character is a 'V' >>> MRZ._guess_type([]) is None True >>> MRZ._guess_type([1]) is None True >>> MRZ._guess_type([1,2]) is None # No len() for numbers True >>> MRZ._guess_type(['a','b']) # This way passes 'TD2' >>> MRZ._guess_type(['*'*40, '*'*40]) 'TD3' >>> MRZ._guess_type([1,2,3]) 'TD1' >>> MRZ._guess_type(['V'*40, '*'*40]) 'MRVA' >>> MRZ._guess_type(['V'*36, '*'*36]) 'MRVB' """ try: if len(mrz_lines) == 3: return 'TD1' elif len(mrz_lines) == 2 and len(mrz_lines[0]) < 40 and len(mrz_lines[1]) < 40: return 'MRVB' if mrz_lines[0][0].upper() == 'V' else 'TD2' elif len(mrz_lines) == 2: return 'MRVA' if mrz_lines[0][0].upper() == 'V' else 'TD3' else: return None except Exception: #pylint: disable=broad-except return None
[ "def", "_guess_type", "(", "mrz_lines", ")", ":", "try", ":", "if", "len", "(", "mrz_lines", ")", "==", "3", ":", "return", "'TD1'", "elif", "len", "(", "mrz_lines", ")", "==", "2", "and", "len", "(", "mrz_lines", "[", "0", "]", ")", "<", "40", "and", "len", "(", "mrz_lines", "[", "1", "]", ")", "<", "40", ":", "return", "'MRVB'", "if", "mrz_lines", "[", "0", "]", "[", "0", "]", ".", "upper", "(", ")", "==", "'V'", "else", "'TD2'", "elif", "len", "(", "mrz_lines", ")", "==", "2", ":", "return", "'MRVA'", "if", "mrz_lines", "[", "0", "]", "[", "0", "]", ".", "upper", "(", ")", "==", "'V'", "else", "'TD3'", "else", ":", "return", "None", "except", "Exception", ":", "#pylint: disable=broad-except", "return", "None" ]
Guesses the type of the MRZ from given lines. Returns 'TD1', 'TD2', 'TD3', 'MRVA', 'MRVB' or None. The algorithm is basically just counting lines, looking at their length and checking whether the first character is a 'V' >>> MRZ._guess_type([]) is None True >>> MRZ._guess_type([1]) is None True >>> MRZ._guess_type([1,2]) is None # No len() for numbers True >>> MRZ._guess_type(['a','b']) # This way passes 'TD2' >>> MRZ._guess_type(['*'*40, '*'*40]) 'TD3' >>> MRZ._guess_type([1,2,3]) 'TD1' >>> MRZ._guess_type(['V'*40, '*'*40]) 'MRVA' >>> MRZ._guess_type(['V'*36, '*'*36]) 'MRVB'
[ "Guesses", "the", "type", "of", "the", "MRZ", "from", "given", "lines", ".", "Returns", "TD1", "TD2", "TD3", "MRVA", "MRVB", "or", "None", ".", "The", "algorithm", "is", "basically", "just", "counting", "lines", "looking", "at", "their", "length", "and", "checking", "whether", "the", "first", "character", "is", "a", "V" ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/text.py#L129-L160
train
konstantint/PassportEye
passporteye/util/pipeline.py
Pipeline.remove_component
def remove_component(self, name): """Removes an existing component with a given name, invalidating all the values computed by the previous component.""" if name not in self.components: raise Exception("No component named %s" % name) del self.components[name] del self.depends[name] for p in self.provides[name]: del self.whoprovides[p] self.invalidate(p) del self.provides[name]
python
def remove_component(self, name): """Removes an existing component with a given name, invalidating all the values computed by the previous component.""" if name not in self.components: raise Exception("No component named %s" % name) del self.components[name] del self.depends[name] for p in self.provides[name]: del self.whoprovides[p] self.invalidate(p) del self.provides[name]
[ "def", "remove_component", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "components", ":", "raise", "Exception", "(", "\"No component named %s\"", "%", "name", ")", "del", "self", ".", "components", "[", "name", "]", "del", "self", ".", "depends", "[", "name", "]", "for", "p", "in", "self", ".", "provides", "[", "name", "]", ":", "del", "self", ".", "whoprovides", "[", "p", "]", "self", ".", "invalidate", "(", "p", ")", "del", "self", ".", "provides", "[", "name", "]" ]
Removes an existing component with a given name, invalidating all the values computed by the previous component.
[ "Removes", "an", "existing", "component", "with", "a", "given", "name", "invalidating", "all", "the", "values", "computed", "by", "the", "previous", "component", "." ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pipeline.py#L68-L78
train
konstantint/PassportEye
passporteye/util/pipeline.py
Pipeline.replace_component
def replace_component(self, name, callable, provides=None, depends=None): """Changes an existing component with a given name, invalidating all the values computed by the previous component and its successors.""" self.remove_component(name) self.add_component(name, callable, provides, depends)
python
def replace_component(self, name, callable, provides=None, depends=None): """Changes an existing component with a given name, invalidating all the values computed by the previous component and its successors.""" self.remove_component(name) self.add_component(name, callable, provides, depends)
[ "def", "replace_component", "(", "self", ",", "name", ",", "callable", ",", "provides", "=", "None", ",", "depends", "=", "None", ")", ":", "self", ".", "remove_component", "(", "name", ")", "self", ".", "add_component", "(", "name", ",", "callable", ",", "provides", ",", "depends", ")" ]
Changes an existing component with a given name, invalidating all the values computed by the previous component and its successors.
[ "Changes", "an", "existing", "component", "with", "a", "given", "name", "invalidating", "all", "the", "values", "computed", "by", "the", "previous", "component", "and", "its", "successors", "." ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pipeline.py#L80-L84
train
konstantint/PassportEye
passporteye/util/pipeline.py
Pipeline.invalidate
def invalidate(self, key): """Remove the given data item along with all items that depend on it in the graph.""" if key not in self.data: return del self.data[key] # Find all components that used it and invalidate their results for cname in self.components: if key in self.depends[cname]: for downstream_key in self.provides[cname]: self.invalidate(downstream_key)
python
def invalidate(self, key): """Remove the given data item along with all items that depend on it in the graph.""" if key not in self.data: return del self.data[key] # Find all components that used it and invalidate their results for cname in self.components: if key in self.depends[cname]: for downstream_key in self.provides[cname]: self.invalidate(downstream_key)
[ "def", "invalidate", "(", "self", ",", "key", ")", ":", "if", "key", "not", "in", "self", ".", "data", ":", "return", "del", "self", ".", "data", "[", "key", "]", "# Find all components that used it and invalidate their results", "for", "cname", "in", "self", ".", "components", ":", "if", "key", "in", "self", ".", "depends", "[", "cname", "]", ":", "for", "downstream_key", "in", "self", ".", "provides", "[", "cname", "]", ":", "self", ".", "invalidate", "(", "downstream_key", ")" ]
Remove the given data item along with all items that depend on it in the graph.
[ "Remove", "the", "given", "data", "item", "along", "with", "all", "items", "that", "depend", "on", "it", "in", "the", "graph", "." ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pipeline.py#L86-L96
train
konstantint/PassportEye
passporteye/util/ocr.py
ocr
def ocr(img, mrz_mode=True, extra_cmdline_params=''): """Runs Tesseract on a given image. Writes an intermediate tempfile and then runs the tesseract command on the image. This is a simplified modification of image_to_string from PyTesseract, which is adapted to SKImage rather than PIL. In principle we could have reimplemented it just as well - there are some apparent bugs in PyTesseract, but it works so far :) :param mrz_mode: when this is True (default) the tesseract is configured to recognize MRZs rather than arbitrary texts. When False, no specific configuration parameters are passed (and you are free to provide your own via `extra_cmdline_params`) :param extra_cmdline_params: extra parameters passed to tesseract. When mrz_mode=True, these are appended to whatever is the "best known" configuration at the moment. "--oem 0" is the parameter you might want to pass. This selects the Tesseract's "legacy" OCR engine, which often seems to work better than the new LSTM-based one. """ input_file_name = '%s.bmp' % _tempnam() output_file_name_base = '%s' % _tempnam() output_file_name = "%s.txt" % output_file_name_base try: # Prevent annoying warning about lossy conversion to uint8 if str(img.dtype).startswith('float') and np.nanmin(img) >= 0 and np.nanmax(img) <= 1: img = img.astype(np.float64) * (np.power(2.0, 8) - 1) + 0.499999999 img = img.astype(np.uint8) imwrite(input_file_name, img) if mrz_mode: # NB: Tesseract 4.0 does not seem to support tessedit_char_whitelist config = ("--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789><" " -c load_system_dawg=F -c load_freq_dawg=F {}").format(extra_cmdline_params) else: config = "{}".format(extra_cmdline_params) pytesseract.run_tesseract(input_file_name, output_file_name_base, 'txt', lang=None, config=config) if sys.version_info.major == 3: f = open(output_file_name, encoding='utf-8') else: f = open(output_file_name) try: return f.read().strip() finally: f.close() finally: 
pytesseract.cleanup(input_file_name) pytesseract.cleanup(output_file_name)
python
def ocr(img, mrz_mode=True, extra_cmdline_params=''): """Runs Tesseract on a given image. Writes an intermediate tempfile and then runs the tesseract command on the image. This is a simplified modification of image_to_string from PyTesseract, which is adapted to SKImage rather than PIL. In principle we could have reimplemented it just as well - there are some apparent bugs in PyTesseract, but it works so far :) :param mrz_mode: when this is True (default) the tesseract is configured to recognize MRZs rather than arbitrary texts. When False, no specific configuration parameters are passed (and you are free to provide your own via `extra_cmdline_params`) :param extra_cmdline_params: extra parameters passed to tesseract. When mrz_mode=True, these are appended to whatever is the "best known" configuration at the moment. "--oem 0" is the parameter you might want to pass. This selects the Tesseract's "legacy" OCR engine, which often seems to work better than the new LSTM-based one. """ input_file_name = '%s.bmp' % _tempnam() output_file_name_base = '%s' % _tempnam() output_file_name = "%s.txt" % output_file_name_base try: # Prevent annoying warning about lossy conversion to uint8 if str(img.dtype).startswith('float') and np.nanmin(img) >= 0 and np.nanmax(img) <= 1: img = img.astype(np.float64) * (np.power(2.0, 8) - 1) + 0.499999999 img = img.astype(np.uint8) imwrite(input_file_name, img) if mrz_mode: # NB: Tesseract 4.0 does not seem to support tessedit_char_whitelist config = ("--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789><" " -c load_system_dawg=F -c load_freq_dawg=F {}").format(extra_cmdline_params) else: config = "{}".format(extra_cmdline_params) pytesseract.run_tesseract(input_file_name, output_file_name_base, 'txt', lang=None, config=config) if sys.version_info.major == 3: f = open(output_file_name, encoding='utf-8') else: f = open(output_file_name) try: return f.read().strip() finally: f.close() finally: 
pytesseract.cleanup(input_file_name) pytesseract.cleanup(output_file_name)
[ "def", "ocr", "(", "img", ",", "mrz_mode", "=", "True", ",", "extra_cmdline_params", "=", "''", ")", ":", "input_file_name", "=", "'%s.bmp'", "%", "_tempnam", "(", ")", "output_file_name_base", "=", "'%s'", "%", "_tempnam", "(", ")", "output_file_name", "=", "\"%s.txt\"", "%", "output_file_name_base", "try", ":", "# Prevent annoying warning about lossy conversion to uint8", "if", "str", "(", "img", ".", "dtype", ")", ".", "startswith", "(", "'float'", ")", "and", "np", ".", "nanmin", "(", "img", ")", ">=", "0", "and", "np", ".", "nanmax", "(", "img", ")", "<=", "1", ":", "img", "=", "img", ".", "astype", "(", "np", ".", "float64", ")", "*", "(", "np", ".", "power", "(", "2.0", ",", "8", ")", "-", "1", ")", "+", "0.499999999", "img", "=", "img", ".", "astype", "(", "np", ".", "uint8", ")", "imwrite", "(", "input_file_name", ",", "img", ")", "if", "mrz_mode", ":", "# NB: Tesseract 4.0 does not seem to support tessedit_char_whitelist", "config", "=", "(", "\"--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789><\"", "\" -c load_system_dawg=F -c load_freq_dawg=F {}\"", ")", ".", "format", "(", "extra_cmdline_params", ")", "else", ":", "config", "=", "\"{}\"", ".", "format", "(", "extra_cmdline_params", ")", "pytesseract", ".", "run_tesseract", "(", "input_file_name", ",", "output_file_name_base", ",", "'txt'", ",", "lang", "=", "None", ",", "config", "=", "config", ")", "if", "sys", ".", "version_info", ".", "major", "==", "3", ":", "f", "=", "open", "(", "output_file_name", ",", "encoding", "=", "'utf-8'", ")", "else", ":", "f", "=", "open", "(", "output_file_name", ")", "try", ":", "return", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "finally", ":", "f", ".", "close", "(", ")", "finally", ":", "pytesseract", ".", "cleanup", "(", "input_file_name", ")", "pytesseract", ".", "cleanup", "(", "output_file_name", ")" ]
Runs Tesseract on a given image. Writes an intermediate tempfile and then runs the tesseract command on the image. This is a simplified modification of image_to_string from PyTesseract, which is adapted to SKImage rather than PIL. In principle we could have reimplemented it just as well - there are some apparent bugs in PyTesseract, but it works so far :) :param mrz_mode: when this is True (default) the tesseract is configured to recognize MRZs rather than arbitrary texts. When False, no specific configuration parameters are passed (and you are free to provide your own via `extra_cmdline_params`) :param extra_cmdline_params: extra parameters passed to tesseract. When mrz_mode=True, these are appended to whatever is the "best known" configuration at the moment. "--oem 0" is the parameter you might want to pass. This selects the Tesseract's "legacy" OCR engine, which often seems to work better than the new LSTM-based one.
[ "Runs", "Tesseract", "on", "a", "given", "image", ".", "Writes", "an", "intermediate", "tempfile", "and", "then", "runs", "the", "tesseract", "command", "on", "the", "image", "." ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/ocr.py#L16-L64
train
konstantint/PassportEye
passporteye/util/geometry.py
RotatedBox.approx_equal
def approx_equal(self, center, width, height, angle, tol=1e-6): "Method mainly useful for testing" return abs(self.cx - center[0]) < tol and abs(self.cy - center[1]) < tol and abs(self.width - width) < tol and \ abs(self.height - height) < tol and abs(self.angle - angle) < tol
python
def approx_equal(self, center, width, height, angle, tol=1e-6): "Method mainly useful for testing" return abs(self.cx - center[0]) < tol and abs(self.cy - center[1]) < tol and abs(self.width - width) < tol and \ abs(self.height - height) < tol and abs(self.angle - angle) < tol
[ "def", "approx_equal", "(", "self", ",", "center", ",", "width", ",", "height", ",", "angle", ",", "tol", "=", "1e-6", ")", ":", "return", "abs", "(", "self", ".", "cx", "-", "center", "[", "0", "]", ")", "<", "tol", "and", "abs", "(", "self", ".", "cy", "-", "center", "[", "1", "]", ")", "<", "tol", "and", "abs", "(", "self", ".", "width", "-", "width", ")", "<", "tol", "and", "abs", "(", "self", ".", "height", "-", "height", ")", "<", "tol", "and", "abs", "(", "self", ".", "angle", "-", "angle", ")", "<", "tol" ]
Method mainly useful for testing
[ "Method", "mainly", "useful", "for", "testing" ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L49-L52
train
konstantint/PassportEye
passporteye/util/geometry.py
RotatedBox.rotated
def rotated(self, rotation_center, angle): """Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle. >>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1) """ rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]]) t = np.asfarray(rotation_center) new_c = np.dot(rot.T, (self.center - t)) + t return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2))
python
def rotated(self, rotation_center, angle): """Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle. >>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1) """ rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]]) t = np.asfarray(rotation_center) new_c = np.dot(rot.T, (self.center - t)) + t return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2))
[ "def", "rotated", "(", "self", ",", "rotation_center", ",", "angle", ")", ":", "rot", "=", "np", ".", "array", "(", "[", "[", "np", ".", "cos", "(", "angle", ")", ",", "np", ".", "sin", "(", "angle", ")", "]", ",", "[", "-", "np", ".", "sin", "(", "angle", ")", ",", "np", ".", "cos", "(", "angle", ")", "]", "]", ")", "t", "=", "np", ".", "asfarray", "(", "rotation_center", ")", "new_c", "=", "np", ".", "dot", "(", "rot", ".", "T", ",", "(", "self", ".", "center", "-", "t", ")", ")", "+", "t", "return", "RotatedBox", "(", "new_c", ",", "self", ".", "width", ",", "self", ".", "height", ",", "(", "self", ".", "angle", "+", "angle", ")", "%", "(", "np", ".", "pi", "*", "2", ")", ")" ]
Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle. >>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
[ "Returns", "a", "RotatedBox", "that", "is", "obtained", "by", "rotating", "this", "box", "around", "a", "given", "center", "by", "a", "given", "angle", "." ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L54-L62
train
konstantint/PassportEye
passporteye/util/geometry.py
RotatedBox.as_poly
def as_poly(self, margin_width=0, margin_height=0): """Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise. :param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion. :param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion. >>> RotatedBox([0, 0], 4, 2, 0).as_poly() array([[-2., -1.], [ 2., -1.], [ 2., 1.], [-2., 1.]]) >>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly() array([[-0.707..., -2.121...], [ 2.121..., 0.707...], [ 0.707..., 2.121...], [-2.121..., -0.707...]]) >>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly() array([[ 1., -2.], [ 1., 2.], [-1., 2.], [-1., -2.]]) >>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1) array([[ 1., -2.], [ 1., 2.], [-1., 2.], [-1., -2.]]) """ v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)]) v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)]) c = np.array([self.cx, self.cy]) return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert])
python
def as_poly(self, margin_width=0, margin_height=0): """Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise. :param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion. :param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion. >>> RotatedBox([0, 0], 4, 2, 0).as_poly() array([[-2., -1.], [ 2., -1.], [ 2., 1.], [-2., 1.]]) >>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly() array([[-0.707..., -2.121...], [ 2.121..., 0.707...], [ 0.707..., 2.121...], [-2.121..., -0.707...]]) >>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly() array([[ 1., -2.], [ 1., 2.], [-1., 2.], [-1., -2.]]) >>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1) array([[ 1., -2.], [ 1., 2.], [-1., 2.], [-1., -2.]]) """ v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)]) v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)]) c = np.array([self.cx, self.cy]) return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert])
[ "def", "as_poly", "(", "self", ",", "margin_width", "=", "0", ",", "margin_height", "=", "0", ")", ":", "v_hor", "=", "(", "self", ".", "width", "/", "2", "+", "margin_width", ")", "*", "np", ".", "array", "(", "[", "np", ".", "cos", "(", "self", ".", "angle", ")", ",", "np", ".", "sin", "(", "self", ".", "angle", ")", "]", ")", "v_vert", "=", "(", "self", ".", "height", "/", "2", "+", "margin_height", ")", "*", "np", ".", "array", "(", "[", "-", "np", ".", "sin", "(", "self", ".", "angle", ")", ",", "np", ".", "cos", "(", "self", ".", "angle", ")", "]", ")", "c", "=", "np", ".", "array", "(", "[", "self", ".", "cx", ",", "self", ".", "cy", "]", ")", "return", "np", ".", "vstack", "(", "[", "c", "-", "v_hor", "-", "v_vert", ",", "c", "+", "v_hor", "-", "v_vert", ",", "c", "+", "v_hor", "+", "v_vert", ",", "c", "-", "v_hor", "+", "v_vert", "]", ")" ]
Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise. :param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion. :param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion. >>> RotatedBox([0, 0], 4, 2, 0).as_poly() array([[-2., -1.], [ 2., -1.], [ 2., 1.], [-2., 1.]]) >>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly() array([[-0.707..., -2.121...], [ 2.121..., 0.707...], [ 0.707..., 2.121...], [-2.121..., -0.707...]]) >>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly() array([[ 1., -2.], [ 1., 2.], [-1., 2.], [-1., -2.]]) >>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1) array([[ 1., -2.], [ 1., 2.], [-1., 2.], [-1., -2.]])
[ "Converts", "this", "box", "to", "a", "polygon", "i", ".", "e", ".", "4x2", "array", "representing", "the", "four", "corners", "starting", "from", "lower", "left", "to", "upper", "left", "counterclockwise", "." ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L64-L94
train
konstantint/PassportEye
passporteye/util/geometry.py
RotatedBox.extract_from_image
def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5): """Extracts the contents of this box from a given image. For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it. Returns an image with dimensions height*scale x width*scale. Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column), and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation") :param img: a numpy ndarray suitable for image processing via skimage. :param scale: the RotatedBox is scaled by this value before performing the extraction. This is necessary when, for example, the location of a particular feature is determined using a smaller image, yet then the corresponding area needs to be extracted from the original, larger image. The scale parameter in this case should be width_of_larger_image/width_of_smaller_image. :param margin_width: The margin that should be added to the width dimension of the box from each size. This value is given wrt actual box dimensions (i.e. not scaled). :param margin_height: The margin that should be added to the height dimension of the box from each side. :return: a numpy ndarray, corresponding to the extracted region (aligned straight). TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand. """ rotate_by = (np.pi/2 - self.angle)*180/np.pi img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True) # The resizeable transform will shift the resulting image somewhat wrt original coordinates. # When we cut out the box we will compensate for this shift. 
shift_c, shift_r = self._compensate_rotation_shift(img, scale) r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0) r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r) c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0) c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c) return img_rotated[r1:r2, c1:c2]
python
def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5): """Extracts the contents of this box from a given image. For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it. Returns an image with dimensions height*scale x width*scale. Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column), and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation") :param img: a numpy ndarray suitable for image processing via skimage. :param scale: the RotatedBox is scaled by this value before performing the extraction. This is necessary when, for example, the location of a particular feature is determined using a smaller image, yet then the corresponding area needs to be extracted from the original, larger image. The scale parameter in this case should be width_of_larger_image/width_of_smaller_image. :param margin_width: The margin that should be added to the width dimension of the box from each size. This value is given wrt actual box dimensions (i.e. not scaled). :param margin_height: The margin that should be added to the height dimension of the box from each side. :return: a numpy ndarray, corresponding to the extracted region (aligned straight). TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand. """ rotate_by = (np.pi/2 - self.angle)*180/np.pi img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True) # The resizeable transform will shift the resulting image somewhat wrt original coordinates. # When we cut out the box we will compensate for this shift. 
shift_c, shift_r = self._compensate_rotation_shift(img, scale) r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0) r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r) c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0) c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c) return img_rotated[r1:r2, c1:c2]
[ "def", "extract_from_image", "(", "self", ",", "img", ",", "scale", "=", "1.0", ",", "margin_width", "=", "5", ",", "margin_height", "=", "5", ")", ":", "rotate_by", "=", "(", "np", ".", "pi", "/", "2", "-", "self", ".", "angle", ")", "*", "180", "/", "np", ".", "pi", "img_rotated", "=", "transform", ".", "rotate", "(", "img", ",", "angle", "=", "rotate_by", ",", "center", "=", "[", "self", ".", "center", "[", "1", "]", "*", "scale", ",", "self", ".", "center", "[", "0", "]", "*", "scale", "]", ",", "resize", "=", "True", ")", "# The resizeable transform will shift the resulting image somewhat wrt original coordinates.", "# When we cut out the box we will compensate for this shift.", "shift_c", ",", "shift_r", "=", "self", ".", "_compensate_rotation_shift", "(", "img", ",", "scale", ")", "r1", "=", "max", "(", "int", "(", "(", "self", ".", "center", "[", "0", "]", "-", "self", ".", "height", "/", "2", "-", "margin_height", ")", "*", "scale", "-", "shift_r", ")", ",", "0", ")", "r2", "=", "int", "(", "(", "self", ".", "center", "[", "0", "]", "+", "self", ".", "height", "/", "2", "+", "margin_height", ")", "*", "scale", "-", "shift_r", ")", "c1", "=", "max", "(", "int", "(", "(", "self", ".", "center", "[", "1", "]", "-", "self", ".", "width", "/", "2", "-", "margin_width", ")", "*", "scale", "-", "shift_c", ")", ",", "0", ")", "c2", "=", "int", "(", "(", "self", ".", "center", "[", "1", "]", "+", "self", ".", "width", "/", "2", "+", "margin_width", ")", "*", "scale", "-", "shift_c", ")", "return", "img_rotated", "[", "r1", ":", "r2", ",", "c1", ":", "c2", "]" ]
Extracts the contents of this box from a given image. For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it. Returns an image with dimensions height*scale x width*scale. Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column), and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation") :param img: a numpy ndarray suitable for image processing via skimage. :param scale: the RotatedBox is scaled by this value before performing the extraction. This is necessary when, for example, the location of a particular feature is determined using a smaller image, yet then the corresponding area needs to be extracted from the original, larger image. The scale parameter in this case should be width_of_larger_image/width_of_smaller_image. :param margin_width: The margin that should be added to the width dimension of the box from each size. This value is given wrt actual box dimensions (i.e. not scaled). :param margin_height: The margin that should be added to the height dimension of the box from each side. :return: a numpy ndarray, corresponding to the extracted region (aligned straight). TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand.
[ "Extracts", "the", "contents", "of", "this", "box", "from", "a", "given", "image", ".", "For", "that", "the", "image", "is", "unrotated", "by", "the", "appropriate", "angle", "and", "the", "corresponding", "part", "is", "extracted", "from", "it", "." ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L119-L149
train
konstantint/PassportEye
passporteye/mrz/image.py
read_mrz
def read_mrz(file, save_roi=False, extra_cmdline_params=''): """The main interface function to this module, encapsulating the recognition pipeline. Given an image filename, runs MRZPipeline on it, returning the parsed MRZ object. :param file: A filename or a stream to read the file data from. :param save_roi: when this is True, the .aux['roi'] field will contain the Region of Interest where the MRZ was parsed from. :param extra_cmdline_params:extra parameters to the ocr.py """ p = MRZPipeline(file, extra_cmdline_params) mrz = p.result if mrz is not None: mrz.aux['text'] = p['text'] if save_roi: mrz.aux['roi'] = p['roi'] return mrz
python
def read_mrz(file, save_roi=False, extra_cmdline_params=''): """The main interface function to this module, encapsulating the recognition pipeline. Given an image filename, runs MRZPipeline on it, returning the parsed MRZ object. :param file: A filename or a stream to read the file data from. :param save_roi: when this is True, the .aux['roi'] field will contain the Region of Interest where the MRZ was parsed from. :param extra_cmdline_params:extra parameters to the ocr.py """ p = MRZPipeline(file, extra_cmdline_params) mrz = p.result if mrz is not None: mrz.aux['text'] = p['text'] if save_roi: mrz.aux['roi'] = p['roi'] return mrz
[ "def", "read_mrz", "(", "file", ",", "save_roi", "=", "False", ",", "extra_cmdline_params", "=", "''", ")", ":", "p", "=", "MRZPipeline", "(", "file", ",", "extra_cmdline_params", ")", "mrz", "=", "p", ".", "result", "if", "mrz", "is", "not", "None", ":", "mrz", ".", "aux", "[", "'text'", "]", "=", "p", "[", "'text'", "]", "if", "save_roi", ":", "mrz", ".", "aux", "[", "'roi'", "]", "=", "p", "[", "'roi'", "]", "return", "mrz" ]
The main interface function to this module, encapsulating the recognition pipeline. Given an image filename, runs MRZPipeline on it, returning the parsed MRZ object. :param file: A filename or a stream to read the file data from. :param save_roi: when this is True, the .aux['roi'] field will contain the Region of Interest where the MRZ was parsed from. :param extra_cmdline_params:extra parameters to the ocr.py
[ "The", "main", "interface", "function", "to", "this", "module", "encapsulating", "the", "recognition", "pipeline", ".", "Given", "an", "image", "filename", "runs", "MRZPipeline", "on", "it", "returning", "the", "parsed", "MRZ", "object", "." ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L328-L343
train
konstantint/PassportEye
passporteye/mrz/image.py
Loader._imread
def _imread(self, file): """Proxy to skimage.io.imread with some fixes.""" # For now, we have to select the imageio plugin to read image from byte stream # When ski-image v0.15 is released, imageio will be the default plugin, so this # code can be simplified at that time. See issue report and pull request: # https://github.com/scikit-image/scikit-image/issues/2889 # https://github.com/scikit-image/scikit-image/pull/3126 img = skimage_io.imread(file, as_gray=self.as_gray, plugin='imageio') if img is not None and len(img.shape) != 2: # The PIL plugin somewhy fails to load some images img = skimage_io.imread(file, as_gray=self.as_gray, plugin='matplotlib') return img
python
def _imread(self, file): """Proxy to skimage.io.imread with some fixes.""" # For now, we have to select the imageio plugin to read image from byte stream # When ski-image v0.15 is released, imageio will be the default plugin, so this # code can be simplified at that time. See issue report and pull request: # https://github.com/scikit-image/scikit-image/issues/2889 # https://github.com/scikit-image/scikit-image/pull/3126 img = skimage_io.imread(file, as_gray=self.as_gray, plugin='imageio') if img is not None and len(img.shape) != 2: # The PIL plugin somewhy fails to load some images img = skimage_io.imread(file, as_gray=self.as_gray, plugin='matplotlib') return img
[ "def", "_imread", "(", "self", ",", "file", ")", ":", "# For now, we have to select the imageio plugin to read image from byte stream", "# When ski-image v0.15 is released, imageio will be the default plugin, so this", "# code can be simplified at that time. See issue report and pull request:", "# https://github.com/scikit-image/scikit-image/issues/2889", "# https://github.com/scikit-image/scikit-image/pull/3126", "img", "=", "skimage_io", ".", "imread", "(", "file", ",", "as_gray", "=", "self", ".", "as_gray", ",", "plugin", "=", "'imageio'", ")", "if", "img", "is", "not", "None", "and", "len", "(", "img", ".", "shape", ")", "!=", "2", ":", "# The PIL plugin somewhy fails to load some images", "img", "=", "skimage_io", ".", "imread", "(", "file", ",", "as_gray", "=", "self", ".", "as_gray", ",", "plugin", "=", "'matplotlib'", ")", "return", "img" ]
Proxy to skimage.io.imread with some fixes.
[ "Proxy", "to", "skimage", ".", "io", ".", "imread", "with", "some", "fixes", "." ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L30-L41
train
konstantint/PassportEye
passporteye/mrz/image.py
MRZBoxLocator._are_aligned_angles
def _are_aligned_angles(self, b1, b2): "Are two boxes aligned according to their angle?" return abs(b1 - b2) <= self.angle_tol or abs(np.pi - abs(b1 - b2)) <= self.angle_tol
python
def _are_aligned_angles(self, b1, b2): "Are two boxes aligned according to their angle?" return abs(b1 - b2) <= self.angle_tol or abs(np.pi - abs(b1 - b2)) <= self.angle_tol
[ "def", "_are_aligned_angles", "(", "self", ",", "b1", ",", "b2", ")", ":", "return", "abs", "(", "b1", "-", "b2", ")", "<=", "self", ".", "angle_tol", "or", "abs", "(", "np", ".", "pi", "-", "abs", "(", "b1", "-", "b2", ")", ")", "<=", "self", ".", "angle_tol" ]
Are two boxes aligned according to their angle?
[ "Are", "two", "boxes", "aligned", "according", "to", "their", "angle?" ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L136-L138
train
konstantint/PassportEye
passporteye/mrz/image.py
MRZBoxLocator._are_nearby_parallel_boxes
def _are_nearby_parallel_boxes(self, b1, b2): "Are two boxes nearby, parallel, and similar in width?" if not self._are_aligned_angles(b1.angle, b2.angle): return False # Otherwise pick the smaller angle and see whether the two boxes are close according to the "up" direction wrt that angle angle = min(b1.angle, b2.angle) return abs(np.dot(b1.center - b2.center, [-np.sin(angle), np.cos(angle)])) < self.lineskip_tol * ( b1.height + b2.height) and (b1.width > 0) and (b2.width > 0) and (0.5 < b1.width / b2.width < 2.0)
python
def _are_nearby_parallel_boxes(self, b1, b2): "Are two boxes nearby, parallel, and similar in width?" if not self._are_aligned_angles(b1.angle, b2.angle): return False # Otherwise pick the smaller angle and see whether the two boxes are close according to the "up" direction wrt that angle angle = min(b1.angle, b2.angle) return abs(np.dot(b1.center - b2.center, [-np.sin(angle), np.cos(angle)])) < self.lineskip_tol * ( b1.height + b2.height) and (b1.width > 0) and (b2.width > 0) and (0.5 < b1.width / b2.width < 2.0)
[ "def", "_are_nearby_parallel_boxes", "(", "self", ",", "b1", ",", "b2", ")", ":", "if", "not", "self", ".", "_are_aligned_angles", "(", "b1", ".", "angle", ",", "b2", ".", "angle", ")", ":", "return", "False", "# Otherwise pick the smaller angle and see whether the two boxes are close according to the \"up\" direction wrt that angle", "angle", "=", "min", "(", "b1", ".", "angle", ",", "b2", ".", "angle", ")", "return", "abs", "(", "np", ".", "dot", "(", "b1", ".", "center", "-", "b2", ".", "center", ",", "[", "-", "np", ".", "sin", "(", "angle", ")", ",", "np", ".", "cos", "(", "angle", ")", "]", ")", ")", "<", "self", ".", "lineskip_tol", "*", "(", "b1", ".", "height", "+", "b2", ".", "height", ")", "and", "(", "b1", ".", "width", ">", "0", ")", "and", "(", "b2", ".", "width", ">", "0", ")", "and", "(", "0.5", "<", "b1", ".", "width", "/", "b2", ".", "width", "<", "2.0", ")" ]
Are two boxes nearby, parallel, and similar in width?
[ "Are", "two", "boxes", "nearby", "parallel", "and", "similar", "in", "width?" ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L140-L147
train
konstantint/PassportEye
passporteye/mrz/image.py
MRZBoxLocator._merge_any_two_boxes
def _merge_any_two_boxes(self, box_list): """Given a list of boxes, finds two nearby parallel ones and merges them. Returns false if none found.""" n = len(box_list) for i in range(n): for j in range(i + 1, n): if self._are_nearby_parallel_boxes(box_list[i], box_list[j]): # Remove the two boxes from the list, add a new one a, b = box_list[i], box_list[j] merged_points = np.vstack([a.points, b.points]) merged_box = RotatedBox.from_points(merged_points, self.box_type) if merged_box.width / merged_box.height >= self.min_box_aspect: box_list.remove(a) box_list.remove(b) box_list.append(merged_box) return True return False
python
def _merge_any_two_boxes(self, box_list): """Given a list of boxes, finds two nearby parallel ones and merges them. Returns false if none found.""" n = len(box_list) for i in range(n): for j in range(i + 1, n): if self._are_nearby_parallel_boxes(box_list[i], box_list[j]): # Remove the two boxes from the list, add a new one a, b = box_list[i], box_list[j] merged_points = np.vstack([a.points, b.points]) merged_box = RotatedBox.from_points(merged_points, self.box_type) if merged_box.width / merged_box.height >= self.min_box_aspect: box_list.remove(a) box_list.remove(b) box_list.append(merged_box) return True return False
[ "def", "_merge_any_two_boxes", "(", "self", ",", "box_list", ")", ":", "n", "=", "len", "(", "box_list", ")", "for", "i", "in", "range", "(", "n", ")", ":", "for", "j", "in", "range", "(", "i", "+", "1", ",", "n", ")", ":", "if", "self", ".", "_are_nearby_parallel_boxes", "(", "box_list", "[", "i", "]", ",", "box_list", "[", "j", "]", ")", ":", "# Remove the two boxes from the list, add a new one", "a", ",", "b", "=", "box_list", "[", "i", "]", ",", "box_list", "[", "j", "]", "merged_points", "=", "np", ".", "vstack", "(", "[", "a", ".", "points", ",", "b", ".", "points", "]", ")", "merged_box", "=", "RotatedBox", ".", "from_points", "(", "merged_points", ",", "self", ".", "box_type", ")", "if", "merged_box", ".", "width", "/", "merged_box", ".", "height", ">=", "self", ".", "min_box_aspect", ":", "box_list", ".", "remove", "(", "a", ")", "box_list", ".", "remove", "(", "b", ")", "box_list", ".", "append", "(", "merged_box", ")", "return", "True", "return", "False" ]
Given a list of boxes, finds two nearby parallel ones and merges them. Returns false if none found.
[ "Given", "a", "list", "of", "boxes", "finds", "two", "nearby", "parallel", "ones", "and", "merges", "them", ".", "Returns", "false", "if", "none", "found", "." ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L149-L164
train
konstantint/PassportEye
passporteye/mrz/image.py
BoxToMRZ._try_larger_image
def _try_larger_image(self, roi, cur_text, cur_mrz, filter_order=3): """Attempts to improve the OCR result by scaling the image. If the new mrz is better, returns it, otherwise returns the old mrz.""" if roi.shape[1] <= 700: scale_by = int(1050.0 / roi.shape[1] + 0.5) roi_lg = transform.rescale(roi, scale_by, order=filter_order, mode='constant', multichannel=False, anti_aliasing=True) new_text = ocr(roi_lg, extra_cmdline_params=self.extra_cmdline_params) new_mrz = MRZ.from_ocr(new_text) new_mrz.aux['method'] = 'rescaled(%d)' % filter_order if new_mrz.valid_score > cur_mrz.valid_score: cur_mrz = new_mrz cur_text = new_text return cur_text, cur_mrz
python
def _try_larger_image(self, roi, cur_text, cur_mrz, filter_order=3): """Attempts to improve the OCR result by scaling the image. If the new mrz is better, returns it, otherwise returns the old mrz.""" if roi.shape[1] <= 700: scale_by = int(1050.0 / roi.shape[1] + 0.5) roi_lg = transform.rescale(roi, scale_by, order=filter_order, mode='constant', multichannel=False, anti_aliasing=True) new_text = ocr(roi_lg, extra_cmdline_params=self.extra_cmdline_params) new_mrz = MRZ.from_ocr(new_text) new_mrz.aux['method'] = 'rescaled(%d)' % filter_order if new_mrz.valid_score > cur_mrz.valid_score: cur_mrz = new_mrz cur_text = new_text return cur_text, cur_mrz
[ "def", "_try_larger_image", "(", "self", ",", "roi", ",", "cur_text", ",", "cur_mrz", ",", "filter_order", "=", "3", ")", ":", "if", "roi", ".", "shape", "[", "1", "]", "<=", "700", ":", "scale_by", "=", "int", "(", "1050.0", "/", "roi", ".", "shape", "[", "1", "]", "+", "0.5", ")", "roi_lg", "=", "transform", ".", "rescale", "(", "roi", ",", "scale_by", ",", "order", "=", "filter_order", ",", "mode", "=", "'constant'", ",", "multichannel", "=", "False", ",", "anti_aliasing", "=", "True", ")", "new_text", "=", "ocr", "(", "roi_lg", ",", "extra_cmdline_params", "=", "self", ".", "extra_cmdline_params", ")", "new_mrz", "=", "MRZ", ".", "from_ocr", "(", "new_text", ")", "new_mrz", ".", "aux", "[", "'method'", "]", "=", "'rescaled(%d)'", "%", "filter_order", "if", "new_mrz", ".", "valid_score", ">", "cur_mrz", ".", "valid_score", ":", "cur_mrz", "=", "new_mrz", "cur_text", "=", "new_text", "return", "cur_text", ",", "cur_mrz" ]
Attempts to improve the OCR result by scaling the image. If the new mrz is better, returns it, otherwise returns the old mrz.
[ "Attempts", "to", "improve", "the", "OCR", "result", "by", "scaling", "the", "image", ".", "If", "the", "new", "mrz", "is", "better", "returns", "it", "otherwise", "returns", "the", "old", "mrz", "." ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L254-L267
train
konstantint/PassportEye
passporteye/mrz/scripts.py
mrz
def mrz(): """ Command-line script for extracting MRZ from a given image """ parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the given image.') parser.add_argument('filename') parser.add_argument('--json', action='store_true', help='Produce JSON (rather than tabular) output') parser.add_argument('--legacy', action='store_true', help='Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better ' 'results. It is not the default option, because it will only work if ' 'your Tesseract installation includes the legacy *.traineddata files. You can download them at ' 'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016') parser.add_argument('-r', '--save-roi', default=None, help='Output the region of the image that is detected to contain the MRZ to the given png file') parser.add_argument('--version', action='version', version='PassportEye MRZ v%s' % passporteye.__version__) args = parser.parse_args() try: extra_params = '--oem 0' if args.legacy else '' filename, mrz_, walltime = process_file((args.filename, args.save_roi is not None, extra_params)) except TesseractNotFoundError: sys.stderr.write("ERROR: The tesseract executable was not found.\n" "Please, make sure Tesseract is installed and the appropriate directory is included " "in your PATH environment variable.\n") sys.exit(1) except TesseractError as ex: sys.stderr.write("ERROR: %s" % ex.message) sys.exit(ex.status) d = mrz_.to_dict() if mrz_ is not None else {'mrz_type': None, 'valid': False, 'valid_score': 0} d['walltime'] = walltime d['filename'] = filename if args.save_roi is not None and mrz_ is not None and 'roi' in mrz_.aux: io.imsave(args.save_roi, mrz_.aux['roi']) if not args.json: for k in d: print("%s\t%s" % (k, str(d[k]))) else: print(json.dumps(d, indent=2))
python
def mrz(): """ Command-line script for extracting MRZ from a given image """ parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the given image.') parser.add_argument('filename') parser.add_argument('--json', action='store_true', help='Produce JSON (rather than tabular) output') parser.add_argument('--legacy', action='store_true', help='Use the "legacy" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better ' 'results. It is not the default option, because it will only work if ' 'your Tesseract installation includes the legacy *.traineddata files. You can download them at ' 'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016') parser.add_argument('-r', '--save-roi', default=None, help='Output the region of the image that is detected to contain the MRZ to the given png file') parser.add_argument('--version', action='version', version='PassportEye MRZ v%s' % passporteye.__version__) args = parser.parse_args() try: extra_params = '--oem 0' if args.legacy else '' filename, mrz_, walltime = process_file((args.filename, args.save_roi is not None, extra_params)) except TesseractNotFoundError: sys.stderr.write("ERROR: The tesseract executable was not found.\n" "Please, make sure Tesseract is installed and the appropriate directory is included " "in your PATH environment variable.\n") sys.exit(1) except TesseractError as ex: sys.stderr.write("ERROR: %s" % ex.message) sys.exit(ex.status) d = mrz_.to_dict() if mrz_ is not None else {'mrz_type': None, 'valid': False, 'valid_score': 0} d['walltime'] = walltime d['filename'] = filename if args.save_roi is not None and mrz_ is not None and 'roi' in mrz_.aux: io.imsave(args.save_roi, mrz_.aux['roi']) if not args.json: for k in d: print("%s\t%s" % (k, str(d[k]))) else: print(json.dumps(d, indent=2))
[ "def", "mrz", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Run the MRZ OCR recognition algorithm on the given image.'", ")", "parser", ".", "add_argument", "(", "'filename'", ")", "parser", ".", "add_argument", "(", "'--json'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Produce JSON (rather than tabular) output'", ")", "parser", ".", "add_argument", "(", "'--legacy'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Use the \"legacy\" Tesseract OCR engine (--oem 0). Despite the name, it most often results in better '", "'results. It is not the default option, because it will only work if '", "'your Tesseract installation includes the legacy *.traineddata files. You can download them at '", "'https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016'", ")", "parser", ".", "add_argument", "(", "'-r'", ",", "'--save-roi'", ",", "default", "=", "None", ",", "help", "=", "'Output the region of the image that is detected to contain the MRZ to the given png file'", ")", "parser", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "'PassportEye MRZ v%s'", "%", "passporteye", ".", "__version__", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "try", ":", "extra_params", "=", "'--oem 0'", "if", "args", ".", "legacy", "else", "''", "filename", ",", "mrz_", ",", "walltime", "=", "process_file", "(", "(", "args", ".", "filename", ",", "args", ".", "save_roi", "is", "not", "None", ",", "extra_params", ")", ")", "except", "TesseractNotFoundError", ":", "sys", ".", "stderr", ".", "write", "(", "\"ERROR: The tesseract executable was not found.\\n\"", "\"Please, make sure Tesseract is installed and the appropriate directory is included \"", "\"in your PATH environment variable.\\n\"", ")", "sys", ".", "exit", "(", "1", ")", "except", "TesseractError", "as", "ex", ":", "sys", ".", "stderr", ".", "write", "(", "\"ERROR: 
%s\"", "%", "ex", ".", "message", ")", "sys", ".", "exit", "(", "ex", ".", "status", ")", "d", "=", "mrz_", ".", "to_dict", "(", ")", "if", "mrz_", "is", "not", "None", "else", "{", "'mrz_type'", ":", "None", ",", "'valid'", ":", "False", ",", "'valid_score'", ":", "0", "}", "d", "[", "'walltime'", "]", "=", "walltime", "d", "[", "'filename'", "]", "=", "filename", "if", "args", ".", "save_roi", "is", "not", "None", "and", "mrz_", "is", "not", "None", "and", "'roi'", "in", "mrz_", ".", "aux", ":", "io", ".", "imsave", "(", "args", ".", "save_roi", ",", "mrz_", ".", "aux", "[", "'roi'", "]", ")", "if", "not", "args", ".", "json", ":", "for", "k", "in", "d", ":", "print", "(", "\"%s\\t%s\"", "%", "(", "k", ",", "str", "(", "d", "[", "k", "]", ")", ")", ")", "else", ":", "print", "(", "json", ".", "dumps", "(", "d", ",", "indent", "=", "2", ")", ")" ]
Command-line script for extracting MRZ from a given image
[ "Command", "-", "line", "script", "for", "extracting", "MRZ", "from", "a", "given", "image" ]
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/scripts.py#L134-L174
train
glitchassassin/lackey
lackey/PlatformManagerWindows.py
PlatformManagerWindows._check_count
def _check_count(self, result, func, args): #pylint: disable=unused-argument """ Private function to return ctypes errors cleanly """ if result == 0: raise ctypes.WinError(ctypes.get_last_error()) return args
python
def _check_count(self, result, func, args): #pylint: disable=unused-argument """ Private function to return ctypes errors cleanly """ if result == 0: raise ctypes.WinError(ctypes.get_last_error()) return args
[ "def", "_check_count", "(", "self", ",", "result", ",", "func", ",", "args", ")", ":", "#pylint: disable=unused-argument", "if", "result", "==", "0", ":", "raise", "ctypes", ".", "WinError", "(", "ctypes", ".", "get_last_error", "(", ")", ")", "return", "args" ]
Private function to return ctypes errors cleanly
[ "Private", "function", "to", "return", "ctypes", "errors", "cleanly" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerWindows.py#L210-L215
train
glitchassassin/lackey
lackey/PlatformManagerWindows.py
PlatformManagerWindows._getMonitorInfo
def _getMonitorInfo(self): """ Returns info about the attached monitors, in device order [0] is always the primary monitor """ monitors = [] CCHDEVICENAME = 32 def _MonitorEnumProcCallback(hMonitor, hdcMonitor, lprcMonitor, dwData): class MONITORINFOEX(ctypes.Structure): _fields_ = [("cbSize", ctypes.wintypes.DWORD), ("rcMonitor", ctypes.wintypes.RECT), ("rcWork", ctypes.wintypes.RECT), ("dwFlags", ctypes.wintypes.DWORD), ("szDevice", ctypes.wintypes.WCHAR*CCHDEVICENAME)] lpmi = MONITORINFOEX() lpmi.cbSize = ctypes.sizeof(MONITORINFOEX) self._user32.GetMonitorInfoW(hMonitor, ctypes.byref(lpmi)) #hdc = self._gdi32.CreateDCA(ctypes.c_char_p(lpmi.szDevice), 0, 0, 0) monitors.append({ "hmon": hMonitor, #"hdc": hdc, "rect": (lprcMonitor.contents.left, lprcMonitor.contents.top, lprcMonitor.contents.right, lprcMonitor.contents.bottom), "name": lpmi.szDevice }) return True MonitorEnumProc = ctypes.WINFUNCTYPE( ctypes.c_bool, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(ctypes.wintypes.RECT), ctypes.c_int) callback = MonitorEnumProc(_MonitorEnumProcCallback) if self._user32.EnumDisplayMonitors(0, 0, callback, 0) == 0: raise WindowsError("Unable to enumerate monitors") # Clever magic to make the screen with origin of (0,0) [the primary monitor] # the first in the list # Sort by device ID - 0 is primary, 1 is next, etc. monitors.sort(key=lambda x: (not (x["rect"][0] == 0 and x["rect"][1] == 0), x["name"])) return monitors
python
def _getMonitorInfo(self): """ Returns info about the attached monitors, in device order [0] is always the primary monitor """ monitors = [] CCHDEVICENAME = 32 def _MonitorEnumProcCallback(hMonitor, hdcMonitor, lprcMonitor, dwData): class MONITORINFOEX(ctypes.Structure): _fields_ = [("cbSize", ctypes.wintypes.DWORD), ("rcMonitor", ctypes.wintypes.RECT), ("rcWork", ctypes.wintypes.RECT), ("dwFlags", ctypes.wintypes.DWORD), ("szDevice", ctypes.wintypes.WCHAR*CCHDEVICENAME)] lpmi = MONITORINFOEX() lpmi.cbSize = ctypes.sizeof(MONITORINFOEX) self._user32.GetMonitorInfoW(hMonitor, ctypes.byref(lpmi)) #hdc = self._gdi32.CreateDCA(ctypes.c_char_p(lpmi.szDevice), 0, 0, 0) monitors.append({ "hmon": hMonitor, #"hdc": hdc, "rect": (lprcMonitor.contents.left, lprcMonitor.contents.top, lprcMonitor.contents.right, lprcMonitor.contents.bottom), "name": lpmi.szDevice }) return True MonitorEnumProc = ctypes.WINFUNCTYPE( ctypes.c_bool, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(ctypes.wintypes.RECT), ctypes.c_int) callback = MonitorEnumProc(_MonitorEnumProcCallback) if self._user32.EnumDisplayMonitors(0, 0, callback, 0) == 0: raise WindowsError("Unable to enumerate monitors") # Clever magic to make the screen with origin of (0,0) [the primary monitor] # the first in the list # Sort by device ID - 0 is primary, 1 is next, etc. monitors.sort(key=lambda x: (not (x["rect"][0] == 0 and x["rect"][1] == 0), x["name"])) return monitors
[ "def", "_getMonitorInfo", "(", "self", ")", ":", "monitors", "=", "[", "]", "CCHDEVICENAME", "=", "32", "def", "_MonitorEnumProcCallback", "(", "hMonitor", ",", "hdcMonitor", ",", "lprcMonitor", ",", "dwData", ")", ":", "class", "MONITORINFOEX", "(", "ctypes", ".", "Structure", ")", ":", "_fields_", "=", "[", "(", "\"cbSize\"", ",", "ctypes", ".", "wintypes", ".", "DWORD", ")", ",", "(", "\"rcMonitor\"", ",", "ctypes", ".", "wintypes", ".", "RECT", ")", ",", "(", "\"rcWork\"", ",", "ctypes", ".", "wintypes", ".", "RECT", ")", ",", "(", "\"dwFlags\"", ",", "ctypes", ".", "wintypes", ".", "DWORD", ")", ",", "(", "\"szDevice\"", ",", "ctypes", ".", "wintypes", ".", "WCHAR", "*", "CCHDEVICENAME", ")", "]", "lpmi", "=", "MONITORINFOEX", "(", ")", "lpmi", ".", "cbSize", "=", "ctypes", ".", "sizeof", "(", "MONITORINFOEX", ")", "self", ".", "_user32", ".", "GetMonitorInfoW", "(", "hMonitor", ",", "ctypes", ".", "byref", "(", "lpmi", ")", ")", "#hdc = self._gdi32.CreateDCA(ctypes.c_char_p(lpmi.szDevice), 0, 0, 0)", "monitors", ".", "append", "(", "{", "\"hmon\"", ":", "hMonitor", ",", "#\"hdc\": hdc,", "\"rect\"", ":", "(", "lprcMonitor", ".", "contents", ".", "left", ",", "lprcMonitor", ".", "contents", ".", "top", ",", "lprcMonitor", ".", "contents", ".", "right", ",", "lprcMonitor", ".", "contents", ".", "bottom", ")", ",", "\"name\"", ":", "lpmi", ".", "szDevice", "}", ")", "return", "True", "MonitorEnumProc", "=", "ctypes", ".", "WINFUNCTYPE", "(", "ctypes", ".", "c_bool", ",", "ctypes", ".", "c_ulong", ",", "ctypes", ".", "c_ulong", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "wintypes", ".", "RECT", ")", ",", "ctypes", ".", "c_int", ")", "callback", "=", "MonitorEnumProc", "(", "_MonitorEnumProcCallback", ")", "if", "self", ".", "_user32", ".", "EnumDisplayMonitors", "(", "0", ",", "0", ",", "callback", ",", "0", ")", "==", "0", ":", "raise", "WindowsError", "(", "\"Unable to enumerate monitors\"", ")", "# Clever magic to make the screen with origin of (0,0) 
[the primary monitor]", "# the first in the list", "# Sort by device ID - 0 is primary, 1 is next, etc.", "monitors", ".", "sort", "(", "key", "=", "lambda", "x", ":", "(", "not", "(", "x", "[", "\"rect\"", "]", "[", "0", "]", "==", "0", "and", "x", "[", "\"rect\"", "]", "[", "1", "]", "==", "0", ")", ",", "x", "[", "\"name\"", "]", ")", ")", "return", "monitors" ]
Returns info about the attached monitors, in device order [0] is always the primary monitor
[ "Returns", "info", "about", "the", "attached", "monitors", "in", "device", "order" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerWindows.py#L401-L443
train
glitchassassin/lackey
lackey/PlatformManagerWindows.py
PlatformManagerWindows._getVirtualScreenRect
def _getVirtualScreenRect(self): """ The virtual screen is the bounding box containing all monitors. Not all regions in the virtual screen are actually visible. The (0,0) coordinate is the top left corner of the primary screen rather than the whole bounding box, so some regions of the virtual screen may have negative coordinates if another screen is positioned in Windows as further to the left or above the primary screen. Returns the rect as (x, y, w, h) """ SM_XVIRTUALSCREEN = 76 # Left of virtual screen SM_YVIRTUALSCREEN = 77 # Top of virtual screen SM_CXVIRTUALSCREEN = 78 # Width of virtual screen SM_CYVIRTUALSCREEN = 79 # Height of virtual screen return (self._user32.GetSystemMetrics(SM_XVIRTUALSCREEN), \ self._user32.GetSystemMetrics(SM_YVIRTUALSCREEN), \ self._user32.GetSystemMetrics(SM_CXVIRTUALSCREEN), \ self._user32.GetSystemMetrics(SM_CYVIRTUALSCREEN))
python
def _getVirtualScreenRect(self): """ The virtual screen is the bounding box containing all monitors. Not all regions in the virtual screen are actually visible. The (0,0) coordinate is the top left corner of the primary screen rather than the whole bounding box, so some regions of the virtual screen may have negative coordinates if another screen is positioned in Windows as further to the left or above the primary screen. Returns the rect as (x, y, w, h) """ SM_XVIRTUALSCREEN = 76 # Left of virtual screen SM_YVIRTUALSCREEN = 77 # Top of virtual screen SM_CXVIRTUALSCREEN = 78 # Width of virtual screen SM_CYVIRTUALSCREEN = 79 # Height of virtual screen return (self._user32.GetSystemMetrics(SM_XVIRTUALSCREEN), \ self._user32.GetSystemMetrics(SM_YVIRTUALSCREEN), \ self._user32.GetSystemMetrics(SM_CXVIRTUALSCREEN), \ self._user32.GetSystemMetrics(SM_CYVIRTUALSCREEN))
[ "def", "_getVirtualScreenRect", "(", "self", ")", ":", "SM_XVIRTUALSCREEN", "=", "76", "# Left of virtual screen", "SM_YVIRTUALSCREEN", "=", "77", "# Top of virtual screen", "SM_CXVIRTUALSCREEN", "=", "78", "# Width of virtual screen", "SM_CYVIRTUALSCREEN", "=", "79", "# Height of virtual screen", "return", "(", "self", ".", "_user32", ".", "GetSystemMetrics", "(", "SM_XVIRTUALSCREEN", ")", ",", "self", ".", "_user32", ".", "GetSystemMetrics", "(", "SM_YVIRTUALSCREEN", ")", ",", "self", ".", "_user32", ".", "GetSystemMetrics", "(", "SM_CXVIRTUALSCREEN", ")", ",", "self", ".", "_user32", ".", "GetSystemMetrics", "(", "SM_CYVIRTUALSCREEN", ")", ")" ]
The virtual screen is the bounding box containing all monitors. Not all regions in the virtual screen are actually visible. The (0,0) coordinate is the top left corner of the primary screen rather than the whole bounding box, so some regions of the virtual screen may have negative coordinates if another screen is positioned in Windows as further to the left or above the primary screen. Returns the rect as (x, y, w, h)
[ "The", "virtual", "screen", "is", "the", "bounding", "box", "containing", "all", "monitors", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerWindows.py#L444-L462
train
glitchassassin/lackey
lackey/PlatformManagerWindows.py
PlatformManagerWindows.osPaste
def osPaste(self): """ Triggers the OS "paste" keyboard shortcut """ from .InputEmulation import Keyboard k = Keyboard() k.keyDown("{CTRL}") k.type("v") k.keyUp("{CTRL}")
python
def osPaste(self): """ Triggers the OS "paste" keyboard shortcut """ from .InputEmulation import Keyboard k = Keyboard() k.keyDown("{CTRL}") k.type("v") k.keyUp("{CTRL}")
[ "def", "osPaste", "(", "self", ")", ":", "from", ".", "InputEmulation", "import", "Keyboard", "k", "=", "Keyboard", "(", ")", "k", ".", "keyDown", "(", "\"{CTRL}\"", ")", "k", ".", "type", "(", "\"v\"", ")", "k", ".", "keyUp", "(", "\"{CTRL}\"", ")" ]
Triggers the OS "paste" keyboard shortcut
[ "Triggers", "the", "OS", "paste", "keyboard", "shortcut" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerWindows.py#L497-L503
train
glitchassassin/lackey
lackey/PlatformManagerWindows.py
PlatformManagerWindows.focusWindow
def focusWindow(self, hwnd): """ Brings specified window to the front """ Debug.log(3, "Focusing window: " + str(hwnd)) SW_RESTORE = 9 if ctypes.windll.user32.IsIconic(hwnd): ctypes.windll.user32.ShowWindow(hwnd, SW_RESTORE) ctypes.windll.user32.SetForegroundWindow(hwnd)
python
def focusWindow(self, hwnd): """ Brings specified window to the front """ Debug.log(3, "Focusing window: " + str(hwnd)) SW_RESTORE = 9 if ctypes.windll.user32.IsIconic(hwnd): ctypes.windll.user32.ShowWindow(hwnd, SW_RESTORE) ctypes.windll.user32.SetForegroundWindow(hwnd)
[ "def", "focusWindow", "(", "self", ",", "hwnd", ")", ":", "Debug", ".", "log", "(", "3", ",", "\"Focusing window: \"", "+", "str", "(", "hwnd", ")", ")", "SW_RESTORE", "=", "9", "if", "ctypes", ".", "windll", ".", "user32", ".", "IsIconic", "(", "hwnd", ")", ":", "ctypes", ".", "windll", ".", "user32", ".", "ShowWindow", "(", "hwnd", ",", "SW_RESTORE", ")", "ctypes", ".", "windll", ".", "user32", ".", "SetForegroundWindow", "(", "hwnd", ")" ]
Brings specified window to the front
[ "Brings", "specified", "window", "to", "the", "front" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerWindows.py#L557-L563
train
glitchassassin/lackey
lackey/PlatformManagerWindows.py
PlatformManagerWindows.isPIDValid
def isPIDValid(self, pid): """ Checks if a PID is associated with a running process """ ## Slightly copied wholesale from http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid ## Thanks to http://stackoverflow.com/users/1777162/ntrrgc and http://stackoverflow.com/users/234270/speedplane class ExitCodeProcess(ctypes.Structure): _fields_ = [('hProcess', ctypes.c_void_p), ('lpExitCode', ctypes.POINTER(ctypes.c_ulong))] SYNCHRONIZE = 0x100000 PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 process = self._kernel32.OpenProcess(SYNCHRONIZE|PROCESS_QUERY_LIMITED_INFORMATION, 0, pid) if not process: return False ec = ExitCodeProcess() out = self._kernel32.GetExitCodeProcess(process, ctypes.byref(ec)) if not out: err = self._kernel32.GetLastError() if self._kernel32.GetLastError() == 5: # Access is denied. logging.warning("Access is denied to get pid info.") self._kernel32.CloseHandle(process) return False elif bool(ec.lpExitCode): # There is an exit code, it quit self._kernel32.CloseHandle(process) return False # No exit code, it's running. self._kernel32.CloseHandle(process) return True
python
def isPIDValid(self, pid): """ Checks if a PID is associated with a running process """ ## Slightly copied wholesale from http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid ## Thanks to http://stackoverflow.com/users/1777162/ntrrgc and http://stackoverflow.com/users/234270/speedplane class ExitCodeProcess(ctypes.Structure): _fields_ = [('hProcess', ctypes.c_void_p), ('lpExitCode', ctypes.POINTER(ctypes.c_ulong))] SYNCHRONIZE = 0x100000 PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 process = self._kernel32.OpenProcess(SYNCHRONIZE|PROCESS_QUERY_LIMITED_INFORMATION, 0, pid) if not process: return False ec = ExitCodeProcess() out = self._kernel32.GetExitCodeProcess(process, ctypes.byref(ec)) if not out: err = self._kernel32.GetLastError() if self._kernel32.GetLastError() == 5: # Access is denied. logging.warning("Access is denied to get pid info.") self._kernel32.CloseHandle(process) return False elif bool(ec.lpExitCode): # There is an exit code, it quit self._kernel32.CloseHandle(process) return False # No exit code, it's running. self._kernel32.CloseHandle(process) return True
[ "def", "isPIDValid", "(", "self", ",", "pid", ")", ":", "## Slightly copied wholesale from http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid", "## Thanks to http://stackoverflow.com/users/1777162/ntrrgc and http://stackoverflow.com/users/234270/speedplane", "class", "ExitCodeProcess", "(", "ctypes", ".", "Structure", ")", ":", "_fields_", "=", "[", "(", "'hProcess'", ",", "ctypes", ".", "c_void_p", ")", ",", "(", "'lpExitCode'", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_ulong", ")", ")", "]", "SYNCHRONIZE", "=", "0x100000", "PROCESS_QUERY_LIMITED_INFORMATION", "=", "0x1000", "process", "=", "self", ".", "_kernel32", ".", "OpenProcess", "(", "SYNCHRONIZE", "|", "PROCESS_QUERY_LIMITED_INFORMATION", ",", "0", ",", "pid", ")", "if", "not", "process", ":", "return", "False", "ec", "=", "ExitCodeProcess", "(", ")", "out", "=", "self", ".", "_kernel32", ".", "GetExitCodeProcess", "(", "process", ",", "ctypes", ".", "byref", "(", "ec", ")", ")", "if", "not", "out", ":", "err", "=", "self", ".", "_kernel32", ".", "GetLastError", "(", ")", "if", "self", ".", "_kernel32", ".", "GetLastError", "(", ")", "==", "5", ":", "# Access is denied.", "logging", ".", "warning", "(", "\"Access is denied to get pid info.\"", ")", "self", ".", "_kernel32", ".", "CloseHandle", "(", "process", ")", "return", "False", "elif", "bool", "(", "ec", ".", "lpExitCode", ")", ":", "# There is an exit code, it quit", "self", ".", "_kernel32", ".", "CloseHandle", "(", "process", ")", "return", "False", "# No exit code, it's running.", "self", ".", "_kernel32", ".", "CloseHandle", "(", "process", ")", "return", "True" ]
Checks if a PID is associated with a running process
[ "Checks", "if", "a", "PID", "is", "associated", "with", "a", "running", "process" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/PlatformManagerWindows.py#L608-L635
train
glitchassassin/lackey
lackey/RegionMatching.py
Pattern.similar
def similar(self, similarity): """ Returns a new Pattern with the specified similarity threshold """ pattern = Pattern(self.path) pattern.similarity = similarity return pattern
python
def similar(self, similarity): """ Returns a new Pattern with the specified similarity threshold """ pattern = Pattern(self.path) pattern.similarity = similarity return pattern
[ "def", "similar", "(", "self", ",", "similarity", ")", ":", "pattern", "=", "Pattern", "(", "self", ".", "path", ")", "pattern", ".", "similarity", "=", "similarity", "return", "pattern" ]
Returns a new Pattern with the specified similarity threshold
[ "Returns", "a", "new", "Pattern", "with", "the", "specified", "similarity", "threshold" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L73-L77
train
glitchassassin/lackey
lackey/RegionMatching.py
Pattern.targetOffset
def targetOffset(self, dx, dy): """ Returns a new Pattern with the given target offset """ pattern = Pattern(self.path) pattern.similarity = self.similarity pattern.offset = Location(dx, dy) return pattern
python
def targetOffset(self, dx, dy): """ Returns a new Pattern with the given target offset """ pattern = Pattern(self.path) pattern.similarity = self.similarity pattern.offset = Location(dx, dy) return pattern
[ "def", "targetOffset", "(", "self", ",", "dx", ",", "dy", ")", ":", "pattern", "=", "Pattern", "(", "self", ".", "path", ")", "pattern", ".", "similarity", "=", "self", ".", "similarity", "pattern", ".", "offset", "=", "Location", "(", "dx", ",", "dy", ")", "return", "pattern" ]
Returns a new Pattern with the given target offset
[ "Returns", "a", "new", "Pattern", "with", "the", "given", "target", "offset" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L88-L93
train
glitchassassin/lackey
lackey/RegionMatching.py
Pattern.debugPreview
def debugPreview(self, title="Debug"): """ Loads and displays the image at ``Pattern.path`` """ haystack = Image.open(self.path) haystack.show()
python
def debugPreview(self, title="Debug"): """ Loads and displays the image at ``Pattern.path`` """ haystack = Image.open(self.path) haystack.show()
[ "def", "debugPreview", "(", "self", ",", "title", "=", "\"Debug\"", ")", ":", "haystack", "=", "Image", ".", "open", "(", "self", ".", "path", ")", "haystack", ".", "show", "(", ")" ]
Loads and displays the image at ``Pattern.path``
[ "Loads", "and", "displays", "the", "image", "at", "Pattern", ".", "path" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L127-L130
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.setLocation
def setLocation(self, location): """ Change the upper left-hand corner to a new ``Location`` Doesn't change width or height """ if not location or not isinstance(location, Location): raise ValueError("setLocation expected a Location object") self.x = location.x self.y = location.y return self
python
def setLocation(self, location): """ Change the upper left-hand corner to a new ``Location`` Doesn't change width or height """ if not location or not isinstance(location, Location): raise ValueError("setLocation expected a Location object") self.x = location.x self.y = location.y return self
[ "def", "setLocation", "(", "self", ",", "location", ")", ":", "if", "not", "location", "or", "not", "isinstance", "(", "location", ",", "Location", ")", ":", "raise", "ValueError", "(", "\"setLocation expected a Location object\"", ")", "self", ".", "x", "=", "location", ".", "x", "self", ".", "y", "=", "location", ".", "y", "return", "self" ]
Change the upper left-hand corner to a new ``Location`` Doesn't change width or height
[ "Change", "the", "upper", "left", "-", "hand", "corner", "to", "a", "new", "Location" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L222-L231
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.contains
def contains(self, point_or_region): """ Checks if ``point_or_region`` is within this region """ if isinstance(point_or_region, Location): return (self.x < point_or_region.x < self.x + self.w) and (self.y < point_or_region.y < self.y + self.h) elif isinstance(point_or_region, Region): return ((self.x < point_or_region.getX() < self.x + self.w) and (self.y < point_or_region.getY() < self.y + self.h) and (self.x < point_or_region.getX() + point_or_region.getW() < self.x + self.w) and (self.y < point_or_region.getY() + point_or_region.getH() < self.y + self.h)) else: raise TypeError("Unrecognized argument type for contains()")
python
def contains(self, point_or_region): """ Checks if ``point_or_region`` is within this region """ if isinstance(point_or_region, Location): return (self.x < point_or_region.x < self.x + self.w) and (self.y < point_or_region.y < self.y + self.h) elif isinstance(point_or_region, Region): return ((self.x < point_or_region.getX() < self.x + self.w) and (self.y < point_or_region.getY() < self.y + self.h) and (self.x < point_or_region.getX() + point_or_region.getW() < self.x + self.w) and (self.y < point_or_region.getY() + point_or_region.getH() < self.y + self.h)) else: raise TypeError("Unrecognized argument type for contains()")
[ "def", "contains", "(", "self", ",", "point_or_region", ")", ":", "if", "isinstance", "(", "point_or_region", ",", "Location", ")", ":", "return", "(", "self", ".", "x", "<", "point_or_region", ".", "x", "<", "self", ".", "x", "+", "self", ".", "w", ")", "and", "(", "self", ".", "y", "<", "point_or_region", ".", "y", "<", "self", ".", "y", "+", "self", ".", "h", ")", "elif", "isinstance", "(", "point_or_region", ",", "Region", ")", ":", "return", "(", "(", "self", ".", "x", "<", "point_or_region", ".", "getX", "(", ")", "<", "self", ".", "x", "+", "self", ".", "w", ")", "and", "(", "self", ".", "y", "<", "point_or_region", ".", "getY", "(", ")", "<", "self", ".", "y", "+", "self", ".", "h", ")", "and", "(", "self", ".", "x", "<", "point_or_region", ".", "getX", "(", ")", "+", "point_or_region", ".", "getW", "(", ")", "<", "self", ".", "x", "+", "self", ".", "w", ")", "and", "(", "self", ".", "y", "<", "point_or_region", ".", "getY", "(", ")", "+", "point_or_region", ".", "getH", "(", ")", "<", "self", ".", "y", "+", "self", ".", "h", ")", ")", "else", ":", "raise", "TypeError", "(", "\"Unrecognized argument type for contains()\"", ")" ]
Checks if ``point_or_region`` is within this region
[ "Checks", "if", "point_or_region", "is", "within", "this", "region" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L251-L261
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.morphTo
def morphTo(self, region): """ Change shape of this region to match the given ``Region`` object """ if not region or not isinstance(region, Region): raise TypeError("morphTo expected a Region object") self.setROI(region) return self
python
def morphTo(self, region): """ Change shape of this region to match the given ``Region`` object """ if not region or not isinstance(region, Region): raise TypeError("morphTo expected a Region object") self.setROI(region) return self
[ "def", "morphTo", "(", "self", ",", "region", ")", ":", "if", "not", "region", "or", "not", "isinstance", "(", "region", ",", "Region", ")", ":", "raise", "TypeError", "(", "\"morphTo expected a Region object\"", ")", "self", ".", "setROI", "(", "region", ")", "return", "self" ]
Change shape of this region to match the given ``Region`` object
[ "Change", "shape", "of", "this", "region", "to", "match", "the", "given", "Region", "object" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L264-L269
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.getCenter
def getCenter(self): """ Return the ``Location`` of the center of this region """ return Location(self.x+(self.w/2), self.y+(self.h/2))
python
def getCenter(self): """ Return the ``Location`` of the center of this region """ return Location(self.x+(self.w/2), self.y+(self.h/2))
[ "def", "getCenter", "(", "self", ")", ":", "return", "Location", "(", "self", ".", "x", "+", "(", "self", ".", "w", "/", "2", ")", ",", "self", ".", "y", "+", "(", "self", ".", "h", "/", "2", ")", ")" ]
Return the ``Location`` of the center of this region
[ "Return", "the", "Location", "of", "the", "center", "of", "this", "region" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L280-L282
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.getBottomRight
def getBottomRight(self): """ Return the ``Location`` of the bottom right corner of this region """ return Location(self.x+self.w, self.y+self.h)
python
def getBottomRight(self): """ Return the ``Location`` of the bottom right corner of this region """ return Location(self.x+self.w, self.y+self.h)
[ "def", "getBottomRight", "(", "self", ")", ":", "return", "Location", "(", "self", ".", "x", "+", "self", ".", "w", ",", "self", ".", "y", "+", "self", ".", "h", ")" ]
Return the ``Location`` of the bottom right corner of this region
[ "Return", "the", "Location", "of", "the", "bottom", "right", "corner", "of", "this", "region" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L292-L294
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.offset
def offset(self, location, dy=0): """ Returns a new ``Region`` offset from this one by ``location`` Width and height remain the same """ if not isinstance(location, Location): # Assume variables passed were dx,dy location = Location(location, dy) r = Region(self.x+location.x, self.y+location.y, self.w, self.h).clipRegionToScreen() if r is None: raise ValueError("Specified region is not visible on any screen") return None return r
python
def offset(self, location, dy=0): """ Returns a new ``Region`` offset from this one by ``location`` Width and height remain the same """ if not isinstance(location, Location): # Assume variables passed were dx,dy location = Location(location, dy) r = Region(self.x+location.x, self.y+location.y, self.w, self.h).clipRegionToScreen() if r is None: raise ValueError("Specified region is not visible on any screen") return None return r
[ "def", "offset", "(", "self", ",", "location", ",", "dy", "=", "0", ")", ":", "if", "not", "isinstance", "(", "location", ",", "Location", ")", ":", "# Assume variables passed were dx,dy", "location", "=", "Location", "(", "location", ",", "dy", ")", "r", "=", "Region", "(", "self", ".", "x", "+", "location", ".", "x", ",", "self", ".", "y", "+", "location", ".", "y", ",", "self", ".", "w", ",", "self", ".", "h", ")", ".", "clipRegionToScreen", "(", ")", "if", "r", "is", "None", ":", "raise", "ValueError", "(", "\"Specified region is not visible on any screen\"", ")", "return", "None", "return", "r" ]
Returns a new ``Region`` offset from this one by ``location`` Width and height remain the same
[ "Returns", "a", "new", "Region", "offset", "from", "this", "one", "by", "location" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L331-L343
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.grow
def grow(self, width, height=None): """ Expands the region by ``width`` on both sides and ``height`` on the top and bottom. If only one value is provided, expands the region by that amount on all sides. Equivalent to ``nearby()``. """ if height is None: return self.nearby(width) else: return Region( self.x-width, self.y-height, self.w+(2*width), self.h+(2*height)).clipRegionToScreen()
python
def grow(self, width, height=None): """ Expands the region by ``width`` on both sides and ``height`` on the top and bottom. If only one value is provided, expands the region by that amount on all sides. Equivalent to ``nearby()``. """ if height is None: return self.nearby(width) else: return Region( self.x-width, self.y-height, self.w+(2*width), self.h+(2*height)).clipRegionToScreen()
[ "def", "grow", "(", "self", ",", "width", ",", "height", "=", "None", ")", ":", "if", "height", "is", "None", ":", "return", "self", ".", "nearby", "(", "width", ")", "else", ":", "return", "Region", "(", "self", ".", "x", "-", "width", ",", "self", ".", "y", "-", "height", ",", "self", ".", "w", "+", "(", "2", "*", "width", ")", ",", "self", ".", "h", "+", "(", "2", "*", "height", ")", ")", ".", "clipRegionToScreen", "(", ")" ]
Expands the region by ``width`` on both sides and ``height`` on the top and bottom. If only one value is provided, expands the region by that amount on all sides. Equivalent to ``nearby()``.
[ "Expands", "the", "region", "by", "width", "on", "both", "sides", "and", "height", "on", "the", "top", "and", "bottom", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L344-L357
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.nearby
def nearby(self, expand=50): """ Returns a new Region that includes the nearby neighbourhood of the the current region. The new region is defined by extending the current region's dimensions all directions by range number of pixels. The center of the new region remains the same. """ return Region( self.x-expand, self.y-expand, self.w+(2*expand), self.h+(2*expand)).clipRegionToScreen()
python
def nearby(self, expand=50): """ Returns a new Region that includes the nearby neighbourhood of the the current region. The new region is defined by extending the current region's dimensions all directions by range number of pixels. The center of the new region remains the same. """ return Region( self.x-expand, self.y-expand, self.w+(2*expand), self.h+(2*expand)).clipRegionToScreen()
[ "def", "nearby", "(", "self", ",", "expand", "=", "50", ")", ":", "return", "Region", "(", "self", ".", "x", "-", "expand", ",", "self", ".", "y", "-", "expand", ",", "self", ".", "w", "+", "(", "2", "*", "expand", ")", ",", "self", ".", "h", "+", "(", "2", "*", "expand", ")", ")", ".", "clipRegionToScreen", "(", ")" ]
Returns a new Region that includes the nearby neighbourhood of the the current region. The new region is defined by extending the current region's dimensions all directions by range number of pixels. The center of the new region remains the same.
[ "Returns", "a", "new", "Region", "that", "includes", "the", "nearby", "neighbourhood", "of", "the", "the", "current", "region", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L361-L372
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.left
def left(self, expand=None): """ Returns a new Region left of the current region with a width of ``expand`` pixels. Does not include the current region. If range is omitted, it reaches to the left border of the screen. The new region has the same height and y-position as the current region. """ if expand == None: x = 0 y = self.y w = self.x h = self.h else: x = self.x-expand y = self.y w = expand h = self.h return Region(x, y, w, h).clipRegionToScreen()
python
def left(self, expand=None): """ Returns a new Region left of the current region with a width of ``expand`` pixels. Does not include the current region. If range is omitted, it reaches to the left border of the screen. The new region has the same height and y-position as the current region. """ if expand == None: x = 0 y = self.y w = self.x h = self.h else: x = self.x-expand y = self.y w = expand h = self.h return Region(x, y, w, h).clipRegionToScreen()
[ "def", "left", "(", "self", ",", "expand", "=", "None", ")", ":", "if", "expand", "==", "None", ":", "x", "=", "0", "y", "=", "self", ".", "y", "w", "=", "self", ".", "x", "h", "=", "self", ".", "h", "else", ":", "x", "=", "self", ".", "x", "-", "expand", "y", "=", "self", ".", "y", "w", "=", "expand", "h", "=", "self", ".", "h", "return", "Region", "(", "x", ",", "y", ",", "w", ",", "h", ")", ".", "clipRegionToScreen", "(", ")" ]
Returns a new Region left of the current region with a width of ``expand`` pixels. Does not include the current region. If range is omitted, it reaches to the left border of the screen. The new region has the same height and y-position as the current region.
[ "Returns", "a", "new", "Region", "left", "of", "the", "current", "region", "with", "a", "width", "of", "expand", "pixels", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L407-L423
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.right
def right(self, expand=None): """ Returns a new Region right of the current region with a width of ``expand`` pixels. Does not include the current region. If range is omitted, it reaches to the right border of the screen. The new region has the same height and y-position as the current region. """ if expand == None: x = self.x+self.w y = self.y w = self.getScreen().getBounds()[2] - x h = self.h else: x = self.x+self.w y = self.y w = expand h = self.h return Region(x, y, w, h).clipRegionToScreen()
python
def right(self, expand=None): """ Returns a new Region right of the current region with a width of ``expand`` pixels. Does not include the current region. If range is omitted, it reaches to the right border of the screen. The new region has the same height and y-position as the current region. """ if expand == None: x = self.x+self.w y = self.y w = self.getScreen().getBounds()[2] - x h = self.h else: x = self.x+self.w y = self.y w = expand h = self.h return Region(x, y, w, h).clipRegionToScreen()
[ "def", "right", "(", "self", ",", "expand", "=", "None", ")", ":", "if", "expand", "==", "None", ":", "x", "=", "self", ".", "x", "+", "self", ".", "w", "y", "=", "self", ".", "y", "w", "=", "self", ".", "getScreen", "(", ")", ".", "getBounds", "(", ")", "[", "2", "]", "-", "x", "h", "=", "self", ".", "h", "else", ":", "x", "=", "self", ".", "x", "+", "self", ".", "w", "y", "=", "self", ".", "y", "w", "=", "expand", "h", "=", "self", ".", "h", "return", "Region", "(", "x", ",", "y", ",", "w", ",", "h", ")", ".", "clipRegionToScreen", "(", ")" ]
Returns a new Region right of the current region with a width of ``expand`` pixels. Does not include the current region. If range is omitted, it reaches to the right border of the screen. The new region has the same height and y-position as the current region.
[ "Returns", "a", "new", "Region", "right", "of", "the", "current", "region", "with", "a", "width", "of", "expand", "pixels", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L424-L440
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.getBitmap
def getBitmap(self): """ Captures screen area of this region, at least the part that is on the screen Returns image as numpy array """ return PlatformManager.getBitmapFromRect(self.x, self.y, self.w, self.h)
python
def getBitmap(self): """ Captures screen area of this region, at least the part that is on the screen Returns image as numpy array """ return PlatformManager.getBitmapFromRect(self.x, self.y, self.w, self.h)
[ "def", "getBitmap", "(", "self", ")", ":", "return", "PlatformManager", ".", "getBitmapFromRect", "(", "self", ".", "x", ",", "self", ".", "y", ",", "self", ".", "w", ",", "self", ".", "h", ")" ]
Captures screen area of this region, at least the part that is on the screen Returns image as numpy array
[ "Captures", "screen", "area", "of", "this", "region", "at", "least", "the", "part", "that", "is", "on", "the", "screen" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L449-L454
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.debugPreview
def debugPreview(self, title="Debug"): """ Displays the region in a preview window. If the region is a Match, circles the target area. If the region is larger than half the primary screen in either dimension, scales it down to half size. """ region = self haystack = self.getBitmap() if isinstance(region, Match): cv2.circle( haystack, (region.getTarget().x - self.x, region.getTarget().y - self.y), 5, 255) if haystack.shape[0] > (Screen(0).getBounds()[2]/2) or haystack.shape[1] > (Screen(0).getBounds()[3]/2): # Image is bigger than half the screen; scale it down haystack = cv2.resize(haystack, (0, 0), fx=0.5, fy=0.5) Image.fromarray(haystack).show()
python
def debugPreview(self, title="Debug"): """ Displays the region in a preview window. If the region is a Match, circles the target area. If the region is larger than half the primary screen in either dimension, scales it down to half size. """ region = self haystack = self.getBitmap() if isinstance(region, Match): cv2.circle( haystack, (region.getTarget().x - self.x, region.getTarget().y - self.y), 5, 255) if haystack.shape[0] > (Screen(0).getBounds()[2]/2) or haystack.shape[1] > (Screen(0).getBounds()[3]/2): # Image is bigger than half the screen; scale it down haystack = cv2.resize(haystack, (0, 0), fx=0.5, fy=0.5) Image.fromarray(haystack).show()
[ "def", "debugPreview", "(", "self", ",", "title", "=", "\"Debug\"", ")", ":", "region", "=", "self", "haystack", "=", "self", ".", "getBitmap", "(", ")", "if", "isinstance", "(", "region", ",", "Match", ")", ":", "cv2", ".", "circle", "(", "haystack", ",", "(", "region", ".", "getTarget", "(", ")", ".", "x", "-", "self", ".", "x", ",", "region", ".", "getTarget", "(", ")", ".", "y", "-", "self", ".", "y", ")", ",", "5", ",", "255", ")", "if", "haystack", ".", "shape", "[", "0", "]", ">", "(", "Screen", "(", "0", ")", ".", "getBounds", "(", ")", "[", "2", "]", "/", "2", ")", "or", "haystack", ".", "shape", "[", "1", "]", ">", "(", "Screen", "(", "0", ")", ".", "getBounds", "(", ")", "[", "3", "]", "/", "2", ")", ":", "# Image is bigger than half the screen; scale it down", "haystack", "=", "cv2", ".", "resize", "(", "haystack", ",", "(", "0", ",", "0", ")", ",", "fx", "=", "0.5", ",", "fy", "=", "0.5", ")", "Image", ".", "fromarray", "(", "haystack", ")", ".", "show", "(", ")" ]
Displays the region in a preview window. If the region is a Match, circles the target area. If the region is larger than half the primary screen in either dimension, scales it down to half size.
[ "Displays", "the", "region", "in", "a", "preview", "window", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L455-L472
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.wait
def wait(self, pattern, seconds=None): """ Searches for an image pattern in the given region, given a specified timeout period Functionally identical to find(). If a number is passed instead of a pattern, just waits the specified number of seconds. Sikuli supports OCR search with a text parameter. This does not (yet). """ if isinstance(pattern, (int, float)): if pattern == FOREVER: while True: time.sleep(1) # Infinite loop time.sleep(pattern) return None if seconds is None: seconds = self.autoWaitTimeout findFailedRetry = True timeout = time.time() + seconds while findFailedRetry: while True: match = self.exists(pattern) if match: return match if time.time() >= timeout: break path = pattern.path if isinstance(pattern, Pattern) else pattern findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path)) if findFailedRetry: time.sleep(self._repeatWaitTime) return None
python
def wait(self, pattern, seconds=None): """ Searches for an image pattern in the given region, given a specified timeout period Functionally identical to find(). If a number is passed instead of a pattern, just waits the specified number of seconds. Sikuli supports OCR search with a text parameter. This does not (yet). """ if isinstance(pattern, (int, float)): if pattern == FOREVER: while True: time.sleep(1) # Infinite loop time.sleep(pattern) return None if seconds is None: seconds = self.autoWaitTimeout findFailedRetry = True timeout = time.time() + seconds while findFailedRetry: while True: match = self.exists(pattern) if match: return match if time.time() >= timeout: break path = pattern.path if isinstance(pattern, Pattern) else pattern findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path)) if findFailedRetry: time.sleep(self._repeatWaitTime) return None
[ "def", "wait", "(", "self", ",", "pattern", ",", "seconds", "=", "None", ")", ":", "if", "isinstance", "(", "pattern", ",", "(", "int", ",", "float", ")", ")", ":", "if", "pattern", "==", "FOREVER", ":", "while", "True", ":", "time", ".", "sleep", "(", "1", ")", "# Infinite loop", "time", ".", "sleep", "(", "pattern", ")", "return", "None", "if", "seconds", "is", "None", ":", "seconds", "=", "self", ".", "autoWaitTimeout", "findFailedRetry", "=", "True", "timeout", "=", "time", ".", "time", "(", ")", "+", "seconds", "while", "findFailedRetry", ":", "while", "True", ":", "match", "=", "self", ".", "exists", "(", "pattern", ")", "if", "match", ":", "return", "match", "if", "time", ".", "time", "(", ")", ">=", "timeout", ":", "break", "path", "=", "pattern", ".", "path", "if", "isinstance", "(", "pattern", ",", "Pattern", ")", "else", "pattern", "findFailedRetry", "=", "self", ".", "_raiseFindFailed", "(", "\"Could not find pattern '{}'\"", ".", "format", "(", "path", ")", ")", "if", "findFailedRetry", ":", "time", ".", "sleep", "(", "self", ".", "_repeatWaitTime", ")", "return", "None" ]
Searches for an image pattern in the given region, given a specified timeout period Functionally identical to find(). If a number is passed instead of a pattern, just waits the specified number of seconds. Sikuli supports OCR search with a text parameter. This does not (yet).
[ "Searches", "for", "an", "image", "pattern", "in", "the", "given", "region", "given", "a", "specified", "timeout", "period" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L566-L596
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.waitVanish
def waitVanish(self, pattern, seconds=None): """ Waits until the specified pattern is not visible on screen. If ``seconds`` pass and the pattern is still visible, raises FindFailed exception. Sikuli supports OCR search with a text parameter. This does not (yet). """ r = self.clipRegionToScreen() if r is None: raise ValueError("Region outside all visible screens") return None if seconds is None: seconds = self.autoWaitTimeout if not isinstance(pattern, Pattern): if not isinstance(pattern, basestring): raise TypeError("find expected a string [image path] or Pattern object") pattern = Pattern(pattern) needle = cv2.imread(pattern.path) match = True timeout = time.time() + seconds while match and time.time() < timeout: matcher = TemplateMatcher(r.getBitmap()) # When needle disappears, matcher returns None match = matcher.findBestMatch(needle, pattern.similarity) time.sleep(1/self._defaultScanRate if self._defaultScanRate is not None else 1/Settings.WaitScanRate) if match: return False
python
def waitVanish(self, pattern, seconds=None): """ Waits until the specified pattern is not visible on screen. If ``seconds`` pass and the pattern is still visible, raises FindFailed exception. Sikuli supports OCR search with a text parameter. This does not (yet). """ r = self.clipRegionToScreen() if r is None: raise ValueError("Region outside all visible screens") return None if seconds is None: seconds = self.autoWaitTimeout if not isinstance(pattern, Pattern): if not isinstance(pattern, basestring): raise TypeError("find expected a string [image path] or Pattern object") pattern = Pattern(pattern) needle = cv2.imread(pattern.path) match = True timeout = time.time() + seconds while match and time.time() < timeout: matcher = TemplateMatcher(r.getBitmap()) # When needle disappears, matcher returns None match = matcher.findBestMatch(needle, pattern.similarity) time.sleep(1/self._defaultScanRate if self._defaultScanRate is not None else 1/Settings.WaitScanRate) if match: return False
[ "def", "waitVanish", "(", "self", ",", "pattern", ",", "seconds", "=", "None", ")", ":", "r", "=", "self", ".", "clipRegionToScreen", "(", ")", "if", "r", "is", "None", ":", "raise", "ValueError", "(", "\"Region outside all visible screens\"", ")", "return", "None", "if", "seconds", "is", "None", ":", "seconds", "=", "self", ".", "autoWaitTimeout", "if", "not", "isinstance", "(", "pattern", ",", "Pattern", ")", ":", "if", "not", "isinstance", "(", "pattern", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"find expected a string [image path] or Pattern object\"", ")", "pattern", "=", "Pattern", "(", "pattern", ")", "needle", "=", "cv2", ".", "imread", "(", "pattern", ".", "path", ")", "match", "=", "True", "timeout", "=", "time", ".", "time", "(", ")", "+", "seconds", "while", "match", "and", "time", ".", "time", "(", ")", "<", "timeout", ":", "matcher", "=", "TemplateMatcher", "(", "r", ".", "getBitmap", "(", ")", ")", "# When needle disappears, matcher returns None", "match", "=", "matcher", ".", "findBestMatch", "(", "needle", ",", "pattern", ".", "similarity", ")", "time", ".", "sleep", "(", "1", "/", "self", ".", "_defaultScanRate", "if", "self", ".", "_defaultScanRate", "is", "not", "None", "else", "1", "/", "Settings", ".", "WaitScanRate", ")", "if", "match", ":", "return", "False" ]
Waits until the specified pattern is not visible on screen. If ``seconds`` pass and the pattern is still visible, raises FindFailed exception. Sikuli supports OCR search with a text parameter. This does not (yet).
[ "Waits", "until", "the", "specified", "pattern", "is", "not", "visible", "on", "screen", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L597-L624
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.click
def click(self, target=None, modifiers=""): """ Moves the cursor to the target location and clicks the default mouse button. """ if target is None: target = self._lastMatch or self # Whichever one is not None target_location = None if isinstance(target, Pattern): target_location = self.find(target).getTarget() elif isinstance(target, basestring): target_location = self.find(target).getTarget() elif isinstance(target, Match): target_location = target.getTarget() elif isinstance(target, Region): target_location = target.getCenter() elif isinstance(target, Location): target_location = target else: raise TypeError("click expected Pattern, String, Match, Region, or Location object") if modifiers != "": keyboard.keyDown(modifiers) Mouse.moveSpeed(target_location, Settings.MoveMouseDelay) time.sleep(0.1) # For responsiveness if Settings.ClickDelay > 0: time.sleep(min(1.0, Settings.ClickDelay)) Settings.ClickDelay = 0.0 Mouse.click() time.sleep(0.1) if modifiers != 0: keyboard.keyUp(modifiers) Debug.history("Clicked at {}".format(target_location))
python
def click(self, target=None, modifiers=""): """ Moves the cursor to the target location and clicks the default mouse button. """ if target is None: target = self._lastMatch or self # Whichever one is not None target_location = None if isinstance(target, Pattern): target_location = self.find(target).getTarget() elif isinstance(target, basestring): target_location = self.find(target).getTarget() elif isinstance(target, Match): target_location = target.getTarget() elif isinstance(target, Region): target_location = target.getCenter() elif isinstance(target, Location): target_location = target else: raise TypeError("click expected Pattern, String, Match, Region, or Location object") if modifiers != "": keyboard.keyDown(modifiers) Mouse.moveSpeed(target_location, Settings.MoveMouseDelay) time.sleep(0.1) # For responsiveness if Settings.ClickDelay > 0: time.sleep(min(1.0, Settings.ClickDelay)) Settings.ClickDelay = 0.0 Mouse.click() time.sleep(0.1) if modifiers != 0: keyboard.keyUp(modifiers) Debug.history("Clicked at {}".format(target_location))
[ "def", "click", "(", "self", ",", "target", "=", "None", ",", "modifiers", "=", "\"\"", ")", ":", "if", "target", "is", "None", ":", "target", "=", "self", ".", "_lastMatch", "or", "self", "# Whichever one is not None", "target_location", "=", "None", "if", "isinstance", "(", "target", ",", "Pattern", ")", ":", "target_location", "=", "self", ".", "find", "(", "target", ")", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "target", ",", "basestring", ")", ":", "target_location", "=", "self", ".", "find", "(", "target", ")", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "target", ",", "Match", ")", ":", "target_location", "=", "target", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "target", ",", "Region", ")", ":", "target_location", "=", "target", ".", "getCenter", "(", ")", "elif", "isinstance", "(", "target", ",", "Location", ")", ":", "target_location", "=", "target", "else", ":", "raise", "TypeError", "(", "\"click expected Pattern, String, Match, Region, or Location object\"", ")", "if", "modifiers", "!=", "\"\"", ":", "keyboard", ".", "keyDown", "(", "modifiers", ")", "Mouse", ".", "moveSpeed", "(", "target_location", ",", "Settings", ".", "MoveMouseDelay", ")", "time", ".", "sleep", "(", "0.1", ")", "# For responsiveness", "if", "Settings", ".", "ClickDelay", ">", "0", ":", "time", ".", "sleep", "(", "min", "(", "1.0", ",", "Settings", ".", "ClickDelay", ")", ")", "Settings", ".", "ClickDelay", "=", "0.0", "Mouse", ".", "click", "(", ")", "time", ".", "sleep", "(", "0.1", ")", "if", "modifiers", "!=", "0", ":", "keyboard", ".", "keyUp", "(", "modifiers", ")", "Debug", ".", "history", "(", "\"Clicked at {}\"", ".", "format", "(", "target_location", ")", ")" ]
Moves the cursor to the target location and clicks the default mouse button.
[ "Moves", "the", "cursor", "to", "the", "target", "location", "and", "clicks", "the", "default", "mouse", "button", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L686-L717
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.hover
def hover(self, target=None): """ Moves the cursor to the target location """ if target is None: target = self._lastMatch or self # Whichever one is not None target_location = None if isinstance(target, Pattern): target_location = self.find(target).getTarget() elif isinstance(target, basestring): target_location = self.find(target).getTarget() elif isinstance(target, Match): target_location = target.getTarget() elif isinstance(target, Region): target_location = target.getCenter() elif isinstance(target, Location): target_location = target else: raise TypeError("hover expected Pattern, String, Match, Region, or Location object") Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
python
def hover(self, target=None): """ Moves the cursor to the target location """ if target is None: target = self._lastMatch or self # Whichever one is not None target_location = None if isinstance(target, Pattern): target_location = self.find(target).getTarget() elif isinstance(target, basestring): target_location = self.find(target).getTarget() elif isinstance(target, Match): target_location = target.getTarget() elif isinstance(target, Region): target_location = target.getCenter() elif isinstance(target, Location): target_location = target else: raise TypeError("hover expected Pattern, String, Match, Region, or Location object") Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
[ "def", "hover", "(", "self", ",", "target", "=", "None", ")", ":", "if", "target", "is", "None", ":", "target", "=", "self", ".", "_lastMatch", "or", "self", "# Whichever one is not None", "target_location", "=", "None", "if", "isinstance", "(", "target", ",", "Pattern", ")", ":", "target_location", "=", "self", ".", "find", "(", "target", ")", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "target", ",", "basestring", ")", ":", "target_location", "=", "self", ".", "find", "(", "target", ")", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "target", ",", "Match", ")", ":", "target_location", "=", "target", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "target", ",", "Region", ")", ":", "target_location", "=", "target", ".", "getCenter", "(", ")", "elif", "isinstance", "(", "target", ",", "Location", ")", ":", "target_location", "=", "target", "else", ":", "raise", "TypeError", "(", "\"hover expected Pattern, String, Match, Region, or Location object\"", ")", "Mouse", ".", "moveSpeed", "(", "target_location", ",", "Settings", ".", "MoveMouseDelay", ")" ]
Moves the cursor to the target location
[ "Moves", "the", "cursor", "to", "the", "target", "location" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L785-L803
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.drag
def drag(self, dragFrom=None): """ Starts a dragDrop operation. Moves the cursor to the target location and clicks the mouse in preparation to drag a screen element """ if dragFrom is None: dragFrom = self._lastMatch or self # Whichever one is not None dragFromLocation = None if isinstance(dragFrom, Pattern): dragFromLocation = self.find(dragFrom).getTarget() elif isinstance(dragFrom, basestring): dragFromLocation = self.find(dragFrom).getTarget() elif isinstance(dragFrom, Match): dragFromLocation = dragFrom.getTarget() elif isinstance(dragFrom, Region): dragFromLocation = dragFrom.getCenter() elif isinstance(dragFrom, Location): dragFromLocation = dragFrom else: raise TypeError("drag expected dragFrom to be Pattern, String, Match, Region, or Location object") Mouse.moveSpeed(dragFromLocation, Settings.MoveMouseDelay) time.sleep(Settings.DelayBeforeMouseDown) Mouse.buttonDown() Debug.history("Began drag at {}".format(dragFromLocation))
python
def drag(self, dragFrom=None): """ Starts a dragDrop operation. Moves the cursor to the target location and clicks the mouse in preparation to drag a screen element """ if dragFrom is None: dragFrom = self._lastMatch or self # Whichever one is not None dragFromLocation = None if isinstance(dragFrom, Pattern): dragFromLocation = self.find(dragFrom).getTarget() elif isinstance(dragFrom, basestring): dragFromLocation = self.find(dragFrom).getTarget() elif isinstance(dragFrom, Match): dragFromLocation = dragFrom.getTarget() elif isinstance(dragFrom, Region): dragFromLocation = dragFrom.getCenter() elif isinstance(dragFrom, Location): dragFromLocation = dragFrom else: raise TypeError("drag expected dragFrom to be Pattern, String, Match, Region, or Location object") Mouse.moveSpeed(dragFromLocation, Settings.MoveMouseDelay) time.sleep(Settings.DelayBeforeMouseDown) Mouse.buttonDown() Debug.history("Began drag at {}".format(dragFromLocation))
[ "def", "drag", "(", "self", ",", "dragFrom", "=", "None", ")", ":", "if", "dragFrom", "is", "None", ":", "dragFrom", "=", "self", ".", "_lastMatch", "or", "self", "# Whichever one is not None", "dragFromLocation", "=", "None", "if", "isinstance", "(", "dragFrom", ",", "Pattern", ")", ":", "dragFromLocation", "=", "self", ".", "find", "(", "dragFrom", ")", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "dragFrom", ",", "basestring", ")", ":", "dragFromLocation", "=", "self", ".", "find", "(", "dragFrom", ")", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "dragFrom", ",", "Match", ")", ":", "dragFromLocation", "=", "dragFrom", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "dragFrom", ",", "Region", ")", ":", "dragFromLocation", "=", "dragFrom", ".", "getCenter", "(", ")", "elif", "isinstance", "(", "dragFrom", ",", "Location", ")", ":", "dragFromLocation", "=", "dragFrom", "else", ":", "raise", "TypeError", "(", "\"drag expected dragFrom to be Pattern, String, Match, Region, or Location object\"", ")", "Mouse", ".", "moveSpeed", "(", "dragFromLocation", ",", "Settings", ".", "MoveMouseDelay", ")", "time", ".", "sleep", "(", "Settings", ".", "DelayBeforeMouseDown", ")", "Mouse", ".", "buttonDown", "(", ")", "Debug", ".", "history", "(", "\"Began drag at {}\"", ".", "format", "(", "dragFromLocation", ")", ")" ]
Starts a dragDrop operation. Moves the cursor to the target location and clicks the mouse in preparation to drag a screen element
[ "Starts", "a", "dragDrop", "operation", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L804-L827
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.dropAt
def dropAt(self, dragTo=None, delay=None): """ Completes a dragDrop operation Moves the cursor to the target location, waits ``delay`` seconds, and releases the mouse button """ if dragTo is None: dragTo = self._lastMatch or self # Whichever one is not None if isinstance(dragTo, Pattern): dragToLocation = self.find(dragTo).getTarget() elif isinstance(dragTo, basestring): dragToLocation = self.find(dragTo).getTarget() elif isinstance(dragTo, Match): dragToLocation = dragTo.getTarget() elif isinstance(dragTo, Region): dragToLocation = dragTo.getCenter() elif isinstance(dragTo, Location): dragToLocation = dragTo else: raise TypeError("dragDrop expected dragTo to be Pattern, String, Match, Region, or Location object") Mouse.moveSpeed(dragToLocation, Settings.MoveMouseDelay) time.sleep(delay if delay is not None else Settings.DelayBeforeDrop) Mouse.buttonUp() Debug.history("Ended drag at {}".format(dragToLocation))
python
def dropAt(self, dragTo=None, delay=None): """ Completes a dragDrop operation Moves the cursor to the target location, waits ``delay`` seconds, and releases the mouse button """ if dragTo is None: dragTo = self._lastMatch or self # Whichever one is not None if isinstance(dragTo, Pattern): dragToLocation = self.find(dragTo).getTarget() elif isinstance(dragTo, basestring): dragToLocation = self.find(dragTo).getTarget() elif isinstance(dragTo, Match): dragToLocation = dragTo.getTarget() elif isinstance(dragTo, Region): dragToLocation = dragTo.getCenter() elif isinstance(dragTo, Location): dragToLocation = dragTo else: raise TypeError("dragDrop expected dragTo to be Pattern, String, Match, Region, or Location object") Mouse.moveSpeed(dragToLocation, Settings.MoveMouseDelay) time.sleep(delay if delay is not None else Settings.DelayBeforeDrop) Mouse.buttonUp() Debug.history("Ended drag at {}".format(dragToLocation))
[ "def", "dropAt", "(", "self", ",", "dragTo", "=", "None", ",", "delay", "=", "None", ")", ":", "if", "dragTo", "is", "None", ":", "dragTo", "=", "self", ".", "_lastMatch", "or", "self", "# Whichever one is not None", "if", "isinstance", "(", "dragTo", ",", "Pattern", ")", ":", "dragToLocation", "=", "self", ".", "find", "(", "dragTo", ")", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "dragTo", ",", "basestring", ")", ":", "dragToLocation", "=", "self", ".", "find", "(", "dragTo", ")", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "dragTo", ",", "Match", ")", ":", "dragToLocation", "=", "dragTo", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "dragTo", ",", "Region", ")", ":", "dragToLocation", "=", "dragTo", ".", "getCenter", "(", ")", "elif", "isinstance", "(", "dragTo", ",", "Location", ")", ":", "dragToLocation", "=", "dragTo", "else", ":", "raise", "TypeError", "(", "\"dragDrop expected dragTo to be Pattern, String, Match, Region, or Location object\"", ")", "Mouse", ".", "moveSpeed", "(", "dragToLocation", ",", "Settings", ".", "MoveMouseDelay", ")", "time", ".", "sleep", "(", "delay", "if", "delay", "is", "not", "None", "else", "Settings", ".", "DelayBeforeDrop", ")", "Mouse", ".", "buttonUp", "(", ")", "Debug", ".", "history", "(", "\"Ended drag at {}\"", ".", "format", "(", "dragToLocation", ")", ")" ]
Completes a dragDrop operation Moves the cursor to the target location, waits ``delay`` seconds, and releases the mouse button
[ "Completes", "a", "dragDrop", "operation" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L828-L851
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.dragDrop
def dragDrop(self, target, target2=None, modifiers=""): """ Performs a dragDrop operation. Holds down the mouse button on ``dragFrom``, moves the mouse to ``dragTo``, and releases the mouse button. ``modifiers`` may be a typeKeys() compatible string. The specified keys will be held during the drag-drop operation. """ if modifiers != "": keyboard.keyDown(modifiers) if target2 is None: dragFrom = self._lastMatch dragTo = target else: dragFrom = target dragTo = target2 self.drag(dragFrom) time.sleep(Settings.DelayBeforeDrag) self.dropAt(dragTo) if modifiers != "": keyboard.keyUp(modifiers)
python
def dragDrop(self, target, target2=None, modifiers=""): """ Performs a dragDrop operation. Holds down the mouse button on ``dragFrom``, moves the mouse to ``dragTo``, and releases the mouse button. ``modifiers`` may be a typeKeys() compatible string. The specified keys will be held during the drag-drop operation. """ if modifiers != "": keyboard.keyDown(modifiers) if target2 is None: dragFrom = self._lastMatch dragTo = target else: dragFrom = target dragTo = target2 self.drag(dragFrom) time.sleep(Settings.DelayBeforeDrag) self.dropAt(dragTo) if modifiers != "": keyboard.keyUp(modifiers)
[ "def", "dragDrop", "(", "self", ",", "target", ",", "target2", "=", "None", ",", "modifiers", "=", "\"\"", ")", ":", "if", "modifiers", "!=", "\"\"", ":", "keyboard", ".", "keyDown", "(", "modifiers", ")", "if", "target2", "is", "None", ":", "dragFrom", "=", "self", ".", "_lastMatch", "dragTo", "=", "target", "else", ":", "dragFrom", "=", "target", "dragTo", "=", "target2", "self", ".", "drag", "(", "dragFrom", ")", "time", ".", "sleep", "(", "Settings", ".", "DelayBeforeDrag", ")", "self", ".", "dropAt", "(", "dragTo", ")", "if", "modifiers", "!=", "\"\"", ":", "keyboard", ".", "keyUp", "(", "modifiers", ")" ]
Performs a dragDrop operation. Holds down the mouse button on ``dragFrom``, moves the mouse to ``dragTo``, and releases the mouse button. ``modifiers`` may be a typeKeys() compatible string. The specified keys will be held during the drag-drop operation.
[ "Performs", "a", "dragDrop", "operation", "." ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L852-L876
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.mouseMove
def mouseMove(self, PSRML=None, dy=0): """ Low-level mouse actions """ if PSRML is None: PSRML = self._lastMatch or self # Whichever one is not None if isinstance(PSRML, Pattern): move_location = self.find(PSRML).getTarget() elif isinstance(PSRML, basestring): move_location = self.find(PSRML).getTarget() elif isinstance(PSRML, Match): move_location = PSRML.getTarget() elif isinstance(PSRML, Region): move_location = PSRML.getCenter() elif isinstance(PSRML, Location): move_location = PSRML elif isinstance(PSRML, int): # Assume called as mouseMove(dx, dy) offset = Location(PSRML, dy) move_location = Mouse.getPos().offset(offset) else: raise TypeError("doubleClick expected Pattern, String, Match, Region, or Location object") Mouse.moveSpeed(move_location)
python
def mouseMove(self, PSRML=None, dy=0): """ Low-level mouse actions """ if PSRML is None: PSRML = self._lastMatch or self # Whichever one is not None if isinstance(PSRML, Pattern): move_location = self.find(PSRML).getTarget() elif isinstance(PSRML, basestring): move_location = self.find(PSRML).getTarget() elif isinstance(PSRML, Match): move_location = PSRML.getTarget() elif isinstance(PSRML, Region): move_location = PSRML.getCenter() elif isinstance(PSRML, Location): move_location = PSRML elif isinstance(PSRML, int): # Assume called as mouseMove(dx, dy) offset = Location(PSRML, dy) move_location = Mouse.getPos().offset(offset) else: raise TypeError("doubleClick expected Pattern, String, Match, Region, or Location object") Mouse.moveSpeed(move_location)
[ "def", "mouseMove", "(", "self", ",", "PSRML", "=", "None", ",", "dy", "=", "0", ")", ":", "if", "PSRML", "is", "None", ":", "PSRML", "=", "self", ".", "_lastMatch", "or", "self", "# Whichever one is not None", "if", "isinstance", "(", "PSRML", ",", "Pattern", ")", ":", "move_location", "=", "self", ".", "find", "(", "PSRML", ")", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "PSRML", ",", "basestring", ")", ":", "move_location", "=", "self", ".", "find", "(", "PSRML", ")", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "PSRML", ",", "Match", ")", ":", "move_location", "=", "PSRML", ".", "getTarget", "(", ")", "elif", "isinstance", "(", "PSRML", ",", "Region", ")", ":", "move_location", "=", "PSRML", ".", "getCenter", "(", ")", "elif", "isinstance", "(", "PSRML", ",", "Location", ")", ":", "move_location", "=", "PSRML", "elif", "isinstance", "(", "PSRML", ",", "int", ")", ":", "# Assume called as mouseMove(dx, dy)", "offset", "=", "Location", "(", "PSRML", ",", "dy", ")", "move_location", "=", "Mouse", ".", "getPos", "(", ")", ".", "offset", "(", "offset", ")", "else", ":", "raise", "TypeError", "(", "\"doubleClick expected Pattern, String, Match, Region, or Location object\"", ")", "Mouse", ".", "moveSpeed", "(", "move_location", ")" ]
Low-level mouse actions
[ "Low", "-", "level", "mouse", "actions" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L959-L979
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.isRegionValid
def isRegionValid(self): """ Returns false if the whole region is not even partially inside any screen, otherwise true """ screens = PlatformManager.getScreenDetails() for screen in screens: s_x, s_y, s_w, s_h = screen["rect"] if self.x+self.w >= s_x and s_x+s_w >= self.x and self.y+self.h >= s_y and s_y+s_h >= self.y: # Rects overlap return True return False
python
def isRegionValid(self): """ Returns false if the whole region is not even partially inside any screen, otherwise true """ screens = PlatformManager.getScreenDetails() for screen in screens: s_x, s_y, s_w, s_h = screen["rect"] if self.x+self.w >= s_x and s_x+s_w >= self.x and self.y+self.h >= s_y and s_y+s_h >= self.y: # Rects overlap return True return False
[ "def", "isRegionValid", "(", "self", ")", ":", "screens", "=", "PlatformManager", ".", "getScreenDetails", "(", ")", "for", "screen", "in", "screens", ":", "s_x", ",", "s_y", ",", "s_w", ",", "s_h", "=", "screen", "[", "\"rect\"", "]", "if", "self", ".", "x", "+", "self", ".", "w", ">=", "s_x", "and", "s_x", "+", "s_w", ">=", "self", ".", "x", "and", "self", ".", "y", "+", "self", ".", "h", ">=", "s_y", "and", "s_y", "+", "s_h", ">=", "self", ".", "y", ":", "# Rects overlap", "return", "True", "return", "False" ]
Returns false if the whole region is not even partially inside any screen, otherwise true
[ "Returns", "false", "if", "the", "whole", "region", "is", "not", "even", "partially", "inside", "any", "screen", "otherwise", "true" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1017-L1025
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.clipRegionToScreen
def clipRegionToScreen(self): """ Returns the part of the region that is visible on a screen If the region equals to all visible screens, returns Screen(-1). If the region is visible on multiple screens, returns the screen with the smallest ID. Returns None if the region is outside the screen. """ if not self.isRegionValid(): return None screens = PlatformManager.getScreenDetails() total_x, total_y, total_w, total_h = Screen(-1).getBounds() containing_screen = None for screen in screens: s_x, s_y, s_w, s_h = screen["rect"] if self.x >= s_x and self.x+self.w <= s_x+s_w and self.y >= s_y and self.y+self.h <= s_y+s_h: # Region completely inside screen return self elif self.x+self.w <= s_x or s_x+s_w <= self.x or self.y+self.h <= s_y or s_y+s_h <= self.y: # Region completely outside screen continue elif self.x == total_x and self.y == total_y and self.w == total_w and self.h == total_h: # Region equals all screens, Screen(-1) return self else: # Region partially inside screen x = max(self.x, s_x) y = max(self.y, s_y) w = min(self.w, s_w) h = min(self.h, s_h) return Region(x, y, w, h) return None
python
def clipRegionToScreen(self): """ Returns the part of the region that is visible on a screen If the region equals to all visible screens, returns Screen(-1). If the region is visible on multiple screens, returns the screen with the smallest ID. Returns None if the region is outside the screen. """ if not self.isRegionValid(): return None screens = PlatformManager.getScreenDetails() total_x, total_y, total_w, total_h = Screen(-1).getBounds() containing_screen = None for screen in screens: s_x, s_y, s_w, s_h = screen["rect"] if self.x >= s_x and self.x+self.w <= s_x+s_w and self.y >= s_y and self.y+self.h <= s_y+s_h: # Region completely inside screen return self elif self.x+self.w <= s_x or s_x+s_w <= self.x or self.y+self.h <= s_y or s_y+s_h <= self.y: # Region completely outside screen continue elif self.x == total_x and self.y == total_y and self.w == total_w and self.h == total_h: # Region equals all screens, Screen(-1) return self else: # Region partially inside screen x = max(self.x, s_x) y = max(self.y, s_y) w = min(self.w, s_w) h = min(self.h, s_h) return Region(x, y, w, h) return None
[ "def", "clipRegionToScreen", "(", "self", ")", ":", "if", "not", "self", ".", "isRegionValid", "(", ")", ":", "return", "None", "screens", "=", "PlatformManager", ".", "getScreenDetails", "(", ")", "total_x", ",", "total_y", ",", "total_w", ",", "total_h", "=", "Screen", "(", "-", "1", ")", ".", "getBounds", "(", ")", "containing_screen", "=", "None", "for", "screen", "in", "screens", ":", "s_x", ",", "s_y", ",", "s_w", ",", "s_h", "=", "screen", "[", "\"rect\"", "]", "if", "self", ".", "x", ">=", "s_x", "and", "self", ".", "x", "+", "self", ".", "w", "<=", "s_x", "+", "s_w", "and", "self", ".", "y", ">=", "s_y", "and", "self", ".", "y", "+", "self", ".", "h", "<=", "s_y", "+", "s_h", ":", "# Region completely inside screen", "return", "self", "elif", "self", ".", "x", "+", "self", ".", "w", "<=", "s_x", "or", "s_x", "+", "s_w", "<=", "self", ".", "x", "or", "self", ".", "y", "+", "self", ".", "h", "<=", "s_y", "or", "s_y", "+", "s_h", "<=", "self", ".", "y", ":", "# Region completely outside screen", "continue", "elif", "self", ".", "x", "==", "total_x", "and", "self", ".", "y", "==", "total_y", "and", "self", ".", "w", "==", "total_w", "and", "self", ".", "h", "==", "total_h", ":", "# Region equals all screens, Screen(-1)", "return", "self", "else", ":", "# Region partially inside screen", "x", "=", "max", "(", "self", ".", "x", ",", "s_x", ")", "y", "=", "max", "(", "self", ".", "y", ",", "s_y", ")", "w", "=", "min", "(", "self", ".", "w", ",", "s_w", ")", "h", "=", "min", "(", "self", ".", "h", ",", "s_h", ")", "return", "Region", "(", "x", ",", "y", ",", "w", ",", "h", ")", "return", "None" ]
Returns the part of the region that is visible on a screen If the region equals to all visible screens, returns Screen(-1). If the region is visible on multiple screens, returns the screen with the smallest ID. Returns None if the region is outside the screen.
[ "Returns", "the", "part", "of", "the", "region", "that", "is", "visible", "on", "a", "screen" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1027-L1057
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.get
def get(self, part): """ Returns a section of the region as a new region Accepts partitioning constants, e.g. Region.NORTH, Region.NORTH_WEST, etc. Also accepts an int 200-999: * First digit: Raster (*n* rows by *n* columns) * Second digit: Row index (if equal to raster, gets the whole row) * Third digit: Column index (if equal to raster, gets the whole column) Region.get(522) will use a raster of 5 rows and 5 columns and return the cell in the middle. Region.get(525) will use a raster of 5 rows and 5 columns and return the row in the middle. """ if part == self.MID_VERTICAL: return Region(self.x+(self.w/4), y, self.w/2, self.h) elif part == self.MID_HORIZONTAL: return Region(self.x, self.y+(self.h/4), self.w, self.h/2) elif part == self.MID_BIG: return Region(self.x+(self.w/4), self.y+(self.h/4), self.w/2, self.h/2) elif isinstance(part, int) and part >= 200 and part <= 999: raster, row, column = str(part) self.setRaster(raster, raster) if row == raster and column == raster: return self elif row == raster: return self.getCol(column) elif column == raster: return self.getRow(row) else: return self.getCell(row,column) else: return self
python
def get(self, part): """ Returns a section of the region as a new region Accepts partitioning constants, e.g. Region.NORTH, Region.NORTH_WEST, etc. Also accepts an int 200-999: * First digit: Raster (*n* rows by *n* columns) * Second digit: Row index (if equal to raster, gets the whole row) * Third digit: Column index (if equal to raster, gets the whole column) Region.get(522) will use a raster of 5 rows and 5 columns and return the cell in the middle. Region.get(525) will use a raster of 5 rows and 5 columns and return the row in the middle. """ if part == self.MID_VERTICAL: return Region(self.x+(self.w/4), y, self.w/2, self.h) elif part == self.MID_HORIZONTAL: return Region(self.x, self.y+(self.h/4), self.w, self.h/2) elif part == self.MID_BIG: return Region(self.x+(self.w/4), self.y+(self.h/4), self.w/2, self.h/2) elif isinstance(part, int) and part >= 200 and part <= 999: raster, row, column = str(part) self.setRaster(raster, raster) if row == raster and column == raster: return self elif row == raster: return self.getCol(column) elif column == raster: return self.getRow(row) else: return self.getCell(row,column) else: return self
[ "def", "get", "(", "self", ",", "part", ")", ":", "if", "part", "==", "self", ".", "MID_VERTICAL", ":", "return", "Region", "(", "self", ".", "x", "+", "(", "self", ".", "w", "/", "4", ")", ",", "y", ",", "self", ".", "w", "/", "2", ",", "self", ".", "h", ")", "elif", "part", "==", "self", ".", "MID_HORIZONTAL", ":", "return", "Region", "(", "self", ".", "x", ",", "self", ".", "y", "+", "(", "self", ".", "h", "/", "4", ")", ",", "self", ".", "w", ",", "self", ".", "h", "/", "2", ")", "elif", "part", "==", "self", ".", "MID_BIG", ":", "return", "Region", "(", "self", ".", "x", "+", "(", "self", ".", "w", "/", "4", ")", ",", "self", ".", "y", "+", "(", "self", ".", "h", "/", "4", ")", ",", "self", ".", "w", "/", "2", ",", "self", ".", "h", "/", "2", ")", "elif", "isinstance", "(", "part", ",", "int", ")", "and", "part", ">=", "200", "and", "part", "<=", "999", ":", "raster", ",", "row", ",", "column", "=", "str", "(", "part", ")", "self", ".", "setRaster", "(", "raster", ",", "raster", ")", "if", "row", "==", "raster", "and", "column", "==", "raster", ":", "return", "self", "elif", "row", "==", "raster", ":", "return", "self", ".", "getCol", "(", "column", ")", "elif", "column", "==", "raster", ":", "return", "self", ".", "getRow", "(", "row", ")", "else", ":", "return", "self", ".", "getCell", "(", "row", ",", "column", ")", "else", ":", "return", "self" ]
Returns a section of the region as a new region Accepts partitioning constants, e.g. Region.NORTH, Region.NORTH_WEST, etc. Also accepts an int 200-999: * First digit: Raster (*n* rows by *n* columns) * Second digit: Row index (if equal to raster, gets the whole row) * Third digit: Column index (if equal to raster, gets the whole column) Region.get(522) will use a raster of 5 rows and 5 columns and return the cell in the middle. Region.get(525) will use a raster of 5 rows and 5 columns and return the row in the middle.
[ "Returns", "a", "section", "of", "the", "region", "as", "a", "new", "region" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1160-L1192
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.setCenter
def setCenter(self, loc): """ Move this region so it is centered on ``loc`` """ offset = self.getCenter().getOffset(loc) # Calculate offset from current center return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
python
def setCenter(self, loc): """ Move this region so it is centered on ``loc`` """ offset = self.getCenter().getOffset(loc) # Calculate offset from current center return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
[ "def", "setCenter", "(", "self", ",", "loc", ")", ":", "offset", "=", "self", ".", "getCenter", "(", ")", ".", "getOffset", "(", "loc", ")", "# Calculate offset from current center", "return", "self", ".", "setLocation", "(", "self", ".", "getTopLeft", "(", ")", ".", "offset", "(", "offset", ")", ")", "# Move top left corner by the same offset" ]
Move this region so it is centered on ``loc``
[ "Move", "this", "region", "so", "it", "is", "centered", "on", "loc" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1226-L1229
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.setTopRight
def setTopRight(self, loc): """ Move this region so its top right corner is on ``loc`` """ offset = self.getTopRight().getOffset(loc) # Calculate offset from current top right return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
python
def setTopRight(self, loc): """ Move this region so its top right corner is on ``loc`` """ offset = self.getTopRight().getOffset(loc) # Calculate offset from current top right return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
[ "def", "setTopRight", "(", "self", ",", "loc", ")", ":", "offset", "=", "self", ".", "getTopRight", "(", ")", ".", "getOffset", "(", "loc", ")", "# Calculate offset from current top right", "return", "self", ".", "setLocation", "(", "self", ".", "getTopLeft", "(", ")", ".", "offset", "(", "offset", ")", ")", "# Move top left corner by the same offset" ]
Move this region so its top right corner is on ``loc``
[ "Move", "this", "region", "so", "its", "top", "right", "corner", "is", "on", "loc" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1233-L1236
train
glitchassassin/lackey
lackey/RegionMatching.py
Region.setBottomLeft
def setBottomLeft(self, loc): """ Move this region so its bottom left corner is on ``loc`` """ offset = self.getBottomLeft().getOffset(loc) # Calculate offset from current bottom left return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
python
def setBottomLeft(self, loc): """ Move this region so its bottom left corner is on ``loc`` """ offset = self.getBottomLeft().getOffset(loc) # Calculate offset from current bottom left return self.setLocation(self.getTopLeft().offset(offset)) # Move top left corner by the same offset
[ "def", "setBottomLeft", "(", "self", ",", "loc", ")", ":", "offset", "=", "self", ".", "getBottomLeft", "(", ")", ".", "getOffset", "(", "loc", ")", "# Calculate offset from current bottom left", "return", "self", ".", "setLocation", "(", "self", ".", "getTopLeft", "(", ")", ".", "offset", "(", "offset", ")", ")", "# Move top left corner by the same offset" ]
Move this region so its bottom left corner is on ``loc``
[ "Move", "this", "region", "so", "its", "bottom", "left", "corner", "is", "on", "loc" ]
7adadfacd7f45d81186710be992f5668b15399fe
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1237-L1240
train