code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
"""Module of functions to partition columns into segments.""" from collections import defaultdict from copy import deepcopy from typing import Callable, List import numpy as np import pandas as pd from sklearn.tree import _tree from runml_checks.tabular.dataset import Dataset from runml_checks.utils.strings import format_number from runml_checks.utils.typing import Hashable # TODO: move tabular functionality to the tabular sub-package __all__ = ['partition_column', 'runml_checksFilter', 'runml_checksBaseFilter', 'convert_tree_leaves_into_filters', 'intersect_two_filters', 'partition_numeric_feature_around_segment'] class runml_checksFilter: """Contains a filter function which works on a dataframe and a label describing the filter. Parameters ---------- filter_functions : List[Callable], default: None List of functions that receive a DataFrame and return a filter on it. If None, no filter is applied label : str, default = '' name of the filter """ def __init__(self, filter_functions: List[Callable] = None, label: str = ''): if not filter_functions: self.filter_functions = [] else: self.filter_functions = filter_functions self.label = label def filter(self, dataframe: pd.DataFrame) -> pd.DataFrame: """Run the filter on given dataframe. Return rows in data frame satisfying the filter properties.""" for func in self.filter_functions: dataframe = dataframe.loc[func(dataframe)] return dataframe class runml_checksBaseFilter(runml_checksFilter): """Extend runml_checksFilter class for feature range based filters. Parameters ---------- filters: dict, default: None A dictionary in containing feature names as keys and the filtering range as value. filter_functions : List[Callable], default: None List of functions that receive a DataFrame and return a filter on it. 
If None, no filter is applied label : str, default = '' Name of the filter """ def __init__(self, filters: dict = None, filter_functions: List[Callable] = None, label: str = ''): if filters is None: filters = defaultdict() self.filters = filters super().__init__(filter_functions, label) def add_filter(self, feature_name: str, threshold: float, greater_then: bool = True): """Add a filter by intersecting it with existing filter.""" if greater_then: filter_func = [lambda df, a=threshold: df[feature_name] > a] if feature_name in self.filters.keys(): original_range = self.filters[feature_name] self.filters[feature_name] = [max(threshold, original_range[0]), original_range[1]] else: self.filters[feature_name] = [threshold, np.inf] else: filter_func = [lambda df, a=threshold: df[feature_name] <= a] if feature_name in self.filters.keys(): original_range = self.filters[feature_name] self.filters[feature_name] = [original_range[0], min(threshold, original_range[1])] else: self.filters[feature_name] = [np.NINF, threshold] self.filter_functions += filter_func return self def copy(self): """Return a copy of the object.""" return runml_checksBaseFilter(self.filters.copy(), self.filter_functions.copy(), self.label) def intersect_two_filters(filter1: runml_checksFilter, filter2: runml_checksFilter) -> runml_checksFilter: """Merge two runml_checksFilters into one, an intersection of both filters.""" return runml_checksFilter(filter1.filter_functions + filter2.filter_functions) def partition_numeric_feature_around_segment(column: pd.Series, segment: List[float], max_additional_segments: int = 4) -> np.ndarray: """Split given series into segments containing specified segment. Tries to create segments as balanced as possible in size. Parameters ---------- column : pd.Series Series to be partitioned. segment : List[float] Segment to be included in the partition. max_additional_segments : int, default = 4 Maximum number of segments to be returned (not including the original segment). 
""" data_below_segment, data_above_segment = column[column <= segment[0]], column[column > segment[1]] if len(data_below_segment) + len(data_above_segment) == 0: return np.array([np.nanmin(column), np.nanmax(column)]) ratio = np.divide(len(data_below_segment), len(data_below_segment) + len(data_above_segment)) if len(data_below_segment) == 0: segments_below = np.array([np.nanmin(column)]) elif data_below_segment.nunique() == 1: segments_below = np.array([np.nanmin(column), segment[0]]) else: segments_below = numeric_segmentation_edges(data_below_segment, round(max_additional_segments * ratio)) segments_below = np.append(np.delete(segments_below, len(segments_below) - 1), segment[0]) if len(data_above_segment) == 0: segments_above = np.array([np.nanmax(column)]) elif data_above_segment.nunique() == 1: segments_above = np.array([segment[1], np.nanmax(column)]) else: segments_above = numeric_segmentation_edges(data_above_segment, round(max_additional_segments * (1 - ratio))) segments_above = np.append(segment[1], np.delete(segments_above, 0)) return np.unique(np.concatenate([segments_below, segments_above], axis=None)) def numeric_segmentation_edges(column: pd.Series, max_segments: int) -> np.ndarray: """Split given series into values which are used to create quantiles segments. Tries to create `max_segments + 1` values (since segment is a range, so 2 values needed to create segment) but in case some quantiles have the same value they will be filtered, and the result will have less than max_segments + 1 values. 
""" percentile_values = np.array([min(column), max(column)]) attempt_max_segments = max_segments prev_percentile_values = deepcopy(percentile_values) while len(percentile_values) < max_segments + 1: prev_percentile_values = deepcopy(percentile_values) percentile_values = pd.unique( np.nanpercentile(column.to_numpy(), np.linspace(0, 100, attempt_max_segments + 1)) ) if len(percentile_values) == len(prev_percentile_values): break attempt_max_segments *= 2 if len(percentile_values) > max_segments + 1: percentile_values = prev_percentile_values return percentile_values def largest_category_index_up_to_ratio(histogram, max_segments, max_cat_proportions): """Decide which categorical values are big enough to display individually. First check how many of the biggest categories needed in order to occupy `max_cat_proportions`% of the data. If the number is less than max_segments than return it, else return max_segments or number of unique values. """ total_values = sum(histogram.values) first_less_then_max_cat_proportions_idx = np.argwhere( histogram.values.cumsum() >= total_values * max_cat_proportions )[0][0] # Get index of last value in histogram to show return min(max_segments, histogram.size, first_less_then_max_cat_proportions_idx + 1) def partition_column( dataset: Dataset, column_name: Hashable, max_segments: int = 10, max_cat_proportions: float = 0.9, ) -> List[runml_checksFilter]: """Split column into segments. For categorical we'll have a max of max_segments + 1, for the 'Others'. We take the largest categories which cumulative percent in data is equal/larger than `max_cat_proportions`. the rest will go to 'Others' even if less than max_segments. For numerical we split into maximum number of `max_segments` quantiles. if some of the quantiles are duplicates then we merge them into the same segment range (so not all ranges necessarily will have same size). Parameters ---------- dataset : Dataset column_name : Hashable column to partition. 
max_segments : int, default: 10 maximum number of segments to split into. max_cat_proportions : float , default: 0.9 (for categorical) ratio to aggregate largest values to show. Returns ------- List[runml_checksFilter] """ column = dataset.data[column_name] if column_name in dataset.numerical_features: percentile_values = numeric_segmentation_edges(column, max_segments) # If for some reason only single value in the column (and column not categorical) we will get single item if len(percentile_values) == 1: f = lambda df, val=percentile_values[0]: (df[column_name] == val) label = str(percentile_values[0]) return [runml_checksFilter([f], label)] filters = [] for start, end in zip(percentile_values[:-1], percentile_values[1:]): # In case of the last range, the end is closed. if end == percentile_values[-1]: f = lambda df, a=start, b=end: (df[column_name] >= a) & (df[column_name] <= b) label = f'[{format_number(start)} - {format_number(end)}]' else: f = lambda df, a=start, b=end: (df[column_name] >= a) & (df[column_name] < b) label = f'[{format_number(start)} - {format_number(end)})' filters.append(runml_checksFilter([f], label)) return filters elif column_name in dataset.cat_features: # Get sorted histogram cat_hist_dict = column.value_counts() # Get index of last value in histogram to show n_large_cats = largest_category_index_up_to_ratio(cat_hist_dict, max_segments, max_cat_proportions) filters = [] for i in range(n_large_cats): f = lambda df, val=cat_hist_dict.index[i]: df[column_name] == val filters.append(runml_checksFilter([f], str(cat_hist_dict.index[i]))) if len(cat_hist_dict) > n_large_cats: f = lambda df, values=cat_hist_dict.index[:n_large_cats]: ~df[column_name].isin(values) filters.append(runml_checksFilter([f], 'Others')) return filters def convert_tree_leaves_into_filters(tree, feature_names: List[str]) -> List[runml_checksBaseFilter]: """Extract the leaves from a sklearn tree and covert them into runml_checksBaseFilter. 
The function goes over the tree from root to leaf and concatenates (by intersecting) the relevant filters along the way. The function returns a list in which each element is a runml_checksFilter representing the path between the root to a different leaf. Parameters ---------- tree : A sklearn tree. The tree_ property of a sklearn decision tree. feature_names : List[str] The feature names for elements within the tree. Normally it is the column names of the data frame the tree was trained on. Returns ------- List[runml_checksFilter]: A list of filters describing the leaves of the tree. """ node_to_feature = [feature_names[feature_idx] if feature_idx != _tree.TREE_UNDEFINED else None for feature_idx in tree.feature] def recurse(node_idx: int, filter_of_node: runml_checksBaseFilter): if tree.feature[node_idx] != _tree.TREE_UNDEFINED: left_node_filter = filter_of_node.copy().add_filter(node_to_feature[node_idx], tree.threshold[node_idx], greater_then=False) right_node_filter = filter_of_node.copy().add_filter(node_to_feature[node_idx], tree.threshold[node_idx]) return recurse(tree.children_left[node_idx], left_node_filter) + \ recurse(tree.children_right[node_idx], right_node_filter) else: return [filter_of_node] filters_to_leaves = recurse(0, runml_checksBaseFilter()) return filters_to_leaves
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/utils/performance/partition.py
0.845241
0.720467
partition.py
pypi
"""Module for base tabular context.""" import typing as t import numpy as np import pandas as pd from runml_checks import CheckFailure, CheckResult from runml_checks.core import DatasetKind from runml_checks.core.errors import (DatasetValidationError, runml_checksNotSupportedError, runml_checksValueError, ModelValidationError) from runml_checks.tabular._shared_docs import docstrings from runml_checks.tabular.dataset import Dataset from runml_checks.tabular.utils.task_type import TaskType from runml_checks.tabular.utils.validation import (ensure_predictions_proba, ensure_predictions_shape, model_type_validation, validate_model) from runml_checks.utils.decorators import deprecate_kwarg from runml_checks.utils.features import calculate_feature_importance_or_none from runml_checks.utils.logger import get_logger from runml_checks.utils.metrics import get_default_scorers, init_validate_scorers, task_type_check from runml_checks.utils.typing import BasicModel __all__ = [ 'Context', '_DummyModel' ] class _DummyModel: """Dummy model class used for inference with static predictions from the user. Parameters ---------- train: Dataset Dataset, representing data an estimator was fitted on. test: Dataset Dataset, representing data an estimator predicts on. y_pred_train: np.ndarray Array of the model prediction over the train dataset. y_pred_test: np.ndarray Array of the model prediction over the test dataset. y_proba_train: np.ndarray Array of the model prediction probabilities over the train dataset. y_proba_test: np.ndarray Array of the model prediction probabilities over the test dataset. validate_data_on_predict: bool, default = True If true, before predicting validates that the received data samples have the same index as in original data. 
""" feature_df_list: t.List[pd.DataFrame] predictions: pd.DataFrame proba: pd.DataFrame def __init__(self, test: Dataset, y_pred_test: t.Union[np.ndarray, t.List[t.Hashable]], y_proba_test: np.ndarray, train: t.Union[Dataset, None] = None, y_pred_train: t.Union[np.ndarray, t.List[t.Hashable], None] = None, y_proba_train: t.Union[np.ndarray, None] = None, validate_data_on_predict: bool = True): if train is not None and test is not None: # check if datasets have same indexes if set(train.data.index) & set(test.data.index): train.data.index = map(lambda x: f'train-{x}', list(train.data.index)) test.data.index = map(lambda x: f'test-{x}', list(test.data.index)) get_logger().warning('train and test datasets have common index - adding "train"/"test"' ' prefixes. To avoid that provide datasets with no common indexes ' 'or pass the model object instead of the predictions.') feature_df_list = [] predictions = [] probas = [] for dataset, y_pred, y_proba in zip([train, test], [y_pred_train, y_pred_test], [y_proba_train, y_proba_test]): if dataset is not None: feature_df_list.append(dataset.features_columns) if y_pred is None and y_proba is not None: y_pred = np.argmax(y_proba, axis=-1) if y_pred is not None: if len(y_pred.shape) > 1 and y_pred.shape[1] == 1: y_pred = y_pred[:, 0] ensure_predictions_shape(y_pred, dataset.data) predictions.append(pd.Series(y_pred, index=dataset.data.index)) if y_proba is not None: ensure_predictions_proba(y_proba, y_pred) probas.append(pd.DataFrame(data=y_proba, index=dataset.data.index)) self.predictions = pd.concat(predictions, axis=0) if predictions else None self.probas = pd.concat(probas, axis=0) if probas else None self.feature_df_list = feature_df_list self.validate_data_on_predict = validate_data_on_predict if self.predictions is not None: self.predict = self._predict if self.probas is not None: self.predict_proba = self._predict_proba def _validate_data(self, data: pd.DataFrame): # Validate only up to 100 samples data = 
data.sample(min(100, len(data))) for feature_df in self.feature_df_list: # If all indices are found than test for equality if set(data.index).issubset(set(feature_df.index)): # If equal than data is valid, can return if feature_df.loc[data.index].fillna('').equals(data.fillna('')): return else: raise runml_checksValueError('Data that has not been seen before passed for inference with static ' 'predictions. Pass a real model to resolve this') raise runml_checksValueError('Data with indices that has not been seen before passed for inference with static ' 'predictions. Pass a real model to resolve this') def _predict(self, data: pd.DataFrame): """Predict on given data by the data indexes.""" if self.validate_data_on_predict: self._validate_data(data) return self.predictions.loc[data.index].to_numpy() def _predict_proba(self, data: pd.DataFrame): """Predict probabilities on given data by the data indexes.""" if self.validate_data_on_predict: self._validate_data(data) return self.probas.loc[data.index].to_numpy() def fit(self, *args, **kwargs): """Just for python 3.6 (sklearn validates fit method).""" @docstrings class Context: """Contains all the data + properties the user has passed to a check/suite, and validates it seamlessly. 
Parameters ---------- train: Union[Dataset, pd.DataFrame, None] , default: None Dataset or DataFrame object, representing data an estimator was fitted on test: Union[Dataset, pd.DataFrame, None] , default: None Dataset or DataFrame object, representing data an estimator predicts on model: Optional[BasicModel] , default: None A scikit-learn-compatible fitted estimator instance {additional_context_params:indent} """ @deprecate_kwarg(old_name='features_importance', new_name='feature_importance') def __init__( self, train: t.Union[Dataset, pd.DataFrame, None] = None, test: t.Union[Dataset, pd.DataFrame, None] = None, model: t.Optional[BasicModel] = None, model_name: str = '', feature_importance: t.Optional[pd.Series] = None, feature_importance_force_permutation: bool = False, feature_importance_timeout: int = 120, scorers: t.Optional[t.Mapping[str, t.Union[str, t.Callable]]] = None, scorers_per_class: t.Optional[t.Mapping[str, t.Union[str, t.Callable]]] = None, with_display: bool = True, y_pred_train: t.Optional[np.ndarray] = None, y_pred_test: t.Optional[np.ndarray] = None, y_proba_train: t.Optional[np.ndarray] = None, y_proba_test: t.Optional[np.ndarray] = None, ): # Validations if train is None and test is None and model is None: raise runml_checksValueError('At least one dataset (or model) must be passed to the method!') if train is not None: train = Dataset.cast_to_dataset(train) if test is not None: test = Dataset.cast_to_dataset(test) # If both dataset, validate they fit each other if train and test: if not Dataset.datasets_share_label(train, test): raise DatasetValidationError('train and test requires to have and to share the same label') if not Dataset.datasets_share_features(train, test): raise DatasetValidationError('train and test requires to share the same features columns') if not Dataset.datasets_share_categorical_features(train, test): raise DatasetValidationError( 'train and test datasets should share ' 'the same categorical features. 
Possible reason is that some columns were' 'inferred incorrectly as categorical features. To fix this, manually edit the ' 'categorical features using Dataset(cat_features=<list_of_features>' ) if not Dataset.datasets_share_index(train, test): raise DatasetValidationError('train and test requires to share the same index column') if not Dataset.datasets_share_date(train, test): raise DatasetValidationError('train and test requires to share the same date column') if test and not train: raise DatasetValidationError('Can\'t initialize context with only test. if you have single dataset, ' 'initialize it as train') if model is None and \ not pd.Series([y_pred_train, y_pred_test, y_proba_train, y_proba_test]).isna().all(): model = _DummyModel(train=train, test=test, y_pred_train=y_pred_train, y_pred_test=y_pred_test, y_proba_test=y_proba_test, y_proba_train=y_proba_train) if model is not None: # Here validate only type of model, later validating it can predict on the data if needed model_type_validation(model) if feature_importance is not None: if not isinstance(feature_importance, pd.Series): raise runml_checksValueError('feature_importance must be a pandas Series') self._train = train self._test = test self._model = model self._feature_importance_force_permutation = feature_importance_force_permutation self._feature_importance = feature_importance self._feature_importance_timeout = feature_importance_timeout self._calculated_importance = feature_importance is not None self._importance_type = None self._validated_model = False self._task_type = None self._user_scorers = scorers self._user_scorers_per_class = scorers_per_class self._model_name = model_name self._with_display = with_display # Properties # Validations note: We know train & test fit each other so all validations can be run only on train @property def with_display(self) -> bool: """Return the with_display flag.""" return self._with_display @property def train(self) -> Dataset: """Return train if exists, 
otherwise raise error.""" if self._train is None: raise runml_checksNotSupportedError('Check is irrelevant for Datasets without train dataset') return self._train @property def test(self) -> Dataset: """Return test if exists, otherwise raise error.""" if self._test is None: raise runml_checksNotSupportedError('Check is irrelevant for Datasets without test dataset') return self._test @property def model(self) -> BasicModel: """Return & validate model if model exists, otherwise raise error.""" if self._model is None: raise runml_checksNotSupportedError('Check is irrelevant for Datasets without model') if not self._validated_model: if self._train: validate_model(self._train, self._model) self._validated_model = True return self._model @property def model_name(self): """Return model name.""" return self._model_name @property def task_type(self) -> TaskType: """Return task type if model & train & label exists. otherwise, raise error.""" if self._task_type is None: self._task_type = task_type_check(self.model, self.train) return self._task_type @property def features_importance(self) -> t.Optional[pd.Series]: """Return feature importance, or None if not possible.""" # TODO: remove in future get_logger().warning('"features_importance" property is deprecated use "feature_importance" instead') return self.feature_importance @property def feature_importance(self) -> t.Optional[pd.Series]: """Return feature importance, or None if not possible.""" if not self._calculated_importance: if self._model and (self._train or self._test): permutation_kwargs = {'timeout': self._feature_importance_timeout} dataset = self.test if self.have_test() else self.train importance, importance_type = calculate_feature_importance_or_none( self._model, dataset, self._feature_importance_force_permutation, permutation_kwargs ) self._feature_importance = importance self._importance_type = importance_type else: self._feature_importance = None self._calculated_importance = True return 
self._feature_importance @property def features_importance_type(self) -> t.Optional[str]: """Return feature importance type if feature importance is available, else None.""" # TODO: remove in future get_logger().warning('"features_importance_type" property is deprecated use "feature_importance_type" instead') return self.feature_importance_type @property def feature_importance_type(self) -> t.Optional[str]: """Return feature importance type if feature importance is available, else None.""" # Calling first feature_importance, because _importance_type is assigned only after feature importance is # calculated. if self.feature_importance: return self._importance_type return None def have_test(self): """Return whether there is test dataset defined.""" return self._test is not None def assert_task_type(self, *expected_types: TaskType): """Assert task_type matching given types. If task_type is defined, validate it and raise error if needed, else returns True. If task_type is not defined, return False. """ # To calculate task type we need model and train. 
if not exists return False, means we did not validate if self._model is None or self._train is None: return False if self.task_type not in expected_types: raise ModelValidationError( f'Check is relevant for models of type {[e.value.lower() for e in expected_types]}, ' f"but received model of type '{self.task_type.value.lower()}'" # pylint: disable=inconsistent-quotes ) return True def assert_classification_task(self): """Assert the task_type is classification.""" # assert_task_type makes assertion if task type exists and returns True, else returns False # If not task type than check label type if (not self.assert_task_type(TaskType.MULTICLASS, TaskType.BINARY) and self.train.label_type == TaskType.REGRESSION): raise ModelValidationError('Check is irrelevant for regressions tasks') def assert_regression_task(self): """Assert the task type is regression.""" # assert_task_type makes assertion if task type exists and returns True, else returns False # If not task type than check label type if (not self.assert_task_type(TaskType.REGRESSION) and self.train.label_type != TaskType.REGRESSION): raise ModelValidationError('Check is irrelevant for classification tasks') def get_scorers(self, alternative_scorers: t.Mapping[str, t.Union[str, t.Callable]] = None, class_avg=True): """Return initialized & validated scorers in a given priority. If receive `alternative_scorers` return them, Else if user defined global scorers return them, Else return default scorers. 
Parameters ---------- alternative_scorers : Mapping[str, Union[str, Callable]], default None dict of scorers names to scorer sklearn_name/function class_avg : bool, default True for classification whether to return scorers of average score or score per class """ if class_avg: user_scorers = self._user_scorers else: user_scorers = self._user_scorers_per_class scorers = alternative_scorers or user_scorers or get_default_scorers(self.task_type, class_avg) return init_validate_scorers(scorers, self.model, self.train, class_avg, self.task_type) def get_single_scorer(self, alternative_scorers: t.Mapping[str, t.Union[str, t.Callable]] = None, class_avg=True): """Return initialized & validated single scorer in a given priority. If receive `alternative_scorers` use them, Else if user defined global scorers use them, Else use default scorers. Returns the first scorer from the scorers described above. Parameters ---------- alternative_scorers : Mapping[str, Union[str, Callable]], default None dict of scorers names to scorer sklearn_name/function. Only first scorer will be used. 
class_avg : bool, default True for classification whether to return scorers of average score or score per class """ if class_avg: user_scorers = self._user_scorers else: user_scorers = self._user_scorers_per_class scorers = alternative_scorers or user_scorers or get_default_scorers(self.task_type, class_avg) # The single scorer is the first one in the dict scorer_name = next(iter(scorers)) single_scorer_dict = {scorer_name: scorers[scorer_name]} return init_validate_scorers(single_scorer_dict, self.model, self.train, class_avg, self.task_type)[0] def get_data_by_kind(self, kind: DatasetKind): """Return the relevant Dataset by given kind.""" if kind == DatasetKind.TRAIN: return self.train elif kind == DatasetKind.TEST: return self.test else: raise runml_checksValueError(f'Unexpected dataset kind {kind}') def finalize_check_result(self, check_result, check, kind: DatasetKind = None): """Run final processing on a check result which includes validation, conditions processing and sampling\ footnote.""" # Validate the check result type if isinstance(check_result, CheckFailure): return if not isinstance(check_result, CheckResult): raise runml_checksValueError(f'Check {check.name()} expected to return CheckResult but got: ' + type(check_result).__name__) # Set reference between the check result and check check_result.check = check # Calculate conditions results check_result.process_conditions() # Add sampling footnote if needed if hasattr(check, 'n_samples'): n_samples = getattr(check, 'n_samples') message = '' if kind: dataset = self.get_data_by_kind(kind) if dataset.is_sampled(n_samples): message = f'Data is sampled from the original dataset, running on ' \ f'{dataset.len_when_sampled(n_samples)} samples out of {len(dataset)}.' else: if self._train is not None and self._train.is_sampled(n_samples): message += f'Running on {self._train.len_when_sampled(n_samples)} <b>train</b> data samples ' \ f'out of {len(self._train)}.' 
if self._test is not None and self._test.is_sampled(n_samples): if message: message += ' ' message += f'Running on {self._test.len_when_sampled(n_samples)} <b>test</b> data samples ' \ f'out of {len(self._test)}.' if message: message = ('<p style="font-size:0.9em;line-height:1;"><i>' f'Note - data sampling: {message} Sample size can be controlled with the "n_samples" ' 'parameter.</i></p>') check_result.display.append(message)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/context.py
0.907876
0.451206
context.py
pypi
"""This file changes default 'ignore' action of DeprecationWarnings for specific deprecation messages.""" import warnings # Added in version 0.6.2, deprecates max_num_categories in all drift checks warnings.filterwarnings( action='always', message=r'.*max_num_categories.*', category=DeprecationWarning, module=r'runml_checks.*' ) # Added in 0.7 Warning filters for deprecated functions in runml_checks.tabular.checks # Should be removed in 0.8 warnings.filterwarnings( action='once', message=r'runml_checks\.tabular\.checks\.performance is deprecated.*', category=DeprecationWarning, module=r'runml_checks.*' ) warnings.filterwarnings( action='once', message=r'runml_checks.tabular.checks.overview is deprecated.*', category=DeprecationWarning, module=r'runml_checks.*' ) warnings.filterwarnings( action='once', message=r'runml_checks.tabular.checks.methodology is deprecated.*', category=DeprecationWarning, module=r'runml_checks.tabular.checks.methodology.*' ) warnings.filterwarnings( action='once', message=r'runml_checks.tabular.checks.distribution is deprecated.*', category=DeprecationWarning, module=r'runml_checks.*' ) warnings.filterwarnings( action='always', message=r'the single_dataset_integrity suite is deprecated.*', category=DeprecationWarning, module=r'runml_checks.*' ) warnings.filterwarnings( action='always', message=r'the train_test_leakage suite is deprecated.*', category=DeprecationWarning, module=r'runml_checks.*' ) # Added in 0.7 Warning filters for drift conditions # Should be removed in 0.8 warnings.filterwarnings( action='once', message=r'.*max_allowed_psi_score is deprecated.*', category=DeprecationWarning, module=r'runml_checks.*' ) warnings.filterwarnings( action='once', message=r'.*max_allowed_earth_movers_score is deprecated.*', category=DeprecationWarning, module=r'runml_checks.*' ) warnings.filterwarnings( action='once', message=r'the identifier_leakage check is deprecated.*', category=DeprecationWarning, module=r'runml_checks.*' ) 
warnings.filterwarnings( action='once', message=r'The DominantFrequencyChange check is deprecated.*', category=DeprecationWarning, module=r'runml_checks.*' )
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/deprecation_warnings.py
0.54698
0.1602
deprecation_warnings.py
pypi
"""The dataset module containing the tabular Dataset class and its functions.""" # pylint: disable=inconsistent-quotes,protected-access import typing as t import numpy as np import pandas as pd from IPython.display import HTML, display_html from pandas.api.types import infer_dtype from sklearn.model_selection import train_test_split from typing_extensions import Literal as L from runml_checks.core.errors import DatasetValidationError, runml_checksNotSupportedError, runml_checksValueError from runml_checks.tabular.utils.task_type import TaskType from runml_checks.utils.dataframes import select_from_dataframe from runml_checks.utils.features import infer_categorical_features, infer_numerical_features, is_categorical from runml_checks.utils.logger import get_logger from runml_checks.utils.strings import get_docs_link from runml_checks.utils.typing import Hashable __all__ = ['Dataset'] TDataset = t.TypeVar('TDataset', bound='Dataset') DatasetReprFmt = t.Union[L['string'], L['html']] # noqa: F821 class Dataset: """ Dataset wraps pandas DataFrame together with ML related metadata. The Dataset class is containing additional data and methods intended for easily accessing metadata relevant for the training or validating of an ML models. Parameters ---------- df : Any An object that can be casted to a pandas DataFrame - containing data relevant for the training or validating of a ML models. label : t.Union[Hashable, pd.Series, pd.DataFrame, np.ndarray] , default: None label column provided either as a string with the name of an existing column in the DataFrame or a label object including the label data (pandas Series/DataFrame or a numpy array) that will be concatenated to the data in the DataFrame. 
in case of label data the following logic is applied to set the label name: - Series: takes the series name or 'target' if name is empty - DataFrame: expect single column in the dataframe and use its name - numpy: use 'target' features : t.Optional[t.Sequence[Hashable]] , default: None List of names for the feature columns in the DataFrame. cat_features : t.Optional[t.Sequence[Hashable]] , default: None List of names for the categorical features in the DataFrame. In order to disable categorical. features inference, pass cat_features=[] index_name : t.Optional[Hashable] , default: None Name of the index column in the dataframe. If set_index_from_dataframe_index is True and index_name is not None, index will be created from the dataframe index level with the given name. If index levels have no names, an int must be used to select the appropriate level by order. set_index_from_dataframe_index : bool , default: False If set to true, index will be created from the dataframe index instead of dataframe columns (default). If index_name is None, first level of the index will be used in case of a multilevel index. datetime_name : t.Optional[Hashable] , default: None Name of the datetime column in the dataframe. If set_datetime_from_dataframe_index is True and datetime_name is not None, date will be created from the dataframe index level with the given name. If index levels have no names, an int must be used to select the appropriate level by order. set_datetime_from_dataframe_index : bool , default: False If set to true, date will be created from the dataframe index instead of dataframe columns (default). If datetime_name is None, first level of the index will be used in case of a multilevel index. convert_datetime : bool , default: True If set to true, date will be converted to datetime using pandas.to_datetime. datetime_args : t.Optional[t.Dict] , default: None pandas.to_datetime args used for conversion of the datetime column. 
(look at https://pandas.pydata.org/docs/reference/api/pandas.to_datetime.html for more documentation) max_categorical_ratio : float , default: 0.01 The max ratio of unique values in a column in order for it to be inferred as a categorical feature. max_categories : int , default: None The maximum number of categories in a column in order for it to be inferred as a categorical feature. if None, uses is_categorical default inference mechanism. label_type : str , default: None Used to assume target model type if not found on model. Values ('classification_label', 'regression_label') If None then label type is inferred from label using is_categorical logic. """ _features: t.List[Hashable] _label_name: t.Optional[Hashable] _index_name: t.Optional[Hashable] _set_index_from_dataframe_index: t.Optional[bool] _datetime_name: t.Optional[Hashable] _set_datetime_from_dataframe_index: t.Optional[bool] _convert_datetime: t.Optional[bool] _datetime_column: t.Optional[pd.Series] _cat_features: t.List[Hashable] _data: pd.DataFrame _max_categorical_ratio: float _max_categories: int _label_type: t.Optional[TaskType] _classes: t.Tuple[str, ...] 
def __init__( self, df: t.Any, label: t.Union[Hashable, pd.Series, pd.DataFrame, np.ndarray] = None, features: t.Optional[t.Sequence[Hashable]] = None, cat_features: t.Optional[t.Sequence[Hashable]] = None, index_name: t.Optional[Hashable] = None, set_index_from_dataframe_index: bool = False, datetime_name: t.Optional[Hashable] = None, set_datetime_from_dataframe_index: bool = False, convert_datetime: bool = True, datetime_args: t.Optional[t.Dict] = None, max_categorical_ratio: float = 0.01, max_categories: int = None, label_type: str = None ): if len(df) == 0: raise runml_checksValueError('Can\'t create a Dataset object with an empty dataframe') self._data = pd.DataFrame(df).copy() # Validations if label is None: label_name = None elif isinstance(label, (pd.Series, pd.DataFrame, np.ndarray)): label_name = None if isinstance(label, pd.Series): # Set label name if exists if label.name is not None: label_name = label.name if label_name in self._data.columns: raise runml_checksValueError(f'Data has column with name "{label_name}", use pandas rename to' f' change label name or remove the column from the dataframe') elif isinstance(label, pd.DataFrame): # Validate shape if label.shape[1] > 1: raise runml_checksValueError('Label must have a single column') # Set label name label_name = label.columns[0] label = label[label_name] if label_name in self._data.columns: raise runml_checksValueError(f'Data has column with name "{label_name}", change label column ' f'or remove the column from the data dataframe') elif isinstance(label, np.ndarray): # Validate label shape if len(label.shape) > 2: raise runml_checksValueError('Label must be either column vector or row vector') elif len(label.shape) == 2: if all(x != 1 for x in label.shape): raise runml_checksValueError('Label must be either column vector or row vector') label = np.squeeze(label) # Validate length of label if label.shape[0] != self._data.shape[0]: raise runml_checksValueError('Number of samples of label and data 
must be equal') # If no label found to set, then set default name if label_name is None: label_name = 'target' if label_name in self._data.columns: raise runml_checksValueError('Can\'t set default label name "target" since it already exists in ' 'the dataframe. use pandas name parameter to give the label a ' 'unique name') # Set label data in dataframe if isinstance(label, pd.Series): pd.testing.assert_index_equal(self._data.index, label.index) self._data[label_name] = label else: self._data[label_name] = np.array(label).reshape(-1, 1) elif isinstance(label, Hashable): label_name = label if label_name not in self._data.columns: raise runml_checksValueError(f'label column {label_name} not found in dataset columns') else: raise runml_checksValueError(f'Unsupported type for label: {type(label).__name__}') # Assert that the requested index can be found if not set_index_from_dataframe_index: if index_name is not None and index_name not in self._data.columns: error_message = f'Index column {index_name} not found in dataset columns.' if index_name == 'index': error_message += ' If you attempted to use the dataframe index, set ' \ 'set_index_from_dataframe_index to True instead.' 
raise runml_checksValueError(error_message) else: if index_name is not None: if isinstance(index_name, str): if index_name not in self._data.index.names: raise runml_checksValueError(f'Index {index_name} not found in dataframe index level names.') elif isinstance(index_name, int): if index_name > (len(self._data.index.names) - 1): raise runml_checksValueError(f'Dataframe index has less levels than {index_name + 1}.') else: raise runml_checksValueError(f'When set_index_from_dataframe_index is True index_name can be None,' f' int or str, but found {type(index_name)}') # Assert that the requested datetime can be found if not set_datetime_from_dataframe_index: if datetime_name is not None and datetime_name not in self._data.columns: error_message = f'Datetime column {datetime_name} not found in dataset columns.' if datetime_name == 'date': error_message += ' If you attempted to use the dataframe index, ' \ 'set set_datetime_from_dataframe_index to True instead.' raise runml_checksValueError(error_message) else: if datetime_name is not None: if isinstance(datetime_name, str): if datetime_name not in self._data.index.names: raise runml_checksValueError( f'Datetime {datetime_name} not found in dataframe index level names.' ) elif isinstance(datetime_name, int): if datetime_name > (len(self._data.index.names) - 1): raise runml_checksValueError(f'Dataframe index has less levels than {datetime_name + 1}.') else: raise runml_checksValueError(f'When set_index_from_dataframe_index is True index_name can be None,' f' int or str, but found {type(index_name)}') self._datetime_column = self.get_datetime_column_from_index(datetime_name) if features is not None: difference = set(features) - set(self._data.columns) if len(difference) > 0: raise runml_checksValueError('Features must be names of columns in dataframe. 
' f'Features {difference} have not been ' 'found in input dataframe.') self._features = list(features) else: self._features = [x for x in self._data.columns if x not in {label_name, index_name if not set_index_from_dataframe_index else None, datetime_name if not set_datetime_from_dataframe_index else None}] self._label_name = label_name self._index_name = index_name self._set_index_from_dataframe_index = set_index_from_dataframe_index self._convert_datetime = convert_datetime self._datetime_name = datetime_name self._set_datetime_from_dataframe_index = set_datetime_from_dataframe_index self._datetime_args = datetime_args or {} self._max_categorical_ratio = max_categorical_ratio self._max_categories = max_categories self._classes = None if self._label_name in self.features: raise runml_checksValueError(f'label column {self._label_name} can not be a feature column') if self._datetime_name in self.features: raise runml_checksValueError(f'datetime column {self._datetime_name} can not be a feature column') if self._index_name in self.features: raise runml_checksValueError(f'index column {self._index_name} can not be a feature column') if cat_features is not None: if set(cat_features).intersection(set(self._features)) != set(cat_features): raise runml_checksValueError('Categorical features must be a subset of features. 
' f'Categorical features {set(cat_features) - set(self._features)} ' 'have not been found in feature list.') self._cat_features = list(cat_features) else: self._cat_features = self._infer_categorical_features( self._data, max_categorical_ratio=max_categorical_ratio, max_categories=max_categories, columns=self._features ) if ((self._datetime_name is not None) or self._set_datetime_from_dataframe_index) and convert_datetime: if self._set_datetime_from_dataframe_index: self._datetime_column = pd.to_datetime(self._datetime_column, **self._datetime_args) else: self._data[self._datetime_name] = pd.to_datetime(self._data[self._datetime_name], **self._datetime_args) if label_type and self._label_name: if label_type == 'regression_label': self._label_type = TaskType.REGRESSION elif label_type == 'classification_label': if self.data[self._label_name].nunique() > 2: self._label_type = TaskType.MULTICLASS else: self._label_type = TaskType.BINARY else: get_logger().warning('Label type %s is not valid, auto inferring label type.' ' Possible values are regression_label or classification_label.', label_type) if self._label_name and not hasattr(self, "label_type"): self._label_type = self._infer_label_type(self.data[self._label_name]) elif not hasattr(self, "label_type"): self._label_type = None unassigned_cols = [col for col in self._features if col not in self._cat_features] self._numerical_features = infer_numerical_features(self._data[unassigned_cols]) @classmethod def from_numpy( cls: t.Type[TDataset], *args: np.ndarray, columns: t.Sequence[Hashable] = None, label_name: t.Hashable = None, **kwargs ) -> TDataset: """Create Dataset instance from numpy arrays. Parameters ---------- *args: np.ndarray Numpy array of data columns, and second optional numpy array of labels. columns : t.Sequence[Hashable] , default: None names for the columns. 
If none provided, the names that will be automatically assigned to the columns will be: 1 - n (where n - number of columns) label_name : t.Hashable , default: None labels column name. If none is provided, the name 'target' will be used. **kwargs : Dict additional arguments that will be passed to the main Dataset constructor. Returns ------- Dataset instance of the Dataset Raises ------ runml_checksValueError if receives zero or more than two numpy arrays. if columns (args[0]) is not two dimensional numpy array. if labels (args[1]) is not one dimensional numpy array. if features array or labels array is empty. Examples -------- >>> import numpy >>> from runml_checks.tabular import Dataset >>> features = numpy.array([[0.25, 0.3, 0.3], ... [0.14, 0.75, 0.3], ... [0.23, 0.39, 0.1]]) >>> labels = numpy.array([0.1, 0.1, 0.7]) >>> dataset = Dataset.from_numpy(features, labels) Creating dataset only from features array. >>> dataset = Dataset.from_numpy(features) Passing additional arguments to the main Dataset constructor >>> dataset = Dataset.from_numpy(features, labels, max_categorical_ratio=0.5) Specifying features and label columns names. >>> dataset = Dataset.from_numpy( ... features, labels, ... columns=['sensor-1', 'sensor-2', 'sensor-3'], ... label_name='labels' ... ) """ if len(args) == 0 or len(args) > 2: raise runml_checksValueError( "'from_numpy' constructor expecting to receive two numpy arrays (or at least one)." "First array must contains the columns and second the labels." ) columns_array = args[0] columns_error_message = ( "'from_numpy' constructor expecting columns (args[0]) " "to be not empty two dimensional array." 
) if len(columns_array.shape) != 2: raise runml_checksValueError(columns_error_message) if columns_array.shape[0] == 0 or columns_array.shape[1] == 0: raise runml_checksValueError(columns_error_message) if columns is not None and len(columns) != columns_array.shape[1]: raise runml_checksValueError( f'{columns_array.shape[1]} columns were provided ' f'but only {len(columns)} name(s) for them`s.' ) elif columns is None: columns = [str(index) for index in range(1, columns_array.shape[1] + 1)] if len(args) == 1: labels_array = None else: labels_array = args[1] if len(labels_array.shape) != 1 or labels_array.shape[0] == 0: raise runml_checksValueError( "'from_numpy' constructor expecting labels (args[1]) " "to be not empty one dimensional array." ) labels_array = pd.Series(labels_array) if label_name: labels_array = labels_array.rename(label_name) return cls( df=pd.DataFrame(data=columns_array, columns=columns), label=labels_array, **kwargs ) @property def data(self) -> pd.DataFrame: """Return the data of dataset.""" return self._data def copy(self: TDataset, new_data: pd.DataFrame) -> TDataset: """Create a copy of this Dataset with new data. 
Parameters ---------- new_data (DataFrame): new data from which new dataset will be created Returns ------- Dataset new dataset instance """ # Filter out if columns were dropped features = [feat for feat in self._features if feat in new_data.columns] cat_features = [feat for feat in self.cat_features if feat in new_data.columns] label_name = self._label_name if self._label_name in new_data.columns else None if self._label_type == TaskType.REGRESSION: label_type = 'regression_label' elif self._label_type in [TaskType.BINARY, TaskType.MULTICLASS]: label_type = 'classification_label' else: label_type = None index = self._index_name if self._index_name in new_data.columns else None date = self._datetime_name if self._datetime_name in new_data.columns else None cls = type(self) return cls(new_data, features=features, cat_features=cat_features, label=label_name, index_name=index, set_index_from_dataframe_index=self._set_index_from_dataframe_index, datetime_name=date, set_datetime_from_dataframe_index=self._set_datetime_from_dataframe_index, convert_datetime=self._convert_datetime, max_categorical_ratio=self._max_categorical_ratio, max_categories=self._max_categories, label_type=label_type) def sample(self: TDataset, n_samples: int, replace: bool = False, random_state: t.Optional[int] = None, drop_na_label: bool = False) -> TDataset: """Create a copy of the dataset object, with the internal dataframe being a sample of the original dataframe. Parameters ---------- n_samples : int Number of samples to draw. replace : bool, default: False Whether to sample with replacement. random_state : t.Optional[int] , default None Random state. drop_na_label : bool, default: False Whether to take sample only from rows with exiting label. Returns ------- Dataset instance of the Dataset with sampled internal dataframe. 
""" if drop_na_label and self.label_name: valid_idx = self.data[self.label_name].notna() data_to_sample = self.data[valid_idx] else: data_to_sample = self.data n_samples = min(n_samples, len(data_to_sample)) return self.copy(data_to_sample.sample(n_samples, replace=replace, random_state=random_state)) @property def n_samples(self) -> int: """Return number of samples in dataframe. Returns ------- int Number of samples in dataframe """ return self.data.shape[0] @property def label_type(self) -> t.Optional[TaskType]: """Return the label type. Returns ------- t.Optional[TaskType] Label type """ return self._label_type def train_test_split(self: TDataset, train_size: t.Union[int, float, None] = None, test_size: t.Union[int, float] = 0.25, random_state: int = 42, shuffle: bool = True, stratify: t.Union[t.List, pd.Series, np.ndarray, bool] = False ) -> t.Tuple[TDataset, TDataset]: """Split dataset into random train and test datasets. Parameters ---------- train_size : t.Union[int, float, None] , default: None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. test_size : t.Union[int, float] , default: 0.25 If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. random_state : int , default: 42 The random state to use for shuffling. shuffle : bool , default: True Whether to shuffle the data before splitting. stratify : t.Union[t.List, pd.Series, np.ndarray, bool] , default: False If True, data is split in a stratified fashion, using the class labels. If array-like, data is split in a stratified fashion, using this as class labels. Returns ------- Dataset Dataset containing train split data. Dataset Dataset containing test split data. 
""" if isinstance(stratify, bool): stratify = self.label_col if stratify else None train_df, test_df = train_test_split(self._data, test_size=test_size, train_size=train_size, random_state=random_state, shuffle=shuffle, stratify=stratify) return self.copy(train_df), self.copy(test_df) @staticmethod def _infer_label_type(label_col: pd.Series): if is_categorical(label_col, max_categorical_ratio=0.05): if label_col.nunique(dropna=True) > 2: return TaskType.MULTICLASS else: return TaskType.BINARY else: return TaskType.REGRESSION @staticmethod def _infer_categorical_features( df: pd.DataFrame, max_categorical_ratio: float, max_categories: int = None, columns: t.Optional[t.List[Hashable]] = None, ) -> t.List[Hashable]: """Infers which features are categorical by checking types and number of unique values. Parameters ---------- df: pd.DataFrame max_categorical_ratio: float max_categories: int , default: None columns: t.Optional[t.List[Hashable]] , default: None Returns ------- t.List[Hashable] Out of the list of feature names, returns list of categorical features """ categorical_columns = infer_categorical_features( df, max_categorical_ratio=max_categorical_ratio, max_categories=max_categories, columns=columns ) message = ('It is recommended to initialize Dataset with categorical features by doing ' '"Dataset(df, cat_features=categorical_list)". No categorical features were passed, therefore ' 'heuristically inferring categorical features in the data. ' f'{len(categorical_columns)} categorical features were inferred.') if len(categorical_columns) > 0: columns_to_print = categorical_columns[:7] message += ': ' + ', '.join(list(map(str, columns_to_print))) if len(categorical_columns) > len(columns_to_print): message += '... For full list use dataset.cat_features' get_logger().warning(message) return categorical_columns def is_categorical(self, col_name: Hashable) -> bool: """Check if a column is considered a category column in the dataset object. 
Parameters ---------- col_name : Hashable The name of the column in the dataframe Returns ------- bool If is categorical according to input numbers """ return col_name in self._cat_features @property def index_name(self) -> t.Optional[Hashable]: """If index column exists, return its name. Returns ------- t.Optional[Hashable] index name """ return self._index_name @property def index_col(self) -> t.Optional[pd.Series]: """Return index column. Index can be a named column or DataFrame index. Returns ------- t.Optional[pd.Series] If index column exists, returns a pandas Series of the index column. """ if self._set_index_from_dataframe_index is True: index_name = self.data.index.name or 'index' if self._index_name is None: return pd.Series(self.data.index.get_level_values(0), name=index_name, index=self.data.index) elif isinstance(self._index_name, (str, int)): return pd.Series(self.data.index.get_level_values(self._index_name), name=index_name, index=self.data.index) else: raise runml_checksValueError(f'Don\'t know to handle index_name of type {type(self._index_name)}') elif self._index_name is not None: return self.data[self._index_name] else: # No meaningful index to use: Index column not configured, and _set_index_from_dataframe_index is False return @property def datetime_name(self) -> t.Optional[Hashable]: """If datetime column exists, return its name. 
Returns ------- t.Optional[Hashable] datetime name """ return self._datetime_name def get_datetime_column_from_index(self, datetime_name): """Retrieve the datetime info from the index if _set_datetime_from_dataframe_index is True.""" index_name = self.data.index.name or 'datetime' if datetime_name is None: return pd.Series(self.data.index.get_level_values(0), name=index_name, index=self.data.index) elif isinstance(datetime_name, (str, int)): return pd.Series(self.data.index.get_level_values(datetime_name), name=index_name, index=self.data.index) @property def datetime_col(self) -> t.Optional[pd.Series]: """Return datetime column if exists. Returns ------- t.Optional[pd.Series] Series of the datetime column """ if self._set_datetime_from_dataframe_index is True and self._datetime_column is not None: return self._datetime_column elif self._datetime_name is not None: return self.data[self._datetime_name] else: # No meaningful Datetime to use: # Datetime column not configured, and _set_datetime_from_dataframe_index is False return @property def label_name(self) -> t.Optional[Hashable]: """If label column exists, return its name. Returns ------- t.Optional[Hashable] Label name """ return self._label_name @property def features(self) -> t.List[Hashable]: """Return list of feature names. Returns ------- t.List[Hashable] List of feature names. """ return list(self._features) @property def features_columns(self) -> pd.DataFrame: """Return DataFrame containing only the features defined in the dataset, if features are empty raise error. Returns ------- pd.DataFrame """ self.assert_features() return self.data[self.features] @property def label_col(self) -> pd.Series: """Return Series of the label defined in the dataset, if label is not defined raise error. Returns ------- pd.Series """ self.assert_label() return self.data[self.label_name] @property def cat_features(self) -> t.List[Hashable]: """Return list of categorical feature names. 
Returns ------- t.List[Hashable] List of categorical feature names. """ return list(self._cat_features) @property def numerical_features(self) -> t.List[Hashable]: """Return list of numerical feature names. Returns ------- t.List[Hashable] List of numerical feature names. """ return list(self._numerical_features) @property def classes(self) -> t.Tuple[str, ...]: """Return the classes from label column in sorted list. if no label column defined, return empty list. Returns ------- t.Tuple[str, ...] Sorted classes """ if self._classes is None: if self.label_name is not None: self._classes = tuple(sorted(self.data[self.label_name].dropna().unique().tolist())) else: self._classes = tuple() return self._classes @property def columns_info(self) -> t.Dict[Hashable, str]: """Return the role and logical type of each column. Returns ------- t.Dict[Hashable, str] Directory of a column and its role """ columns = {} for column in self.data.columns: if column == self._index_name: value = 'index' elif column == self._datetime_name: value = 'date' elif column == self._label_name: value = 'label' elif column in self._features: if column in self.cat_features: value = 'categorical feature' elif column in self.numerical_features: value = 'numerical feature' else: value = 'other feature' else: value = 'other' columns[column] = value return columns def assert_label(self): """Check if label is defined and if not raise error. Raises ------ runml_checksNotSupportedError """ if not self.label_name: raise runml_checksNotSupportedError( 'Dataset does not contain a label column', html=f'Dataset does not contain a label column. see {_get_dataset_docs_tag()}' ) def assert_features(self): """Check if features are defined (not empty) and if not raise error. Raises ------ runml_checksNotSupportedError """ if not self.features: raise runml_checksNotSupportedError( 'Dataset does not contain any feature columns', html=f'Dataset does not contain any feature columns. 
see {_get_dataset_docs_tag()}' ) def assert_datetime(self): """Check if datetime is defined and if not raise error. Raises ------ runml_checksNotSupportedError """ if not (self._set_datetime_from_dataframe_index or self._datetime_name): raise DatasetValidationError( 'Dataset does not contain a datetime', html=f'Dataset does not contain a datetime. see {_get_dataset_docs_tag()}' ) def assert_index(self): """Check if index is defined and if not raise error. Raises ------ runml_checksNotSupportedError """ if not (self._set_index_from_dataframe_index or self._index_name): raise DatasetValidationError( 'Dataset does not contain an index', html=f'Dataset does not contain an index. see {_get_dataset_docs_tag()}' ) def select( self: TDataset, columns: t.Union[Hashable, t.List[Hashable], None] = None, ignore_columns: t.Union[Hashable, t.List[Hashable], None] = None, keep_label: bool = False ) -> TDataset: """Filter dataset columns by given params. Parameters ---------- columns : Union[Hashable, List[Hashable], None] Column names to keep. ignore_columns : Union[Hashable, List[Hashable], None] Column names to drop. Returns ------- TDataset horizontally filtered dataset Raises ------ runml_checksValueError In case one of columns given don't exists raise error """ if keep_label and columns and self.label_name not in columns: columns.append(self.label_name) new_data = select_from_dataframe(self._data, columns, ignore_columns) if new_data.equals(self.data): return self else: return self.copy(new_data) @classmethod def cast_to_dataset(cls, obj: t.Any) -> 'Dataset': """Verify Dataset or transform to Dataset. Function verifies that provided value is a non-empty instance of Dataset, otherwise raises an exception, but if the 'cast' flag is set to True it will also try to transform provided value to the Dataset instance. 
Parameters ---------- obj value to verify Raises ------ runml_checksValueError if the provided value is not a Dataset instance; if the provided value cannot be transformed into Dataset instance; """ if isinstance(obj, pd.DataFrame): get_logger().warning( 'Received a "pandas.DataFrame" instance. It is recommended to pass a "runml_checks.tabular.Dataset" ' 'instance by doing "Dataset(dataframe)"' ) obj = Dataset(obj) elif not isinstance(obj, Dataset): raise runml_checksValueError( f'non-empty instance of Dataset or DataFrame was expected, instead got {type(obj).__name__}' ) return obj.copy(obj.data) @classmethod def datasets_share_features(cls, *datasets: 'Dataset') -> bool: """Verify that all provided datasets share same features. Parameters ---------- datasets : List[Dataset] list of datasets to validate Returns ------- bool True if all datasets share same features, otherwise False Raises ------ AssertionError 'datasets' parameter is not a list; 'datasets' contains less than one dataset; """ assert len(datasets) > 1, "'datasets' must contains at least two items" # TODO: should not we also check features dtypes? features_names = set(datasets[0].features) for ds in datasets[1:]: if features_names != set(ds.features): return False return True @classmethod def datasets_share_categorical_features(cls, *datasets: 'Dataset') -> bool: """Verify that all provided datasets share same categorical features. Parameters ---------- datasets : List[Dataset] list of datasets to validate Returns ------- bool True if all datasets share same categorical features, otherwise False Raises ------ AssertionError 'datasets' parameter is not a list; 'datasets' contains less than one dataset; """ assert len(datasets) > 1, "'datasets' must contains at least two items" # TODO: should not we also check features dtypes? 
first = set(datasets[0].cat_features) for ds in datasets[1:]: features = set(ds.cat_features) if first != features: return False return True @classmethod def datasets_share_label(cls, *datasets: 'Dataset') -> bool: """Verify that all provided datasets share same label column. Parameters ---------- datasets : List[Dataset] list of datasets to validate Returns ------- bool True if all datasets share same categorical features, otherwise False Raises ------ AssertionError 'datasets' parameter is not a list; 'datasets' contains less than one dataset; """ assert len(datasets) > 1, "'datasets' must contains at least two items" # TODO: should not we also check label dtypes? label_name = datasets[0].label_name for ds in datasets[1:]: if ds.label_name != label_name: return False return True @classmethod def datasets_share_index(cls, *datasets: 'Dataset') -> bool: """Verify that all provided datasets share same index column. Parameters ---------- datasets : List[Dataset] list of datasets to validate Returns ------- bool True if all datasets share same index column, otherwise False Raises ------ AssertionError 'datasets' parameter is not a list; 'datasets' contains less than one dataset; """ assert len(datasets) > 1, "'datasets' must contains at least two items" first_ds = datasets[0] for ds in datasets[1:]: if (ds._index_name != first_ds._index_name or ds._set_index_from_dataframe_index != first_ds._set_index_from_dataframe_index): return False return True @classmethod def datasets_share_date(cls, *datasets: 'Dataset') -> bool: """Verify that all provided datasets share same date column. 
Parameters ---------- datasets : List[Dataset] list of datasets to validate Returns ------- bool True if all datasets share same date column, otherwise False Raises ------ AssertionError 'datasets' parameter is not a list; 'datasets' contains less than one dataset; """ assert len(datasets) > 1, "'datasets' must contains at least two items" first_ds = datasets[0] for ds in datasets[1:]: if (ds._datetime_name != first_ds._datetime_name or ds._set_datetime_from_dataframe_index != first_ds._set_datetime_from_dataframe_index): return False return True def _dataset_description(self) -> pd.DataFrame: data = self.data features = self.features categorical_features = self.cat_features numerical_features = self.numerical_features label_column = t.cast(pd.Series, data[self.label_name]) if self.label_name else None index_column = self.index_col datetime_column = self.datetime_col label_name = None index_name = None datetime_name = None dataset_columns_info = [] if index_column is not None: index_name = index_column.name dataset_columns_info.append([ index_name, infer_dtype(index_column, skipna=True), 'Index', 'set from dataframe index' if self._set_index_from_dataframe_index is True else '' ]) if datetime_column is not None: datetime_name = datetime_column.name dataset_columns_info.append([ datetime_name, infer_dtype(datetime_column, skipna=True), 'Datetime', 'set from DataFrame index' if self._set_datetime_from_dataframe_index is True else '' ]) if label_column is not None: label_name = label_column.name dataset_columns_info.append([ label_name, infer_dtype(label_column, skipna=True), t.cast(str, self.label_type.value).capitalize() + " LABEL", '' ]) all_columns = pd.Series(features + list(self.data.columns)).unique() for feature_name in t.cast(t.Iterable[str], all_columns): if feature_name in (index_name, datetime_name, label_name): continue feature_dtype = infer_dtype(data[feature_name], skipna=True) if feature_name in categorical_features: kind = 'Categorical Feature' elif 
feature_name in numerical_features: kind = 'Numerical Feature' elif feature_name in features: kind = 'Other Feature' else: kind = 'Dataset Column' dataset_columns_info.append([feature_name, feature_dtype, kind, '']) return pd.DataFrame( data=dataset_columns_info, columns=['Column', 'DType', 'Kind', 'Additional Info'], ) def __repr__( self, max_cols: int = 8, max_rows: int = 10, fmt: DatasetReprFmt = 'string' ) -> str: """Represent a dataset instance.""" info = self._dataset_description() columns = list(info[info['Additional Info'] == '']['Column']) data = self.data.loc[:, columns] # Sorting horizontally kwargs = dict(max_cols=max_cols, col_space=15) if fmt == 'string': features_info = info.to_string(max_rows=50, **kwargs) data_to_show = data.to_string(show_dimensions=True, max_rows=max_rows, **kwargs) title_template = '{:-^40}\n\n' return ''.join(( title_template.format(' Dataset Description '), f'{features_info}\n\n\n', title_template.format(' Dataset Content '), f'{data_to_show}\n\n', )) elif fmt == 'html': features_info = info.to_html(notebook=True, max_rows=50, **kwargs) data_to_show = data.to_html(notebook=True, max_rows=max_rows, **kwargs) return ''.join([ '<h4><b>Dataset Description</b></h4>', features_info, '<h4><b>Dataset Content</b></h4>', data_to_show ]) else: raise ValueError( '"fmt" parameter supports only next values [string, html]' ) def _ipython_display_(self): display_html(HTML(self.__repr__(fmt='html'))) def __len__(self) -> int: """Return number of samples in the member dataframe. 
Returns ------- int """ return self.n_samples def len_when_sampled(self, n_samples: int): """Return number of samples in the sampled dataframe this dataset is sampled with n_samples samples.""" return min(len(self), n_samples) def is_sampled(self, n_samples: int): """Return True if the dataset number of samples will decrease when sampled with n_samples samples.""" return len(self) > n_samples def _get_dataset_docs_tag(): """Return link to documentation for Dataset class.""" link = get_docs_link() + 'user-guide/tabular/dataset_object.html?html?utm_source=display_output' \ '&utm_medium=referral&utm_campaign=check_link' return f'<a href="{link}" target="_blank">Dataset docs</a>'
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/dataset.py
0.937017
0.75566
dataset.py
pypi
"""Module for base tabular abstractions.""" # pylint: disable=broad-except from typing import Callable, Mapping, Optional, Tuple, Union import numpy as np import pandas as pd from runml_checks.core import DatasetKind from runml_checks.core.check_result import CheckFailure from runml_checks.core.errors import runml_checksNotSupportedError from runml_checks.core.suite import BaseSuite, SuiteResult from runml_checks.tabular._shared_docs import docstrings from runml_checks.tabular.base_checks import ModelOnlyCheck, SingleDatasetCheck, TrainTestCheck from runml_checks.tabular.context import Context from runml_checks.tabular.dataset import Dataset from runml_checks.utils.decorators import deprecate_kwarg from runml_checks.utils.ipython import create_progress_bar from runml_checks.utils.typing import BasicModel __all__ = ['Suite'] class Suite(BaseSuite): """Tabular suite to run checks of types: TrainTestCheck, SingleDatasetCheck, ModelOnlyCheck.""" @classmethod def supported_checks(cls) -> Tuple: """Return tuple of supported check types of this suite.""" return TrainTestCheck, SingleDatasetCheck, ModelOnlyCheck @deprecate_kwarg(old_name='features_importance', new_name='feature_importance') @docstrings def run( self, train_dataset: Union[Dataset, pd.DataFrame, None] = None, test_dataset: Union[Dataset, pd.DataFrame, None] = None, model: Optional[BasicModel] = None, feature_importance: Optional[pd.Series] = None, feature_importance_force_permutation: bool = False, feature_importance_timeout: int = 120, scorers: Optional[Mapping[str, Union[str, Callable]]] = None, scorers_per_class: Optional[Mapping[str, Union[str, Callable]]] = None, with_display: bool = True, y_pred_train: Optional[np.ndarray] = None, y_pred_test: Optional[np.ndarray] = None, y_proba_train: Optional[np.ndarray] = None, y_proba_test: Optional[np.ndarray] = None, model_name: str = '', ) -> SuiteResult: """Run all checks. 
Parameters ---------- train_dataset: Optional[Union[Dataset, pd.DataFrame]] , default None object, representing data an estimator was fitted on test_dataset : Optional[Union[Dataset, pd.DataFrame]] , default None object, representing data an estimator predicts on model : Optional[BasicModel] , default None A scikit-learn-compatible fitted estimator instance {additional_context_params:2*indent} Returns ------- SuiteResult All results by all initialized checks """ context = Context( train_dataset, test_dataset, model, feature_importance=feature_importance, feature_importance_force_permutation=feature_importance_force_permutation, feature_importance_timeout=feature_importance_timeout, scorers=scorers, scorers_per_class=scorers_per_class, with_display=with_display, y_pred_train=y_pred_train, y_pred_test=y_pred_test, y_proba_train=y_proba_train, y_proba_test=y_proba_test, model_name=model_name ) progress_bar = create_progress_bar( iterable=list(self.checks.values()), name=self.name, unit='Check' ) # Run all checks results = [] for check in progress_bar: try: progress_bar.set_postfix({'Check': check.name()}, refresh=False) if isinstance(check, TrainTestCheck): if train_dataset is not None and test_dataset is not None: check_result = check.run_logic(context) context.finalize_check_result(check_result, check) results.append(check_result) else: msg = 'Check is irrelevant if not supplied with both train and test datasets' results.append(Suite._get_unsupported_failure(check, msg)) elif isinstance(check, SingleDatasetCheck): if train_dataset is not None: # In case of train & test, doesn't want to skip test if train fails. 
so have to explicitly # wrap it in try/except try: check_result = check.run_logic(context, dataset_kind=DatasetKind.TRAIN) context.finalize_check_result(check_result, check, DatasetKind.TRAIN) # In case of single dataset not need to edit the header if test_dataset is not None: check_result.header = f'{check_result.get_header()} - Train Dataset' except Exception as exp: check_result = CheckFailure(check, exp, ' - Train Dataset') results.append(check_result) if test_dataset is not None: try: check_result = check.run_logic(context, dataset_kind=DatasetKind.TEST) context.finalize_check_result(check_result, check, DatasetKind.TEST) # In case of single dataset not need to edit the header if train_dataset is not None: check_result.header = f'{check_result.get_header()} - Test Dataset' except Exception as exp: check_result = CheckFailure(check, exp, ' - Test Dataset') results.append(check_result) if train_dataset is None and test_dataset is None: msg = 'Check is irrelevant if dataset is not supplied' results.append(Suite._get_unsupported_failure(check, msg)) elif isinstance(check, ModelOnlyCheck): if model is not None: check_result = check.run_logic(context) context.finalize_check_result(check_result, check) results.append(check_result) else: msg = 'Check is irrelevant if model is not supplied' results.append(Suite._get_unsupported_failure(check, msg)) else: raise TypeError(f'Don\'t know how to handle type {check.__class__.__name__} in suite.') except Exception as exp: results.append(CheckFailure(check, exp)) return SuiteResult(self.name, results) @classmethod def _get_unsupported_failure(cls, check, msg): return CheckFailure(check, runml_checksNotSupportedError(msg))
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/suite.py
0.884595
0.325762
suite.py
pypi
"""Module for base tabular model abstractions.""" # pylint: disable=broad-except from typing import Any, List, Mapping, Tuple, Union from runml_checks.core.check_result import CheckFailure, CheckResult from runml_checks.core.errors import runml_checksNotSupportedError, runml_checksValueError from runml_checks.core.suite import BaseSuite, SuiteResult from runml_checks.tabular.context import Context from runml_checks.tabular.dataset import Dataset from runml_checks.utils.ipython import create_progress_bar __all__ = [ 'ModelComparisonSuite', 'ModelComparisonContext' ] class ModelComparisonSuite(BaseSuite): """Suite to run checks of types: CompareModelsBaseCheck.""" @classmethod def supported_checks(cls) -> Tuple: """Return tuple of supported check types of this suite.""" from runml_checks.tabular.base_checks import ModelComparisonCheck # pylint: disable=import-outside-toplevel return tuple([ModelComparisonCheck]) def run(self, train_datasets: Union[Dataset, List[Dataset]], test_datasets: Union[Dataset, List[Dataset]], models: Union[List[Any], Mapping[str, Any]] ) -> SuiteResult: """Run all checks. 
Parameters ---------- train_datasets : Union[Dataset, Container[Dataset]] representing data an estimator was fitted on test_datasets: Union[Dataset, Container[Dataset]] representing data an estimator was fitted on models : Union[Container[Any], Mapping[str, Any]] 2 or more scikit-learn-compatible fitted estimator instance Returns ------- SuiteResult All results by all initialized checks Raises ------ ValueError if check_datasets_policy is not of allowed types """ context = ModelComparisonContext(train_datasets, test_datasets, models) # Create progress bar progress_bar = create_progress_bar( iterable=list(self.checks.values()), name=self.name, unit='Check' ) # Run all checks results = [] for check in progress_bar: try: check_result = check.run_logic(context) results.append(check_result) except Exception as exp: results.append(CheckFailure(check, exp)) return SuiteResult(self.name, results) class ModelComparisonContext: """Contain processed input for model comparison checks.""" def __init__( self, train_datasets: Union[Dataset, List[Dataset]], test_datasets: Union[Dataset, List[Dataset]], models: Union[List[Any], Mapping[str, Any]] ): """Preprocess the parameters.""" # Validations if isinstance(train_datasets, Dataset) and isinstance(test_datasets, List): raise runml_checksNotSupportedError('Single train dataset with multiple test datasets is not supported.') if not isinstance(models, (List, Mapping)): raise runml_checksValueError('`models` must be a list or dictionary for compare models checks.') if len(models) < 2: raise runml_checksValueError('`models` must receive 2 or more models') # Some logic to assign names to models if isinstance(models, List): models_dict = {} for m in models: model_type = type(m).__name__ numerator = 1 name = model_type while name in models_dict: name = f'{model_type}_{numerator}' numerator += 1 models_dict[name] = m models = models_dict if not isinstance(train_datasets, List): train_datasets = [train_datasets] * len(models) if not 
isinstance(test_datasets, List): test_datasets = [test_datasets] * len(models) if len(train_datasets) != len(models): raise runml_checksValueError('number of train_datasets must equal to number of models') if len(test_datasets) != len(models): raise runml_checksValueError('number of test_datasets must equal to number of models') # Additional validations self.task_type = None self.contexts = [] for i in range(len(models)): train = train_datasets[i] test = test_datasets[i] model = list(models.values())[i] name = list(models.keys())[i] context = Context(train, test, model, model_name=name) if self.task_type is None: self.task_type = context.task_type elif self.task_type != context.task_type: raise runml_checksNotSupportedError('Got models of different task types') self.contexts.append(context) def __len__(self): """Return number of contexts.""" return len(self.contexts) def __iter__(self): """Return iterator over context objects.""" return iter(self.contexts) def __getitem__(self, item): """Return given context by index.""" return self.contexts[item] def finalize_check_result(self, check_result, check): """Run final processing on a check result which includes validation and conditions processing.""" # Validate the check result type if isinstance(check_result, CheckFailure): return if not isinstance(check_result, CheckResult): raise runml_checksValueError(f'Check {check.name()} expected to return CheckResult but got: ' + type(check_result).__name__) # Set reference between the check result and check check_result.check = check # Calculate conditions results check_result.process_conditions()
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/model_base.py
0.936503
0.524882
model_base.py
pypi
"""Module for tabular base checks.""" import abc from typing import Callable, List, Mapping, Optional, Union import numpy as np import pandas as pd from runml_checks.core.check_result import CheckFailure, CheckResult from runml_checks.core.checks import (BaseCheck, DatasetKind, ModelOnlyBaseCheck, SingleDatasetBaseCheck, TrainTestBaseCheck) from runml_checks.core.errors import runml_checksNotSupportedError from runml_checks.tabular import deprecation_warnings # pylint: disable=unused-import # noqa: F401 from runml_checks.tabular._shared_docs import docstrings from runml_checks.tabular.context import Context from runml_checks.tabular.dataset import Dataset from runml_checks.tabular.model_base import ModelComparisonContext from runml_checks.utils.decorators import deprecate_kwarg from runml_checks.utils.typing import BasicModel __all__ = [ 'SingleDatasetCheck', 'TrainTestCheck', 'ModelOnlyCheck', 'ModelComparisonCheck' ] class SingleDatasetCheck(SingleDatasetBaseCheck): """Parent class for checks that only use one dataset.""" context_type = Context @deprecate_kwarg(old_name='features_importance', new_name='feature_importance') @docstrings def run( self, dataset: Union[Dataset, pd.DataFrame], model: Optional[BasicModel] = None, model_name: str = '', feature_importance: Optional[pd.Series] = None, feature_importance_force_permutation: bool = False, feature_importance_timeout: int = 120, scorers: Optional[Mapping[str, Union[str, Callable]]] = None, scorers_per_class: Optional[Mapping[str, Union[str, Callable]]] = None, with_display: bool = True, y_pred_train: Optional[np.ndarray] = None, y_pred_test: Optional[np.ndarray] = None, y_proba_train: Optional[np.ndarray] = None, y_proba_test: Optional[np.ndarray] = None, ) -> CheckResult: """Run check. 
Parameters ---------- dataset: Union[Dataset, pd.DataFrame] Dataset or DataFrame object, representing data an estimator was fitted on model: Optional[BasicModel], default: None A scikit-learn-compatible fitted estimator instance {additional_context_params:2*indent} """ assert self.context_type is not None context = self.context_type( # pylint: disable=not-callable train=dataset, model=model, model_name=model_name, feature_importance=feature_importance, feature_importance_force_permutation=feature_importance_force_permutation, feature_importance_timeout=feature_importance_timeout, scorers=scorers, scorers_per_class=scorers_per_class, with_display=with_display, y_pred_train=y_pred_train, y_pred_test=y_pred_test, y_proba_train=y_proba_train, y_proba_test=y_proba_test, ) result = self.run_logic(context, dataset_kind=DatasetKind.TRAIN) context.finalize_check_result(result, self, DatasetKind.TRAIN) return result @abc.abstractmethod def run_logic(self, context, dataset_kind) -> CheckResult: """Run check.""" raise NotImplementedError() class TrainTestCheck(TrainTestBaseCheck): """Parent class for checks that compare two datasets. The class checks train dataset and test dataset for model training and test. 
""" context_type = Context @deprecate_kwarg(old_name='features_importance', new_name='feature_importance') @docstrings def run( self, train_dataset: Union[Dataset, pd.DataFrame], test_dataset: Union[Dataset, pd.DataFrame], model: Optional[BasicModel] = None, model_name: str = '', feature_importance: Optional[pd.Series] = None, feature_importance_force_permutation: bool = False, feature_importance_timeout: int = 120, scorers: Optional[Mapping[str, Union[str, Callable]]] = None, scorers_per_class: Optional[Mapping[str, Union[str, Callable]]] = None, with_display: bool = True, y_pred_train: Optional[np.ndarray] = None, y_pred_test: Optional[np.ndarray] = None, y_proba_train: Optional[np.ndarray] = None, y_proba_test: Optional[np.ndarray] = None, ) -> CheckResult: """Run check. Parameters ---------- train_dataset: Union[Dataset, pd.DataFrame] Dataset or DataFrame object, representing data an estimator was fitted on test_dataset: Union[Dataset, pd.DataFrame] Dataset or DataFrame object, representing data an estimator predicts on model: Optional[BasicModel], default: None A scikit-learn-compatible fitted estimator instance {additional_context_params:2*indent} """ assert self.context_type is not None context = self.context_type( # pylint: disable=not-callable train=train_dataset, test=test_dataset, model=model, model_name=model_name, feature_importance=feature_importance, feature_importance_force_permutation=feature_importance_force_permutation, feature_importance_timeout=feature_importance_timeout, scorers=scorers, scorers_per_class=scorers_per_class, y_pred_train=y_pred_train, y_pred_test=y_pred_test, y_proba_train=y_proba_train, y_proba_test=y_proba_test, with_display=with_display, ) result = self.run_logic(context) context.finalize_check_result(result, self) return result @abc.abstractmethod def run_logic(self, context) -> CheckResult: """Run check.""" raise NotImplementedError() class ModelOnlyCheck(ModelOnlyBaseCheck): """Parent class for checks that only use a 
model and no datasets.""" context_type = Context @deprecate_kwarg(old_name='features_importance', new_name='feature_importance') @docstrings def run( self, model: BasicModel, model_name: str = '', feature_importance: Optional[pd.Series] = None, feature_importance_force_permutation: bool = False, feature_importance_timeout: int = 120, scorers: Optional[Mapping[str, Union[str, Callable]]] = None, scorers_per_class: Optional[Mapping[str, Union[str, Callable]]] = None, with_display: bool = True, y_pred_train: Optional[np.ndarray] = None, y_pred_test: Optional[np.ndarray] = None, y_proba_train: Optional[np.ndarray] = None, y_proba_test: Optional[np.ndarray] = None, ) -> CheckResult: """Run check. Parameters ---------- model: BasicModel A scikit-learn-compatible fitted estimator instance {additional_context_params:2*indent} """ assert self.context_type is not None context = self.context_type( model=model, model_name=model_name, feature_importance=feature_importance, feature_importance_force_permutation=feature_importance_force_permutation, feature_importance_timeout=feature_importance_timeout, scorers=scorers, scorers_per_class=scorers_per_class, y_pred_train=y_pred_train, y_pred_test=y_pred_test, y_proba_train=y_proba_train, y_proba_test=y_proba_test, with_display=with_display ) result = self.run_logic(context) context.finalize_check_result(result, self) return result @abc.abstractmethod def run_logic(self, context) -> CheckResult: """Run check.""" raise NotImplementedError() @classmethod def _get_unsupported_failure(cls, check, msg): return CheckFailure(check, runml_checksNotSupportedError(msg)) class ModelComparisonCheck(BaseCheck): """Parent class for check that compares between two or more models.""" def run( self, train_datasets: Union[Dataset, List[Dataset]], test_datasets: Union[Dataset, List[Dataset]], models: Union[List[BasicModel], Mapping[str, BasicModel]] ) -> CheckResult: """Initialize context and pass to check logic. 
Parameters ---------- train_datasets: Union[Dataset, List[Dataset]] train datasets test_datasets: Union[Dataset, List[Dataset]] test datasets models: Union[List[BasicModel], Mapping[str, BasicModel]] list or map of models """ context = ModelComparisonContext(train_datasets, test_datasets, models) result = self.run_logic(context) context.finalize_check_result(result, self) return result @abc.abstractmethod def run_logic(self, multi_context: ModelComparisonContext) -> CheckResult: """Implement here logic of check.""" raise NotImplementedError()
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/base_checks.py
0.942619
0.395835
base_checks.py
pypi
"""Module importing all tabular checks.""" from .data_integrity import (ColumnsInfo, ConflictingLabels, DataDuplicates, FeatureLabelCorrelation, IsSingleValue, MixedDataTypes, MixedNulls, OutlierSampleDetection, SpecialCharacters, StringLengthOutOfBounds, StringMismatch) from .model_evaluation import (BoostingOverfit, CalibrationScore, ConfusionMatrixReport, ModelErrorAnalysis, ModelInferenceTime, ModelInfo, MultiModelPerformanceReport, PerformanceReport, RegressionErrorDistribution, RegressionSystematicError, RocReport, SegmentPerformance, SimpleModelComparison, TrainTestPredictionDrift, UnusedFeatures, WeakSegmentsPerformance) from .train_test_validation import (CategoryMismatchTrainTest, DatasetsSizeComparison, DateTrainTestLeakageDuplicates, DateTrainTestLeakageOverlap, DominantFrequencyChange, FeatureLabelCorrelationChange, IdentifierLabelCorrelation, IndexTrainTestLeakage, NewLabelTrainTest, StringMismatchComparison, TrainTestFeatureDrift, TrainTestLabelDrift, TrainTestSamplesMix, WholeDatasetDrift) __all__ = [ # integrity checks 'MixedNulls', 'StringMismatch', 'MixedDataTypes', 'IsSingleValue', 'SpecialCharacters', 'StringLengthOutOfBounds', 'StringMismatchComparison', 'DominantFrequencyChange', 'DataDuplicates', 'CategoryMismatchTrainTest', 'NewLabelTrainTest', 'ConflictingLabels', 'OutlierSampleDetection', # methodology checks 'BoostingOverfit', 'UnusedFeatures', 'FeatureLabelCorrelation', 'FeatureLabelCorrelationChange', 'IndexTrainTestLeakage', 'TrainTestSamplesMix', 'DateTrainTestLeakageDuplicates', 'DateTrainTestLeakageOverlap', 'IdentifierLabelCorrelation', 'ModelInferenceTime', 'DatasetsSizeComparison', # overview checks 'ModelInfo', 'ColumnsInfo', # distribution checks 'TrainTestFeatureDrift', 'TrainTestLabelDrift', 'WholeDatasetDrift', 'TrainTestPredictionDrift', # performance checks 'PerformanceReport', 'ConfusionMatrixReport', 'RocReport', 'SimpleModelComparison', 'CalibrationScore', 'SegmentPerformance', 'RegressionSystematicError', 
'RegressionErrorDistribution', 'MultiModelPerformanceReport', 'WeakSegmentsPerformance', 'ModelErrorAnalysis' ]
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/__init__.py
0.851645
0.445831
__init__.py
pypi
"""The feature label correlation check module.""" import typing as t import runml_checks.ppscore as pps from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.core.check_utils.feature_label_correlation_utils import get_pps_figure, pd_series_to_trace from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.tabular.utils.messages import get_condition_passed_message from runml_checks.utils.strings import format_number from runml_checks.utils.typing import Hashable __all__ = ['FeatureLabelCorrelation'] FLC = t.TypeVar('FLC', bound='FeatureLabelCorrelation') pps_url = 'https://docs.runml_checks.com/en/stable/checks_gallery/tabular/' \ 'train_test_validation/plot_feature_label_correlation_change.html' pps_html = f'<a href={pps_url} target="_blank">Predictive Power Score</a>' class FeatureLabelCorrelation(SingleDatasetCheck): """Return the PPS (Predictive Power Score) of all features in relation to the label. The PPS represents the ability of a feature to single-handedly predict another feature or label. A high PPS (close to 1) can mean that this feature's success in predicting the label is actually due to data leakage - meaning that the feature holds information that is based on the label to begin with. 
Uses the ppscore package - for more info, see https://github.com/8080labs/ppscore Parameters ---------- ppscore_params : dict , default: None dictionary of additional parameters for the ppscore.predictors function n_top_features : int , default: 5 Number of features to show, sorted by the magnitude of difference in PPS random_state : int , default: None Random state for the ppscore.predictors function """ def __init__( self, ppscore_params=None, n_top_features: int = 5, random_state: int = None, **kwargs ): super().__init__(**kwargs) self.ppscore_params = ppscore_params or {} self.n_top_features = n_top_features self.random_state = random_state def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. Returns ------- CheckResult value is a dictionary with PPS per feature column. data is a bar graph of the PPS of each feature. Raises ------ runml_checksValueError If the object is not a Dataset instance with a label. """ dataset = context.get_data_by_kind(dataset_kind) dataset.assert_features() dataset.assert_label() relevant_columns = dataset.features + [dataset.label_name] df_pps = pps.predictors(df=dataset.data[relevant_columns], y=dataset.label_name, random_seed=self.random_state, **self.ppscore_params) s_ppscore = df_pps.set_index('x', drop=True)['ppscore'] if context.with_display: top_to_show = s_ppscore.head(self.n_top_features) fig = get_pps_figure(per_class=False, n_of_features=len(top_to_show)) fig.add_trace(pd_series_to_trace(top_to_show, dataset_kind.value)) text = [ 'The Predictive Power Score (PPS) is used to estimate the ability of a feature to predict the ' f'label by itself (Read more about {pps_html}).' 
' A high PPS (close to 1) can mean that this feature\'s success in predicting the label is' ' actually due to data leakage - meaning that the feature holds information that is based on the label ' 'to begin with.'] # display only if not all scores are 0 display = [fig, *text] if s_ppscore.sum() else None else: display = None return CheckResult(value=s_ppscore.to_dict(), display=display, header='Feature Label Correlation') def add_condition_feature_pps_less_than(self: FLC, threshold: float = 0.8) -> FLC: """ Add condition that will check that pps of the specified feature(s) is less than the threshold. Parameters ---------- threshold : float , default: 0.8 pps upper bound Returns ------- FLC """ def condition(value: t.Dict[Hashable, float]) -> ConditionResult: failed_features = { feature_name: format_number(pps_value) for feature_name, pps_value in value.items() if pps_value >= threshold } if failed_features: message = f'Found {len(failed_features)} out of {len(value)} features with PPS above threshold: ' \ f'{failed_features}' return ConditionResult(ConditionCategory.FAIL, message) else: return ConditionResult(ConditionCategory.PASS, get_condition_passed_message(value)) return self.add_condition(f'Features\' Predictive Power Score is less than {format_number(threshold)}', condition)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/data_integrity/feature_label_correlation.py
0.945261
0.553928
feature_label_correlation.py
pypi
"""module contains Invalid Chars check.""" from collections import defaultdict from typing import List, Union import pandas as pd from pandas.api.types import infer_dtype from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.tabular.utils.messages import get_condition_passed_message from runml_checks.utils.dataframes import select_from_dataframe from runml_checks.utils.features import N_TOP_MESSAGE, column_importance_sorter_df from runml_checks.utils.strings import format_percent, string_baseform from runml_checks.utils.typing import Hashable __all__ = ['SpecialCharacters'] class SpecialCharacters(SingleDatasetCheck): """Search in column[s] for values that contains only special characters. Parameters ---------- columns : Union[Hashable, List[Hashable]] , default: None Columns to check, if none are given checks all columns except ignored ones. ignore_columns : Union[Hashable, List[Hashable]] , default: None Columns to ignore, if none given checks based on columns variable. n_most_common : int , default: 2 Number of most common special-only samples to show in results n_top_columns : int , optional amount of columns to show ordered by feature importance (date, index, label are first) """ def __init__( self, columns: Union[Hashable, List[Hashable], None] = None, ignore_columns: Union[Hashable, List[Hashable], None] = None, n_most_common: int = 2, n_top_columns: int = 10, **kwargs ): super().__init__(**kwargs) self.columns = columns self.ignore_columns = ignore_columns self.n_most_common = n_most_common self.n_top_columns = n_top_columns def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. Returns ------- CheckResult value is dict of column as key and percent of special characters samples as value display is DataFrame with ('invalids') for any column with special_characters chars. 
""" dataset = context.get_data_by_kind(dataset_kind) df = select_from_dataframe(dataset.data, self.columns, self.ignore_columns) # Result value: { Column Name: pct} display_array = [] result = {} for column_name in df.columns: column_data = df[column_name] # Get dict of samples to count special_samples = _get_special_samples(column_data) if special_samples: result[column_name] = sum(special_samples.values()) / column_data.size if context.with_display: percent = format_percent(sum(special_samples.values()) / column_data.size) sortkey = lambda x: x[1] top_n_samples_items = sorted(special_samples.items(), key=sortkey, reverse=True) top_n_samples_items = top_n_samples_items[:self.n_most_common] top_n_samples_values = [item[0] for item in top_n_samples_items] display_array.append([column_name, percent, top_n_samples_values]) else: result[column_name] = 0 if display_array: df_graph = pd.DataFrame(display_array, columns=['Column Name', '% Special-Only Samples', 'Most Common Special-Only Samples']) df_graph = df_graph.set_index(['Column Name']) df_graph = column_importance_sorter_df(df_graph, dataset, context.feature_importance, self.n_top_columns, col='Column Name') display = [N_TOP_MESSAGE % self.n_top_columns, df_graph] else: display = None return CheckResult(result, display=display) def add_condition_ratio_of_special_characters_less_or_equal(self, max_ratio: float = 0.001): """Add condition - ratio of entirely special character in column is less or equal to the threshold. Parameters ---------- max_ratio : float , default: 0.001 Maximum ratio allowed. 
""" name = f'Ratio of samples containing solely special character is less or equal to {format_percent(max_ratio)}' def condition(result): not_passed = {k: format_percent(v) for k, v in result.items() if v > max_ratio} if not_passed: details = f'Found {len(not_passed)} out of {len(result)} relevant columns with ratio above threshold: '\ f'{not_passed}' return ConditionResult(ConditionCategory.WARN, details) return ConditionResult(ConditionCategory.PASS, get_condition_passed_message(result)) return self.add_condition(name, condition) def _get_special_samples(column_data: pd.Series) -> Union[dict, None]: if not _is_stringed_type(column_data): return None samples_to_count = defaultdict(lambda: 0) for sample in column_data: if isinstance(sample, str) and len(sample) > 0 and len(string_baseform(sample, True)) == 0: samples_to_count[sample] = samples_to_count[sample] + 1 return samples_to_count or None def _is_stringed_type(col) -> bool: return infer_dtype(col) not in ['integer', 'decimal', 'floating']
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/data_integrity/special_chars.py
0.9357
0.519826
special_chars.py
pypi
"""module contains Data Duplicates check.""" from typing import List, Union import pandas as pd from typing_extensions import TypedDict from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.utils.strings import format_percent from runml_checks.utils.typing import Hashable __all__ = ['ConflictingLabels'] class ResultValue(TypedDict): percent: float samples: List[pd.DataFrame] class ConflictingLabels(SingleDatasetCheck): """Find samples which have the exact same features' values but different labels. Parameters ---------- columns : Union[Hashable, List[Hashable]] , default: None List of columns to check, if none given checks all columns Except ignored ones. ignore_columns : Union[Hashable, List[Hashable]] , default: None List of columns to ignore, if none given checks based on columns variable. n_to_show : int , default: 5 number of most common ambiguous samples to show. """ def __init__( self, columns: Union[Hashable, List[Hashable], None] = None, ignore_columns: Union[Hashable, List[Hashable], None] = None, n_to_show: int = 5, **kwargs ): super().__init__(**kwargs) self.columns = columns self.ignore_columns = ignore_columns self.n_to_show = n_to_show def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. Returns ------- CheckResult Value of result is a dictionary that contains percentage of ambiguous samples and list of samples with confliction labels. Display shows 'n_to_show' most ambiguous labels with their samples. 
""" dataset = context.get_data_by_kind(dataset_kind) context.assert_classification_task() dataset.assert_label() dataset = dataset.select(self.columns, self.ignore_columns, keep_label=True) features = dataset.features label_name = dataset.label_name # HACK: pandas have bug with groupby on category dtypes, so until it fixed, change dtypes manually df = dataset.data category_columns = df.dtypes[df.dtypes == 'category'].index.tolist() if category_columns: df = df.astype({c: 'object' for c in category_columns}) group_unique_data = df.groupby(features, dropna=False) group_unique_labels = group_unique_data.nunique()[label_name] num_ambiguous = 0 ambiguous_label_name = 'Observed Labels' samples = [] display_samples = [] data = sorted( zip(group_unique_labels, group_unique_data), key=lambda x: x[0], reverse=True ) for num_labels, group_data in data: if num_labels == 1: continue group_df = group_data[1] n_data_sample = group_df.shape[0] num_ambiguous += n_data_sample samples.append(group_df.loc[:, [label_name, *features]].copy()) if context.with_display is True: display_sample = dict(group_df[features].iloc[0]) ambiguous_labels = tuple(sorted(group_df[label_name].unique())) display_sample[ambiguous_label_name] = ambiguous_labels display_samples.append(display_sample) if len(display_samples) == 0: display = None else: display = pd.DataFrame.from_records(display_samples[:self.n_to_show]) display.set_index(ambiguous_label_name, inplace=True) display = [ 'Each row in the table shows an example of a data sample ' 'and the its observed labels as found in the dataset. ' f'Showing top {self.n_to_show} of {display.shape[0]}', display ] return CheckResult( display=display, value=ResultValue( percent=num_ambiguous / dataset.n_samples, samples=samples, ) ) def add_condition_ratio_of_conflicting_labels_less_or_equal(self, max_ratio=0): """Add condition - require ratio of samples with conflicting labels less or equal to max_ratio. 
Parameters ---------- max_ratio : float , default: 0 Maximum ratio of samples with multiple labels. """ def max_ratio_condition(result: ResultValue) -> ConditionResult: percent = result['percent'] details = f'Ratio of samples with conflicting labels: {format_percent(percent)}' category = ConditionCategory.PASS if percent <= max_ratio else ConditionCategory.FAIL return ConditionResult(category, details) return self.add_condition(f'Ambiguous sample ratio is less or equal to {format_percent(max_ratio)}', max_ratio_condition)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/data_integrity/conflicting_labels.py
0.947223
0.696778
conflicting_labels.py
pypi
"""Module contains Mixed Nulls check.""" import math from typing import Dict, Iterable, List, Union import numpy as np import pandas as pd from pandas.api.types import is_categorical_dtype from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.core.errors import runml_checksValueError from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.tabular.utils.messages import get_condition_passed_message from runml_checks.utils.dataframes import select_from_dataframe from runml_checks.utils.features import N_TOP_MESSAGE from runml_checks.utils.strings import format_percent, string_baseform from runml_checks.utils.typing import Hashable __all__ = ['MixedNulls'] DEFAULT_NULL_VALUES = {'none', 'null', 'nan', 'na', '', '\x00', '\x00\x00'} class MixedNulls(SingleDatasetCheck): """Search for various types of null values, including string representations of null. Parameters ---------- null_string_list : Iterable[str] , default: None List of strings to be considered alternative null representations check_nan : bool , default: True Whether to add to null list to check also NaN values columns : Union[Hashable, List[Hashable]] , default: None Columns to check, if none are given checks all columns except ignored ones. 
ignore_columns : Union[Hashable, List[Hashable]] , default: None Columns to ignore, if none given checks based on columns variable n_top_columns : int , optional amount of columns to show ordered by feature importance (date, index, label are first) """ def __init__( self, null_string_list: Iterable[str] = None, check_nan: bool = True, columns: Union[Hashable, List[Hashable], None] = None, ignore_columns: Union[Hashable, List[Hashable], None] = None, n_top_columns: int = 10, **kwargs ): super().__init__(**kwargs) self.null_string_list = null_string_list self.check_nan = check_nan self.columns = columns self.ignore_columns = ignore_columns self.n_top_columns = n_top_columns def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. Returns ------- CheckResult Value is dict with columns as key, and dict of null values as value: {column: {null_value: {count: x, percent: y}, ...}, ...} display is DataFrame with columns ('Column Name', 'Value', 'Count', 'Percentage') for any column that has more than 1 null values. 
""" dataset = context.get_data_by_kind(dataset_kind) df = dataset.data df = select_from_dataframe(df, self.columns, self.ignore_columns) null_string_list = self._validate_null_string_list(self.null_string_list) # Result value display_array = [] result_dict = {} for column_name in list(df.columns): column_data = df[column_name] if is_categorical_dtype(column_data) is True: # NOTE: # 'pandas.Series.value_counts' and 'pandas.Series.apply' # work in an unusual way with categorical data types # - 'value_counts' returns all categorical values even if they are not in series # - 'apply' applies function to each category, not to values # therefore we processing categorical dtypes differently # NOTE: # 'Series.value_counts' method transforms null values like 'None', 'pd.Na', 'pd.NaT' # into 'np.nan' therefore it cannot be used for usual dtypes, because we will lose info # about all different null types in the column null_counts = {} for value, count in column_data.value_counts(dropna=False).to_dict().items(): if count > 0: if pd.isna(value): null_counts[nan_type(value)] = count elif string_baseform(value) in null_string_list: null_counts[repr(value).replace('\'', '"')] = count else: string_null_counts = { repr(value).replace('\'', '"'): count for value, count in column_data.value_counts(dropna=True).iteritems() if string_baseform(value) in null_string_list } nan_data_counts = column_data[column_data.isna()].apply(nan_type).value_counts().to_dict() null_counts = {**string_null_counts, **nan_data_counts} result_dict[column_name] = {} # Save the column nulls info for null_value, count in null_counts.items(): percent = count / len(column_data) display_array.append([column_name, null_value, count, format_percent(percent)]) result_dict[column_name][null_value] = {'count': count, 'percent': percent} # Create dataframe to display table if context.with_display and display_array: df_graph = pd.DataFrame(display_array, columns=['Column Name', 'Value', 'Count', 'Percent of data']) order 
= df_graph['Column Name'].value_counts(ascending=False).index[:self.n_top_columns] df_graph = df_graph.set_index(['Column Name', 'Value']) df_graph = df_graph.loc[order, :] display = [N_TOP_MESSAGE % self.n_top_columns, df_graph] else: display = None return CheckResult(result_dict, display=display) def _validate_null_string_list(self, nsl) -> set: """Validate the object given is a list of strings. If null is given return default list of null values. Parameters ---------- nsl Object to validate Returns ------- set Returns list of null values as set object """ result: set if nsl: if not isinstance(nsl, Iterable): raise runml_checksValueError('null_string_list must be an iterable') if len(nsl) == 0: raise runml_checksValueError("null_string_list can't be empty list") if any((not isinstance(string, str) for string in nsl)): raise runml_checksValueError("null_string_list must contain only items of type 'str'") result = set(nsl) else: # Default values result = set(DEFAULT_NULL_VALUES) return result def add_condition_different_nulls_less_equal_to(self, max_allowed_null_types: int = 1): """Add condition - require column's number of different null values to be less or equal to threshold. Parameters ---------- max_allowed_null_types : int , default: 1 Number of different null value types which is the maximum allowed. 
""" def condition(result: Dict) -> ConditionResult: not_passing_columns = [k for k, v in result.items() if len(v) > max_allowed_null_types] if not_passing_columns: details = f'Found {len(not_passing_columns)} out of {len(result)} columns with amount of null types ' \ f'above threshold: {not_passing_columns}' return ConditionResult(ConditionCategory.FAIL, details) else: return ConditionResult(ConditionCategory.PASS, get_condition_passed_message(result)) return self.add_condition(f'Number of different null types is less or equal to {max_allowed_null_types}', condition) def nan_type(x): if x is np.nan: return 'numpy.nan' elif x is pd.NA: return 'pandas.NA' elif x is pd.NaT: return 'pandas.NaT' elif isinstance(x, float) and math.isnan(x): return 'math.nan' return str(x)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/data_integrity/mixed_nulls.py
0.935729
0.471041
mixed_nulls.py
pypi
"""String length outlier check.""" from typing import Dict, List, Tuple, Union import numpy as np import pandas as pd from pandas import DataFrame, Series from scipy import stats from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.tabular.utils.messages import get_condition_passed_message from runml_checks.utils.dataframes import select_from_dataframe from runml_checks.utils.features import N_TOP_MESSAGE, column_importance_sorter_df from runml_checks.utils.strings import format_number, format_percent, is_string_column from runml_checks.utils.typing import Hashable __all__ = ['StringLengthOutOfBounds'] class StringLengthOutOfBounds(SingleDatasetCheck): """Detect strings with length that is much longer/shorter than the identified "normal" string lengths. Parameters ---------- columns :Union[Hashable, List[Hashable]] , default: None Columns to check, if none are given checks all columns except ignored ones. ignore_columns : Union[Hashable, List[Hashable]] , default: None Columns to ignore, if none given checks based on columns variable num_percentiles : int , default: 1000 Number of percentiles values to retrieve for the length of the samples in the string column. Affects the resolution of string lengths that is used to detect outliers. inner_quantile_range : int , default: 94 The int upper percentile [0-100] defining the inner percentile range. E.g. for 98 the range would be 2%-98%. outlier_factor : int , default: 4 Strings would be defined as outliers if their length is outlier_factor times more/less than the values inside the inner quantile range. min_length_difference : int , default: 5 The minimum length difference to be considered as outlier. min_length_ratio_difference : int , default: 0.5 Used to calculate the minimum length difference to be considered as outlier. (calculated form this times the average of the normal lengths.) 
min_unique_value_ratio : float , default: 0.01 Min min_unique_values : int , default: 100 Minimum unique values in column to calculate string length outlier n_top_columns : int , optional amount of columns to show ordered by feature importance (date, index, label are first) outlier_length_to_show :int , default: 50 Maximum length of outlier to show in results. If an outlier is longer it is trimmed and added '...' samples_per_range_to_show : int , default: 3 Number of outlier samples to show in results per outlier range found. """ def __init__( self, columns: Union[Hashable, List[Hashable]] = None, ignore_columns: Union[Hashable, List[Hashable]] = None, num_percentiles: int = 1000, inner_quantile_range: int = 94, outlier_factor: int = 4, min_length_difference: int = 5, min_length_ratio_difference: float = 0.5, min_unique_value_ratio: float = 0.01, min_unique_values: int = 100, n_top_columns: int = 10, outlier_length_to_show: int = 50, samples_per_range_to_show: int = 3, **kwargs ): super().__init__(**kwargs) self.columns = columns self.ignore_columns = ignore_columns self.num_percentiles = num_percentiles self.inner_quantile_range = inner_quantile_range self.outlier_factor = outlier_factor self.n_top_columns = n_top_columns self.min_length_difference = min_length_difference self.min_length_ratio_difference = min_length_ratio_difference self.min_unique_value_ratio = min_unique_value_ratio self.min_unique_values = min_unique_values self.outlier_length_to_show = outlier_length_to_show self.samples_per_range_to_show = samples_per_range_to_show def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check.""" dataset = context.get_data_by_kind(dataset_kind) df = select_from_dataframe(dataset.data, self.columns, self.ignore_columns) display_format = [] results = {} for column_name in df.columns: column: Series = df[column_name].dropna() if column_name in dataset.cat_features or not is_string_column(column): continue results[column_name] = {'outliers': 
[]} string_length_column = column.map(lambda x: len(str(x))) # If not a lot of unique values, calculate the percentiles for existing values. if string_length_column.nunique() < self.num_percentiles: string_length_column = string_length_column.sort_values() quantile_list = 100 * stats.rankdata(string_length_column, 'ordinal') / len(string_length_column) percentile_histogram = {quantile_list[i]: string_length_column.iloc[i] for i in range(len(string_length_column))} else: quantile_list = list(np.linspace(0.0, 100.0, self.num_percentiles + 1)) quantile_values = np.percentile(string_length_column, quantile_list, interpolation='nearest') percentile_histogram = dict(zip(quantile_list, list(quantile_values))) outlier_sections = outlier_on_percentile_histogram(percentile_histogram, self.inner_quantile_range, self.outlier_factor) if outlier_sections: quantiles_not_in_section = \ [x for x in quantile_list if all((not _in_range(x, a, b)) for a, b in outlier_sections)] non_outlier_section = (min(quantiles_not_in_section), max(quantiles_not_in_section)) non_outlier_lower_limit = percentile_histogram[non_outlier_section[0]] non_outlier_upper_limit = percentile_histogram[non_outlier_section[1]] # add to result for outlier_section in outlier_sections: lower_range, upper_range = self._filter_outlier_section(percentile_histogram[outlier_section[0]], percentile_histogram[outlier_section[1]], non_outlier_lower_limit, non_outlier_upper_limit) if lower_range > upper_range: continue outlier_samples = string_length_column[ string_length_column.between(lower_range, upper_range, inclusive='both')] if not outlier_samples.empty: outlier_examples = column[outlier_samples[:self.samples_per_range_to_show].index] outlier_examples = [trim(x, self.outlier_length_to_show) for x in outlier_examples] results[column_name]['normal_range'] = { 'min': non_outlier_lower_limit, 'max': non_outlier_upper_limit } results[column_name]['n_samples'] = column.size results[column_name]['outliers'].append({ 'range': 
{'min': lower_range, 'max': upper_range }, 'n_samples': outlier_samples.size }) if context.with_display: display_format.append([column_name, f'{format_number(non_outlier_lower_limit)} -' f' {format_number(non_outlier_upper_limit)}', f'{format_number(lower_range)} -' f' {format_number(upper_range)}', f'{outlier_samples.size}', outlier_examples ]) # Create dataframe to display graph if display_format: df_graph = DataFrame(display_format, columns=['Column Name', 'Range of Detected Normal String Lengths', 'Range of Detected Outlier String Lengths', 'Number of Outlier Samples', 'Example Samples']) df_graph = df_graph.set_index(['Column Name', 'Range of Detected Normal String Lengths', 'Range of Detected Outlier String Lengths']) df_graph = column_importance_sorter_df(df_graph, dataset, context.feature_importance, self.n_top_columns, col='Column Name') display = [N_TOP_MESSAGE % self.n_top_columns, df_graph] else: display = None return CheckResult(results, display=display) def _filter_outlier_section(self, lower_range, upper_range, non_outlier_lower_range, non_outlier_upper_range): lower_range_distance = lower_range - non_outlier_upper_range higher_range_distance = non_outlier_lower_range - upper_range non_outlier_range_average = (non_outlier_upper_range + non_outlier_lower_range) / 2 minimum_difference = max(self.min_length_difference, self.min_length_ratio_difference * non_outlier_range_average) if lower_range_distance > 0: if lower_range_distance < minimum_difference: lower_range += minimum_difference - lower_range_distance elif higher_range_distance > 0: if higher_range_distance < minimum_difference: upper_range -= minimum_difference - higher_range_distance return lower_range, upper_range def add_condition_number_of_outliers_less_or_equal(self, max_outliers: int = 0): """Add condition - require column's number of string length outliers to be less or equal to the threshold. 
Parameters ---------- max_outliers : int , default: 0 Number of string length outliers which is the maximum allowed. """ def compare_outlier_count(result: Dict) -> ConditionResult: not_passing_columns = {} for column_name in result.keys(): column = result[column_name] total_outliers = sum((outlier['n_samples'] for outlier in column['outliers'])) if total_outliers > max_outliers: not_passing_columns[column_name] = total_outliers if not_passing_columns: details = f'Found {len(not_passing_columns)} out of {len(result)} columns with number of outliers ' \ f'above threshold: {not_passing_columns}' return ConditionResult(ConditionCategory.FAIL, details) else: details = f'Passed for {len(result)} columns' return ConditionResult(ConditionCategory.PASS, details) return self.add_condition( f'Number of string length outliers is less or equal to {max_outliers}', compare_outlier_count) def add_condition_ratio_of_outliers_less_or_equal(self, max_ratio: float = 0): """Add condition - require column's ratio of string length outliers to be less or equal to threshold. Parameters ---------- max_ratio : float , default: 0 Maximum allowed string length outliers ratio. 
""" def compare_outlier_ratio(result: Dict): not_passing_columns = {} for column_name in result.keys(): column = result[column_name] total_outliers = sum((outlier['n_samples'] for outlier in column['outliers'])) ratio = total_outliers / column['n_samples'] if total_outliers > 0 else 0 if ratio > max_ratio: not_passing_columns[column_name] = format_percent(ratio) if not_passing_columns: details = f'Found {len(not_passing_columns)} out of {len(result)} relevant columns with outliers ' \ f'ratio above threshold: {not_passing_columns}' return ConditionResult(ConditionCategory.WARN, details) else: return ConditionResult(ConditionCategory.PASS, get_condition_passed_message(result)) return self.add_condition( f'Ratio of string length outliers is less or equal to {format_percent(max_ratio)}', compare_outlier_ratio) def outlier_on_percentile_histogram(percentile_histogram: Dict[float, float], iqr_percent: float = 85, outlier_factor: float = 5) -> Tuple[Tuple[float, float]]: """Get outlier ranges on histogram. Parameters ---------- percentile_histogram : Dict[float, float] histogram to search for outliers in shape [0.0-100.0]->[float] iqr_percent : float , default: 85 Interquartile range upper percentage, start searching for outliers outside IQR. outlier_factor : float , default: 5 a factor to consider outlier. 
Returns ------- Tuple[Tuple[float, float]] percent ranges in the histogram that are outliers, empty tuple if none is found """ if any((k < 0) or k > 100 for k in percentile_histogram.keys()): raise ValueError('dict keys must be percentiles between 0 and 100') if any((v < 0) for v in percentile_histogram.values()): raise ValueError('dict values must be counts that are non-negative numbers') percentile_df = pd.DataFrame.from_dict(percentile_histogram, orient='index') # calculate IQR with iqr_percent closest_point_upper = np.argmin(np.abs(iqr_percent - percentile_df.index.values)) closest_point_lower = np.argmin(np.abs(100 - iqr_percent - percentile_df.index.values)) center_point = np.argmin(np.abs(50 - percentile_df.index.values)) iqr = np.abs(percentile_df.iloc[closest_point_upper] - percentile_df.iloc[closest_point_lower]) outlier_df = percentile_df[ (np.abs(percentile_df - percentile_df.iloc[center_point]) > outlier_factor * iqr / 2).values ] outlier_section_list = [] lower_outlier_range = outlier_df[outlier_df.index < 50] if lower_outlier_range.shape[0] > 0: outlier_section_list.append((lower_outlier_range.index.values[0], lower_outlier_range.index.values[-1])) upper_outlier_range = outlier_df[outlier_df.index > 50] if upper_outlier_range.shape[0] > 0: outlier_section_list.append((upper_outlier_range.index.values[0], upper_outlier_range.index.values[-1])) return tuple(outlier_section_list) def _in_range(x, a, b): return a <= x <= b def trim(x, max_length): if len(x) <= max_length: return x return x[:max_length] + '...'
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/data_integrity/string_length_out_of_bounds.py
0.934701
0.554591
string_length_out_of_bounds.py
pypi
"""module contains Mixed Types check.""" from typing import List, Tuple, Union import numpy as np import pandas as pd from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.utils.dataframes import select_from_dataframe from runml_checks.utils.features import N_TOP_MESSAGE, column_importance_sorter_df from runml_checks.utils.strings import format_list, format_number, format_percent, get_ellipsis, is_string_column from runml_checks.utils.typing import Hashable __all__ = ['MixedDataTypes'] class MixedDataTypes(SingleDatasetCheck): """Detect columns which contain a mix of numerical and string values. Parameters ---------- columns : Union[Hashable, List[Hashable]] , default: None Columns to check, if none are given checks all columns except ignored ones. ignore_columns : Union[Hashable, List[Hashable]] , default: None Columns to ignore, if none given checks based on columns variable. n_top_columns : int , optional amount of columns to show ordered by feature importance (date, index, label are first) """ def __init__( self, columns: Union[Hashable, List[Hashable], None] = None, ignore_columns: Union[Hashable, List[Hashable], None] = None, n_top_columns: int = 10, **kwargs ): super().__init__(**kwargs) self.columns = columns self.ignore_columns = ignore_columns self.n_top_columns = n_top_columns def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. Returns ------- CheckResult value is a dict where the key is the column name as key and the value is the ratio 'strings' and 'numbers' for any column with mixed data types. numbers will also include hidden numbers in string representation. 
""" dataset = context.get_data_by_kind(dataset_kind) feature_importance = context.feature_importance df = select_from_dataframe(dataset.data, self.columns, self.ignore_columns) # Result value: { Column Name: {string: pct, numbers: pct}} display_dict = {} result_dict = {} for column_name in df.columns: column_data = df[column_name].dropna() mix = self._get_data_mix(column_data) result_dict[column_name] = mix if context.with_display and mix: # Format percents for display formated_mix = {} formated_mix['Strings'] = format_percent(mix['strings']) formated_mix['Numbers'] = format_percent(mix['numbers']) formated_mix['Strings examples'] = [get_ellipsis(strr, 15) for strr in mix['strings_examples']] formated_mix['Numbers examples'] = '[' + format_list([format_number(float(num)) for num in mix['numbers_examples']]) + ']' display_dict[column_name] = formated_mix if display_dict: df_graph = pd.DataFrame.from_dict(display_dict) df_graph = column_importance_sorter_df(df_graph.T, dataset, feature_importance, self.n_top_columns).T display = [N_TOP_MESSAGE % self.n_top_columns, df_graph] else: display = None return CheckResult(result_dict, display=display) def _get_data_mix(self, column_data: pd.Series) -> dict: if is_string_column(column_data): return self._check_mixed_percentage(column_data) return {} def _check_mixed_percentage(self, column_data: pd.Series) -> dict: total_rows = column_data.count() numbers_in_col = set() strings_in_col = set() def is_float(x) -> bool: try: float(x) if len(numbers_in_col) < 3: numbers_in_col.add(x) return True except ValueError: if len(strings_in_col) < 3: strings_in_col.add(x) return False nums = sum(column_data.apply(is_float)) if nums in (total_rows, 0): return {} # Then we've got a mix nums_pct = nums / total_rows strs_pct = (np.abs(nums - total_rows)) / total_rows return {'strings': strs_pct, 'numbers': nums_pct, 'strings_examples': strings_in_col, 'numbers_examples': numbers_in_col} def add_condition_rare_type_ratio_not_in_range(self, 
ratio_range: Tuple[float, float] = (0.01, 0.1)): """Add condition - Whether the ratio of rarer data type (strings or numbers) is not in the "danger zone". The "danger zone" represents the following logic - if the rarer data type is, for example, 30% of the data, than the column is presumably supposed to contain both numbers and string values. If the rarer data type is, for example, less than 1% of the data, than it's presumably a contamination, but a negligible one. In the range between, there is a real chance that the rarer data type may represent a problem to model training and inference. Parameters ---------- ratio_range : Tuple[float, float] , default: (0.01 , 0.1) The range between which the ratio of rarer data type in the column is considered a problem. """ def condition(result): no_mix_columns = [] failing_columns = [] for col, ratios in result.items(): # Columns without a mix contains empty dict for ratios if not ratios: no_mix_columns.append(col) continue rarer_ratio = min(ratios['strings'], ratios['numbers']) if ratio_range[0] < rarer_ratio < ratio_range[1]: failing_columns.append(col) if failing_columns: details = f'Found {len(failing_columns)} out of {len(result)} columns with non-negligible quantities ' \ f'of samples with a different data type from the majority of samples: {failing_columns}' return ConditionResult(ConditionCategory.WARN, details) details = f'{len(result)} columns passed: found {len(result) - len(no_mix_columns)} columns with ' \ f'negligible types mix, and {len(no_mix_columns)} columns without any types mix' return ConditionResult(ConditionCategory.PASS, details) name = f'Rare data types in column are either more than {format_percent(ratio_range[1])} or less ' \ f'than {format_percent(ratio_range[0])} of the data' return self.add_condition(name, condition)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/data_integrity/mixed_data_types.py
0.912033
0.513303
mixed_data_types.py
pypi
"""Module contains is_single_value check.""" from typing import List, Union import pandas as pd from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.tabular.utils.messages import get_condition_passed_message from runml_checks.utils.dataframes import select_from_dataframe from runml_checks.utils.typing import Hashable __all__ = ['IsSingleValue'] class IsSingleValue(SingleDatasetCheck): """Check if there are columns which have only a single unique value in all rows. Parameters ---------- columns : Union[Hashable, List[Hashable]] , default: None Columns to check, if none are given checks all columns except ignored ones. ignore_columns : Union[Hashable, List[Hashable]] , default: None Columns to ignore, if none given checks based on columns variable. ignore_nan : bool, default True Whether to ignore NaN values in a column when counting the number of unique values. """ def __init__( self, columns: Union[Hashable, List[Hashable], None] = None, ignore_columns: Union[Hashable, List[Hashable], None] = None, ignore_nan: bool = True, **kwargs ): super().__init__(**kwargs) self.columns = columns self.ignore_columns = ignore_columns self.ignore_nan = ignore_nan def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. 
Returns ------- CheckResult value of result is a dict of all columns with number of unique values in format {column: number_of_uniques} display is a series with columns that have only one unique """ # Validate parameters df = context.get_data_by_kind(dataset_kind).data df = select_from_dataframe(df, self.columns, self.ignore_columns) num_unique_per_col = df.nunique(dropna=self.ignore_nan) is_single_unique_value = (num_unique_per_col == 1) if context.with_display and is_single_unique_value.any(): # get names of columns with one unique value # pylint: disable=unsubscriptable-object cols_with_single = is_single_unique_value[is_single_unique_value].index.to_list() uniques = pd.DataFrame({ column_name: [column.sort_values(kind='mergesort').values[0]] for column_name, column in df.loc[:, cols_with_single].items() }) uniques.index = ['Single unique value'] display = ['The following columns have only one unique value', uniques] else: display = None return CheckResult(num_unique_per_col.to_dict(), header='Single Value in Column', display=display) def add_condition_not_single_value(self): """Add condition - no column contains only a single value.""" name = 'Does not contain only a single value' def condition(result): single_value_cols = [k for k, v in result.items() if v == 1] if single_value_cols: details = f'Found {len(single_value_cols)} out of {len(result)} columns with a single value: ' \ f'{single_value_cols}' return ConditionResult(ConditionCategory.FAIL, details) else: return ConditionResult(ConditionCategory.PASS, get_condition_passed_message(result)) return self.add_condition(name, condition)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/data_integrity/is_single_value.py
0.938695
0.478468
is_single_value.py
pypi
"""module contains the Identifier-Label Correlation check.""" from typing import Dict import pandas as pd import plotly.express as px import runml_checks.ppscore as pps from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.core.errors import DatasetValidationError from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.tabular.dataset import _get_dataset_docs_tag from runml_checks.tabular.utils.messages import get_condition_passed_message from runml_checks.utils.strings import format_number __all__ = ['IdentifierLabelCorrelation'] class IdentifierLabelCorrelation(SingleDatasetCheck): """Check if identifiers (Index/Date) can be used to predict the label. Parameters ---------- ppscore_params : any , default: None dictionary containing params to pass to ppscore predictor """ def __init__(self, ppscore_params=None, **kwargs): super().__init__(**kwargs) self.ppscore_params = ppscore_params or {} def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. Returns ------- CheckResult value is a dictionary with PPS per feature column. data is a bar graph of the PPS of each feature. Raises ------ runml_checksValueError If the object is not a Dataset instance with a label. """ dataset = context.get_data_by_kind(dataset_kind) dataset.assert_label() label_name = dataset.label_name relevant_data = pd.DataFrame({ it.name: it for it in (dataset.index_col, dataset.datetime_col, dataset.label_col) if it is not None }) if len(relevant_data.columns) == 1: raise DatasetValidationError( 'Dataset does not contain an index or a datetime', html=f'Dataset does not contain an index or a datetime. 
see {_get_dataset_docs_tag()}' ) df_pps = pps.predictors( df=relevant_data, y=label_name, random_seed=42, **self.ppscore_params ) df_pps = df_pps.set_index('x', drop=True) s_ppscore = df_pps['ppscore'] if context.with_display: xaxis_layout = dict( title='Identifiers', type='category', # NOTE: # the range, in this case, is needed to fix a problem with # too wide bars when there are only one or two of them`s on # the plot, plus it also centralizes them`s on the plot # The min value of the range (range(min. max)) is bigger because # otherwise bars will not be centralized on the plot, they will # appear on the left part of the plot (that is probably because of zero) range=(-3, len(s_ppscore.index) + 2) ) yaxis_layout = dict( fixedrange=True, range=(0, 1), title='predictive power score (PPS)' ) red_heavy_colorscale = [ [0, 'rgb(255, 255, 255)'], # jan [0.1, 'rgb(255,155,100)'], [0.2, 'rgb(255, 50, 50)'], [0.3, 'rgb(200, 0, 0)'], [1, 'rgb(55, 0, 0)'] ] figure = px.bar(s_ppscore, x=s_ppscore.index, y='ppscore', color='ppscore', color_continuous_scale=red_heavy_colorscale) figure.update_layout( height=400 ) figure.update_layout( dict( xaxis=xaxis_layout, yaxis=yaxis_layout, coloraxis=dict( cmin=0, cmax=1 ) ) ) text = ['The PPS represents the ability of a feature to single-handedly predict another feature or label.', 'For Identifier columns (Index/Date) PPS should be nearly 0, otherwise date and index have some ' 'predictive effect on the label.'] # display only if not all scores are 0 display = [figure, *text] if s_ppscore.sum() else None else: display = None return CheckResult(value=s_ppscore.to_dict(), display=display) def add_condition_pps_less_or_equal(self, max_pps: float = 0): """Add condition - require columns' pps to be less or equal to threshold. Parameters ---------- max_pps : float , default: 0 Maximum allowed string length outliers ratio. 
""" def compare_pps(result: Dict): not_passing_columns = {k: format_number(score) for k, score in result.items() if score > max_pps} if not_passing_columns: return ConditionResult(ConditionCategory.FAIL, f'Found {len(not_passing_columns)} out of {len(result)} columns with PPS above' f' threshold: {not_passing_columns}') else: return ConditionResult(ConditionCategory.PASS, get_condition_passed_message(result)) return self.add_condition( f'Identifier columns PPS is less or equal to {format_number(max_pps)}', compare_pps)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/data_integrity/identifier_label_correlation.py
0.953199
0.473231
identifier_label_correlation.py
pypi
"""String mismatch functions.""" import itertools from collections import defaultdict from typing import List, Union import pandas as pd from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.tabular.utils.messages import get_condition_passed_message from runml_checks.utils.dataframes import select_from_dataframe from runml_checks.utils.features import N_TOP_MESSAGE, column_importance_sorter_df from runml_checks.utils.strings import format_percent, get_base_form_to_variants_dict, is_string_column from runml_checks.utils.typing import Hashable __all__ = ['StringMismatch'] class StringMismatch(SingleDatasetCheck): """Detect different variants of string categories (e.g. "mislabeled" vs "mis-labeled") in a categorical column. This check tests all the categorical columns within a dataset and search for variants of similar strings. Specifically, we define similarity between strings if they are equal when ignoring case and non-letter characters. Example: We have a column with similar strings 'OK' and 'ok.' which are variants of the same category. Knowing they both exist we can fix our data so it will have only one category. Parameters ---------- columns : Union[Hashable, List[Hashable]] , default: None Columns to check, if none are given checks all columns except ignored ones. ignore_columns : Union[Hashable, List[Hashable]] , default: None Columns to ignore, if none given checks based on columns variable n_top_columns : int , optional amount of columns to show ordered by feature importance (date, index, label are first) n_samples : int , default: 1_000_000 number of samples to use for this check. random_state : int, default: 42 random seed for all check internals. 
""" def __init__( self, columns: Union[Hashable, List[Hashable], None] = None, ignore_columns: Union[Hashable, List[Hashable], None] = None, n_top_columns: int = 10, n_samples: int = 1_000_000, random_state: int = 42, **kwargs ): super().__init__(**kwargs) self.columns = columns self.ignore_columns = ignore_columns self.n_top_columns = n_top_columns self.n_samples = n_samples self.random_state = random_state def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check.""" dataset = context.get_data_by_kind(dataset_kind) df = select_from_dataframe(dataset.sample(self.n_samples, random_state=self.random_state).data, self.columns, self.ignore_columns) display_results = [] result_dict = {} for column_name in df.columns: column: pd.Series = df[column_name] if not is_string_column(column): continue result_dict[column_name] = {} value_counts = column.value_counts() uniques = column.unique() base_form_to_variants = get_base_form_to_variants_dict(uniques) for base_form, variants in base_form_to_variants.items(): if len(variants) == 1: continue result_dict[column_name][base_form] = [] for variant in variants: count = value_counts[variant] percent = count / len(column) result_dict[column_name][base_form].append({ 'variant': variant, 'count': count, 'percent': percent }) if context.with_display: display_results.append([column_name, base_form, variant, count, format_percent(percent)]) # Create dataframe to display graph if display_results: df_graph = pd.DataFrame(display_results, columns=['Column Name', 'Base form', 'Value', 'Count', '% In data']) df_graph = df_graph.set_index(['Column Name', 'Base form']) df_graph = column_importance_sorter_df(df_graph, dataset, context.feature_importance, self.n_top_columns, col='Column Name') display = [N_TOP_MESSAGE % self.n_top_columns, df_graph] else: display = None return CheckResult(result_dict, display=display) def add_condition_number_variants_less_or_equal(self, num_max_variants: int): """Add condition - number of 
variants (per string baseform) is less or equal to threshold. Parameters ---------- num_max_variants : int Maximum number of variants allowed. """ name = f'Number of string variants is less or equal to {num_max_variants}' return self.add_condition(name, _condition_variants_number, num_max_variants=num_max_variants) def add_condition_no_variants(self): """Add condition - no variants are allowed.""" name = 'No string variants' return self.add_condition(name, _condition_variants_number, num_max_variants=0) def add_condition_ratio_variants_less_or_equal(self, max_ratio: float = 0.01): """Add condition - percentage of variants in data is less or equal to threshold. Parameters ---------- max_ratio : float , default: 0.01 Maximum percent of variants allowed in data. """ def condition(result, max_ratio: float): not_passing_columns = {} for col, baseforms in result.items(): variants_percent_sum = 0 for variants_list in baseforms.values(): variants_percent_sum += sum([v['percent'] for v in variants_list]) if variants_percent_sum > max_ratio: not_passing_columns[col] = format_percent(variants_percent_sum) if not_passing_columns: details = f'Found {len(not_passing_columns)} out of {len(result)} relevant columns with variants ' \ f'ratio above threshold: {not_passing_columns}' return ConditionResult(ConditionCategory.FAIL, details) return ConditionResult(ConditionCategory.PASS, get_condition_passed_message(result)) name = f'Ratio of variants is less or equal to {format_percent(max_ratio)}' return self.add_condition(name, condition, max_ratio=max_ratio) def _condition_variants_number(result, num_max_variants: int, max_cols_to_show: int = 5, max_forms_to_show: int = 5): not_passing_variants = defaultdict(list) for col, baseforms in result.items(): for base_form, variants_list in baseforms.items(): if len(variants_list) > num_max_variants: if len(not_passing_variants[col]) < max_forms_to_show: not_passing_variants[col].append(base_form) if not_passing_variants: variants_to_show = 
dict(itertools.islice(not_passing_variants.items(), max_cols_to_show)) details = f'Found {len(not_passing_variants)} out of {len(result)} columns with amount of variants above ' \ f'threshold: {variants_to_show}' return ConditionResult(ConditionCategory.WARN, details) return ConditionResult(ConditionCategory.PASS, get_condition_passed_message(result))
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/data_integrity/string_mismatch.py
0.94121
0.479808
string_mismatch.py
pypi
"""module contains Data Duplicates check.""" from typing import List, Union import numpy as np from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.core.errors import DatasetValidationError from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.utils.dataframes import select_from_dataframe from runml_checks.utils.strings import format_list, format_percent from runml_checks.utils.typing import Hashable __all__ = ['DataDuplicates'] class DataDuplicates(SingleDatasetCheck): """Checks for duplicate samples in the dataset. Parameters ---------- columns : Union[Hashable, List[Hashable]] , default: None List of columns to check, if none given checks all columns Except ignored ones. ignore_columns : Union[Hashable, List[Hashable]] , default: None List of columns to ignore, if none given checks based on columns variable. n_to_show : int , default: 5 number of most common duplicated samples to show. """ def __init__( self, columns: Union[Hashable, List[Hashable], None] = None, ignore_columns: Union[Hashable, List[Hashable], None] = None, n_to_show: int = 5, **kwargs ): super().__init__(**kwargs) self.columns = columns self.ignore_columns = ignore_columns self.n_to_show = n_to_show def run_logic(self, context: Context, dataset_kind): """Run check. Returns ------- CheckResult percentage of duplicates and display of the top n_to_show most duplicated. 
""" df = context.get_data_by_kind(dataset_kind).data df = select_from_dataframe(df, self.columns, self.ignore_columns) data_columns = list(df.columns) n_samples = df.shape[0] if n_samples == 0: raise DatasetValidationError('Dataset does not contain any data') # HACK: pandas have bug with groupby on category dtypes, so until it fixed, change dtypes manually category_columns = df.dtypes[df.dtypes == 'category'].index.tolist() if category_columns: df = df.astype({c: 'object' for c in category_columns}) group_unique_data = df[data_columns].groupby(data_columns, dropna=False).size() n_unique = len(group_unique_data) percent_duplicate = 1 - (1.0 * int(n_unique)) / (1.0 * int(n_samples)) if context.with_display and percent_duplicate > 0: # patched for anonymous_series is_anonymous_series = 0 in group_unique_data.keys().names if is_anonymous_series: new_name = str(group_unique_data.keys().names) new_index = group_unique_data.keys() new_index.names = [new_name if name == 0 else name for name in new_index.names] group_unique_data = group_unique_data.reindex(new_index) duplicates_counted = group_unique_data.reset_index().rename(columns={0: 'Number of Duplicates'}) if is_anonymous_series: duplicates_counted.rename(columns={new_name: 0}, inplace=True) most_duplicates = duplicates_counted[duplicates_counted['Number of Duplicates'] > 1]. \ nlargest(self.n_to_show, ['Number of Duplicates']) indexes = [] for row in most_duplicates.iloc(): indexes.append(format_list(df.index[np.all(df == row[data_columns], axis=1)].to_list())) most_duplicates['Instances'] = indexes most_duplicates = most_duplicates.set_index(['Instances', 'Number of Duplicates']) text = f'{format_percent(percent_duplicate)} of data samples are duplicates. ' explanation = 'Each row in the table shows an example of duplicate data and the number of times it appears.' 
display = [text, explanation, most_duplicates] else: display = None return CheckResult(value=percent_duplicate, display=display) def add_condition_ratio_less_or_equal(self, max_ratio: float = 0): """Add condition - require duplicate ratio to be less or equal to max_ratio. Parameters ---------- max_ratio : float , default: 0 Maximum ratio of duplicates. """ def max_ratio_condition(result: float) -> ConditionResult: details = f'Found {format_percent(result)} duplicate data' category = ConditionCategory.PASS if result <= max_ratio else ConditionCategory.WARN return ConditionResult(category, details) return self.add_condition(f'Duplicate data ratio is less or equal to {format_percent(max_ratio)}', max_ratio_condition)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/data_integrity/data_duplicates.py
0.939311
0.57517
data_duplicates.py
pypi
"""module contains the Feature-Feature Correlation check.""" from typing import List, Union import numpy as np import pandas as pd import plotly.express as px from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.utils.correlation_methods import correlation_ratio, symmetric_theil_u_correlation from runml_checks.utils.dataframes import generalized_corrwith, select_from_dataframe from runml_checks.utils.typing import Hashable __all__ = ['FeatureFeatureCorrelation'] class FeatureFeatureCorrelation(SingleDatasetCheck): """ Checks for pairwise correlation between the features. Extremely correlated pairs of features could indicate redundancy and even duplication. Removing highly correlated features from the data can significantly increase model speed due to the curse of dimensionality, and decrease harmful bias. Parameters ---------- columns : Union[Hashable, List[Hashable]] , default: None Columns to check, if none are given checks all columns except ignored ones. ignore_columns : Union[Hashable, List[Hashable]] , default: None Columns to ignore, if none given checks based on columns variable. show_n_top_columns : int , optional amount of columns to show ordered by the highest correlation, default: 10 n_samples : int , default: 10000 number of samples to use for this check. random_state : int, default: 42 random seed for all check internals. """ def __init__( self, columns: Union[Hashable, List[Hashable], None] = None, ignore_columns: Union[Hashable, List[Hashable], None] = None, show_n_top_columns: int = 10, n_samples: int = 10000, random_state: int = 42, **kwargs ): super().__init__(**kwargs) self.columns = columns self.ignore_columns = ignore_columns self.n_top_columns = show_n_top_columns self.n_samples = n_samples self.random_state = random_state def run_logic(self, context: Context, dataset_kind) -> CheckResult: """ Run Check. 
Returns ------- CheckResult A DataFrame of the pairwise correlations between the features. """ dataset = context.get_data_by_kind(dataset_kind) df = select_from_dataframe(dataset.sample(self.n_samples, random_state=self.random_state).data, self.columns, self.ignore_columns) dataset.assert_features() # must use list comprehension for deterministic order of columns num_features = [f for f in dataset.numerical_features if f in df.columns] cat_features = [f for f in dataset.cat_features if f in df.columns] encoded_cat_data = df.loc[:, cat_features].apply(lambda x: pd.factorize(x)[0]) # NaNs are encoded as -1, replace back to NaN encoded_cat_data.replace(-1, np.NaN, inplace=True) all_features = num_features + cat_features full_df = pd.DataFrame(index=all_features, columns=all_features) # Numerical-numerical correlations if num_features: full_df.loc[num_features, num_features] = df.loc[:, num_features].corr(method='spearman') # Categorical-categorical correlations if cat_features: full_df.loc[cat_features, cat_features] = encoded_cat_data.corr(method=symmetric_theil_u_correlation) # Numerical-categorical correlations if num_features and cat_features: num_cat_corr = generalized_corrwith(df.loc[:, num_features], encoded_cat_data, method=correlation_ratio) full_df.loc[num_features, cat_features] = num_cat_corr full_df.loc[cat_features, num_features] = num_cat_corr.transpose() # Display if context.with_display: top_n_features = full_df.max(axis=1).sort_values(ascending=False).head(self.n_top_columns).index top_n_df = full_df.loc[top_n_features, top_n_features].abs() num_nans = top_n_df.isna().sum().sum() top_n_df.fillna(0.0, inplace=True) fig = [px.imshow(top_n_df, color_continuous_scale=px.colors.sequential.thermal), '* Displayed as absolute values.'] if num_nans: fig.append(f'* NaN values (where the correlation could not be calculated)' f' are displayed as 0.0, total of {num_nans} NaNs in this display.') if len(dataset.features) > len(all_features): fig.append('* Some 
features in the dataset are neither numerical nor categorical and therefore not ' 'calculated.') else: fig = None return CheckResult(value=full_df, header='Feature-Feature Correlation', display=fig) def add_condition_max_number_of_pairs_above_threshold(self, threshold: float = 0.9, n_pairs: int = 0): """Add condition that all pairwise correlations are less than threshold, except for the diagonal.""" def condition(result): results_ge = result[result > threshold].stack().index.to_list() high_corr_pairs = [(i, j) for (i, j) in results_ge if i < j] # remove diagonal and duplicate pairs if len(high_corr_pairs) > n_pairs: return ConditionResult(ConditionCategory.FAIL, f'Correlation is greater than {threshold} for pairs {high_corr_pairs}') else: return ConditionResult(ConditionCategory.PASS, f'All correlations are less than {threshold} except pairs {high_corr_pairs}') return self.add_condition(f'Not more than {n_pairs} pairs are correlated above {threshold}', condition)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/data_integrity/feature_feature_correlation.py
0.948191
0.64692
feature_feature_correlation.py
pypi
"""Module of model error analysis check.""" from typing import Callable, Dict, Tuple, Union from sklearn import preprocessing from runml_checks import CheckFailure from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.core.errors import runml_checksProcessError from runml_checks.tabular import Context, Dataset, TrainTestCheck from runml_checks.tabular.utils.task_type import TaskType from runml_checks.utils.performance.error_model import error_model_display, model_error_contribution from runml_checks.utils.single_sample_metrics import per_sample_cross_entropy, per_sample_mse from runml_checks.utils.strings import format_percent __all__ = ['ModelErrorAnalysis'] class ModelErrorAnalysis(TrainTestCheck): """Find features that best split the data into segments of high and low model error. The check trains a regression model to predict the error of the user's model. Then, the features scoring the highest feature importance for the error regression model are selected and the distribution of the error vs the feature values is plotted. The check results are shown only if the error regression model manages to predict the error well enough. Parameters ---------- max_features_to_show : int , default: 3 maximal number of features to show error distribution for. min_feature_contribution : float , default: 0.15 minimum feature importance of a feature to the error regression model in order to show the feature. min_error_model_score : float , default: 0.5 minimum r^2 score of the error regression model for displaying the check. min_segment_size : float , default: 0.05 minimal fraction of data that can comprise a weak segment. alternative_scorer : Tuple[str, Callable] , default None An optional dictionary of scorer name to scorer function. Only a single entry is allowed in this check. If none given, using default scorer n_samples : int , default: 50_000 number of samples to use for this check. 
n_display_samples : int , default: 5_000 number of samples to display in scatter plot. random_state : int, default: 42 random seed for all check internals. Notes ----- Scorers are a convention of sklearn to evaluate a model. `See scorers documentation <https://scikit-learn.org/stable/modules/model_evaluation.html#scoring>`_ A scorer is a function which accepts (model, X, y_true) and returns a float result which is the score. For every scorer higher scores are better than lower scores. You can create a scorer out of existing sklearn metrics: .. code-block:: python from sklearn.metrics import roc_auc_score, make_scorer training_labels = [1, 2, 3] auc_scorer = make_scorer(roc_auc_score, labels=training_labels, multi_class='ovr') # Note that the labels parameter is required for multi-class classification in metrics like roc_auc_score or # log_loss that use the predict_proba function of the model, in case that not all labels are present in the test # set. Or you can implement your own: .. code-block:: python from sklearn.metrics import make_scorer def my_mse(y_true, y_pred): return (y_true - y_pred) ** 2 # Mark greater_is_better=False, since scorers always suppose to return # value to maximize. 
my_mse_scorer = make_scorer(my_mse, greater_is_better=False) """ def __init__( self, max_features_to_show: int = 3, min_feature_contribution: float = 0.15, min_error_model_score: float = 0.5, min_segment_size: float = 0.05, alternative_scorer: Tuple[str, Union[str, Callable]] = None, n_samples: int = 50_000, n_display_samples: int = 5_000, random_state: int = 42, **kwargs ): super().__init__(**kwargs) self.max_features_to_show = max_features_to_show self.min_feature_contribution = min_feature_contribution self.min_error_model_score = min_error_model_score self.min_segment_size = min_segment_size self.user_scorer = dict([alternative_scorer]) if alternative_scorer else None self.n_samples = n_samples self.n_display_samples = n_display_samples self.random_state = random_state def run_logic(self, context: Context) -> CheckResult: """Run check.""" train_dataset = context.train test_dataset = context.test train_dataset.assert_label() task_type = context.task_type model = context.model scorer = context.get_single_scorer(self.user_scorer) train_dataset = train_dataset.sample(self.n_samples, random_state=self.random_state, drop_na_label=True) test_dataset = test_dataset.sample(self.n_samples, random_state=self.random_state, drop_na_label=True) # Create scoring function, used to calculate the per sample model error if task_type == TaskType.REGRESSION: def scoring_func(dataset: Dataset): return per_sample_mse(dataset.label_col, model.predict(dataset.features_columns)) else: def scoring_func(dataset: Dataset): le = preprocessing.LabelEncoder() le.fit(dataset.classes) encoded_label = le.transform(dataset.label_col) return per_sample_cross_entropy(encoded_label, model.predict_proba(dataset.features_columns)) train_scores = scoring_func(train_dataset) test_scores = scoring_func(test_dataset) cat_features = train_dataset.cat_features numeric_features = train_dataset.numerical_features try: error_fi, error_model_predicted = model_error_contribution(train_dataset.features_columns, 
train_scores, test_dataset.features_columns, test_scores, numeric_features, cat_features, min_error_model_score=self.min_error_model_score, random_state=self.random_state) except runml_checksProcessError as e: return CheckFailure(self, e) display, value = error_model_display(error_fi, error_model_predicted, test_dataset, model, scorer, self.max_features_to_show, self.min_feature_contribution, self.n_display_samples, self.min_segment_size, self.random_state, context.with_display) headnote = """<span> The following graphs show the distribution of error for top features that are most useful for distinguishing high error samples from low error samples. </span>""" display = [headnote] + display if display else None return CheckResult(value, display=display) def add_condition_segments_performance_relative_difference_less_than(self, max_ratio_change: float = 0.05): """Add condition - require that the difference of performance between the segments is less than threshold. Parameters ---------- max_ratio_change : float , default: 0.05 maximal ratio of change between the two segments' performance. 
""" def condition(result: Dict) -> ConditionResult: features_diff = {} feature_res = result['feature_segments'] for feature in feature_res.keys(): # If only one segment identified, skip if len(feature_res[feature]) < 2: continue performance_diff = ( abs(feature_res[feature]['segment1']['score'] - feature_res[feature]['segment2']['score']) / abs(max(feature_res[feature]['segment1']['score'], feature_res[feature]['segment2']['score']))) features_diff[feature] = performance_diff failed_features = {f: format_percent(p) for f, p in features_diff.items() if p >= max_ratio_change} if failed_features: sorted_fails = dict(sorted(failed_features.items(), key=lambda item: item[1])) msg = f'{result["scorer_name"]} difference for failed features: {sorted_fails}' return ConditionResult(ConditionCategory.WARN, msg) else: avg_diff = format_percent(sum(features_diff.values()) / len(features_diff)) msg = f'Average {result["scorer_name"]} difference: {avg_diff}' return ConditionResult(ConditionCategory.PASS, msg) return self.add_condition(f'The performance difference of the detected segments is ' f'less than {format_percent(max_ratio_change)}', condition)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/model_evaluation/model_error_analysis.py
0.937383
0.709994
model_error_analysis.py
pypi
"""The roc_report check module.""" from typing import Dict, List import numpy as np import plotly.graph_objects as go import sklearn from runml_checks.core import CheckResult, ConditionResult from runml_checks.core.condition import ConditionCategory from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.utils.dict_funcs import get_dict_entry_by_value from runml_checks.utils.strings import format_number __all__ = ['RocReport'] class RocReport(SingleDatasetCheck): """Calculate the ROC curve for each class. For each class plots the ROC curve, calculate AUC score and displays the optimal threshold cutoff point. Parameters ---------- excluded_classes : List , default: None List of classes to exclude from the condition. """ def __init__(self, excluded_classes: List = None, **kwargs): super().__init__(**kwargs) self.excluded_classes = excluded_classes or [] def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. Returns ------- CheckResult value is dictionary of a class and its auc score, displays the roc graph with each class Raises ------ runml_checksValueError If the object is not a Dataset instance with a label """ dataset = context.get_data_by_kind(dataset_kind) context.assert_classification_task() ds_y = dataset.label_col ds_x = dataset.features_columns y_pred_prob = context.model.predict_proba(ds_x) dataset_classes = dataset.classes multi_y = (np.array(ds_y)[:, None] == np.unique(ds_y)).astype(int) fpr = {} tpr = {} thresholds = {} roc_auc = {} for i, class_name in enumerate(dataset_classes): if class_name in self.excluded_classes: continue fpr[class_name], tpr[class_name], thresholds[class_name] = \ sklearn.metrics.roc_curve(multi_y[:, i], y_pred_prob[:, i]) roc_auc[class_name] = sklearn.metrics.auc(fpr[class_name], tpr[class_name]) if context.with_display: fig = go.Figure() for class_name in dataset_classes: if class_name in self.excluded_classes: continue if len(dataset_classes) == 2: fig.add_trace(go.Scatter( 
x=fpr[class_name], y=tpr[class_name], line_width=2, name=f'auc = {roc_auc[class_name]:0.2f}', )) fig.add_trace(get_cutoff_figure(tpr[class_name], fpr[class_name], thresholds[class_name])) break else: fig.add_trace(go.Scatter( x=fpr[class_name], y=tpr[class_name], line_width=2, name=f'Class {class_name} (auc = {roc_auc[class_name]:0.2f})' )) fig.add_trace(get_cutoff_figure(tpr[class_name], fpr[class_name], thresholds[class_name], class_name)) fig.add_trace(go.Scatter( x=[0, 1], y=[0, 1], line=dict(color='#444'), line_width=2, line_dash='dash', showlegend=False )) fig.update_xaxes(title='False Positive Rate') fig.update_yaxes(title='True Positive Rate') if len(dataset_classes) == 2: fig.update_layout( title_text='Receiver operating characteristic for binary data', height=500 ) else: fig.update_layout( title_text='Receiver operating characteristic for multi-class data', height=500 ) footnote = """<span style="font-size:0.8em"><i> The marked points are the optimal threshold cut-off points. They are determined using Youden's index defined as sensitivity + specificity - 1 </i></span>""" display = [fig, footnote] else: display = None return CheckResult(roc_auc, header='ROC Report', display=display) def add_condition_auc_greater_than(self, min_auc: float = 0.7): """Add condition - require min allowed AUC score per class. Parameters ---------- min_auc : float , default: 0.7 Max allowed AUC score per class. 
""" def condition(result: Dict) -> ConditionResult: failed_classes = {class_name: format_number(score) for class_name, score in result.items() if score <= min_auc} if failed_classes: return ConditionResult(ConditionCategory.FAIL, f'Found classes with AUC below threshold: {failed_classes}') else: class_name, score = get_dict_entry_by_value(result, value_select_fn=min) details = f'All classes passed, minimum AUC found is {format_number(score)} for class {class_name}' return ConditionResult(ConditionCategory.PASS, details) if self.excluded_classes: suffix = f' except: {self.excluded_classes}' else: suffix = '' return self.add_condition(f'AUC score for all the classes{suffix} is greater than {min_auc}', condition) def get_cutoff_figure(tpr, fpr, thresholds, class_name=None): index = sensitivity_specificity_cutoff(tpr, fpr) hovertemplate = 'TPR: %{y:.2%}<br>FPR: %{x:.2%}' + f'<br>Youden\'s Index: {thresholds[index]:.3}' if class_name: hovertemplate += f'<br>Class: {class_name}' return go.Scatter(x=[fpr[index]], y=[tpr[index]], mode='markers', marker_size=15, hovertemplate=hovertemplate, showlegend=False) def sensitivity_specificity_cutoff(tpr, fpr): """Find index of optimal cutoff point on curve. Cut-off is determined using Youden's index defined as sensitivity + specificity - 1. Parameters ---------- tpr : array, shape = [n_roc_points] True positive rate per threshold fpr : array, shape = [n_roc_points] False positive rate per threshold References ---------- Ewald, B. (2006). Post hoc choice of cut points introduced bias to diagnostic research. Journal of clinical epidemiology, 59(8), 798-801. Steyerberg, E.W., Van Calster, B., & Pencina, M.J. (2011). Performance measures for prediction models and markers: evaluation of predictions and classifications. Revista Espanola de Cardiologia (English Edition), 64(9), 788-794. Jiménez-Valverde, A., & Lobo, J.M. (2007). Threshold criteria for conversion of probability of species presence to either–or presence–absence. 
Acta oecologica, 31(3), 361-369. """ return np.argmax(tpr - fpr)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/model_evaluation/roc_report.py
0.952475
0.430686
roc_report.py
pypi
"""Module containing simple comparison check.""" from collections import defaultdict from typing import Callable, Dict, Hashable, List import numpy as np import pandas as pd import plotly.express as px from sklearn.dummy import DummyClassifier, DummyRegressor from sklearn.pipeline import Pipeline from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from runml_checks.core import CheckResult, ConditionResult from runml_checks.core.condition import ConditionCategory from runml_checks.core.errors import runml_checksValueError from runml_checks.tabular import Context, Dataset, TrainTestCheck from runml_checks.tabular.utils.task_type import TaskType from runml_checks.utils.distribution.preprocessing import ScaledNumerics from runml_checks.utils.metrics import get_gain from runml_checks.utils.simple_models import RandomModel from runml_checks.utils.strings import format_percent __all__ = ['SimpleModelComparison'] class SimpleModelComparison(TrainTestCheck): """Compare given model score to simple model score (according to given model type). Parameters ---------- simple_model_type : str , default: constant Type of the simple model ['random', 'constant', 'tree']. + random - select one of the labels by random. + constant - in regression is mean value, in classification the most common value. + tree - runs a simple decision tree. alternative_scorers : Dict[str, Callable], default None An optional dictionary of scorer title to scorer functions/names. If none given, using default scorers. For description about scorers see Notes below. max_gain : float , default: 50 the maximum value for the gain value, limits from both sides [-max_gain, max_gain] max_depth : int , default: 3 the max depth of the tree (used only if simple model type is tree). random_state : int , default: 42 the random state (used only if simple model type is tree or random). Notes ----- Scorers are a convention of sklearn to evaluate a model. 
`See scorers documentation <https://scikit-learn.org/stable/modules/model_evaluation.html#scoring>`_ A scorer is a function which accepts (model, X, y_true) and returns a float result which is the score. For every scorer higher scores are better than lower scores. You can create a scorer out of existing sklearn metrics: .. code-block:: python from sklearn.metrics import roc_auc_score, make_scorer training_labels = [1, 2, 3] auc_scorer = make_scorer(roc_auc_score, labels=training_labels, multi_class='ovr') # Note that the labels parameter is required for multi-class classification in metrics like roc_auc_score or # log_loss that use the predict_proba function of the model, in case that not all labels are present in the test # set. Or you can implement your own: .. code-block:: python from sklearn.metrics import make_scorer def my_mse(y_true, y_pred): return (y_true - y_pred) ** 2 # Mark greater_is_better=False, since scorers always suppose to return # value to maximize. my_mse_scorer = make_scorer(my_mse, greater_is_better=False) """ def __init__( self, simple_model_type: str = 'constant', alternative_scorers: Dict[str, Callable] = None, max_gain: float = 50, max_depth: int = 3, random_state: int = 42, **kwargs ): super().__init__(**kwargs) self.simple_model_type = simple_model_type self.user_scorers = alternative_scorers self.max_gain = max_gain self.max_depth = max_depth self.random_state = random_state def run_logic(self, context: Context) -> CheckResult: """Run check. Returns ------- CheckResult value is a Dict of: given_model_score, simple_model_score, ratio <br> ratio is given model / simple model (if the scorer returns negative values we divide 1 by it) <br> if ratio is infinite max_ratio is returned Raises ------ runml_checksValueError If the object is not a Dataset instance. 
""" train_dataset = context.train test_dataset = context.test test_label = test_dataset.label_col task_type = context.task_type model = context.model # If user defined scorers used them, else use a single scorer if self.user_scorers: scorers = context.get_scorers(self.user_scorers, class_avg=False) else: scorers = [context.get_single_scorer(class_avg=False)] simple_model = self._create_simple_model(train_dataset, task_type) models = [ (f'{type(model).__name__} model', 'Origin', model), (f'Simple model - {self.simple_model_type}', 'Simple', simple_model) ] # Multiclass have different return type from the scorer, list of score per class instead of single score if task_type in [TaskType.MULTICLASS, TaskType.BINARY]: n_samples = test_label.groupby(test_label).count() classes = [clazz for clazz in test_dataset.classes if clazz in train_dataset.classes] display_array = [] # Dict in format { Scorer : Dict { Class : Dict { Origin/Simple : score } } } results_dict = {} for scorer in scorers: model_dict = defaultdict(dict) for model_name, model_type, model_instance in models: for class_score, class_value in zip(scorer(model_instance, test_dataset), classes): model_dict[class_value][model_type] = class_score if context.with_display: display_array.append([model_name, model_type, class_score, scorer.name, class_value, n_samples[class_value] ]) results_dict[scorer.name] = model_dict if display_array: display_df = pd.DataFrame( display_array, columns=['Model', 'Type', 'Value', 'Metric', 'Class', 'Number of samples'] ) # Plot the metrics in a graph, grouping by the model and class fig = ( px.histogram( display_df, x=['Class', 'Model'], y='Value', color='Model', barmode='group', facet_col='Metric', facet_col_spacing=0.05, hover_data=['Number of samples']) .update_xaxes(title=None, tickprefix='Class ', tickangle=60, type='category') .update_yaxes(title=None, matches=None) .for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) .for_each_yaxis(lambda yaxis: 
yaxis.update(showticklabels=True)) ) else: fig = None else: classes = None display_array = [] # Dict in format { Scorer : Dict { Origin/Simple : score } } results_dict = {} for scorer in scorers: model_dict = defaultdict(dict) for model_name, model_type, model_instance in models: score = scorer(model_instance, test_dataset) model_dict[model_type] = score if context.with_display: display_array.append([model_name, model_type, score, scorer.name, test_label.count() ]) results_dict[scorer.name] = model_dict if display_array: display_df = pd.DataFrame( display_array, columns=['Model', 'Type', 'Value', 'Metric', 'Number of samples'] ) # Plot the metrics in a graph, grouping by the model fig = ( px.histogram( display_df, x='Model', y='Value', color='Model', barmode='group', facet_col='Metric', facet_col_spacing=0.05, hover_data=['Number of samples']) .update_xaxes(title=None) .update_yaxes(title=None, matches=None) .for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) .for_each_yaxis(lambda yaxis: yaxis.update(showticklabels=True)) ) else: fig = None # For each scorer calculate perfect score in order to calculate later the ratio in conditions scorers_perfect = {scorer.name: scorer.score_perfect(test_dataset) for scorer in scorers} return CheckResult({'scores': results_dict, 'type': task_type, 'scorers_perfect': scorers_perfect, 'classes': classes }, display=fig) def _create_simple_model(self, train_ds: Dataset, task_type: TaskType): """Create a simple model of given type (random/constant/tree) to the given dataset. Parameters ---------- train_ds : Dataset The training dataset object. task_type : TaskType the model type. 
Returns ------- object Classifier Object Raises ------ NotImplementedError If the simple_model_type is not supported """ np.random.seed(self.random_state) if self.simple_model_type == 'random': simple_model = RandomModel() elif self.simple_model_type == 'constant': if task_type == TaskType.REGRESSION: simple_model = DummyRegressor(strategy='mean') elif task_type in {TaskType.BINARY, TaskType.MULTICLASS}: simple_model = DummyClassifier(strategy='most_frequent') else: raise runml_checksValueError(f'Unknown task type - {task_type}') elif self.simple_model_type == 'tree': if task_type == TaskType.REGRESSION: clf = DecisionTreeRegressor( max_depth=self.max_depth, random_state=self.random_state ) elif task_type in {TaskType.BINARY, TaskType.MULTICLASS}: clf = DecisionTreeClassifier( max_depth=self.max_depth, random_state=self.random_state, class_weight='balanced' ) else: raise runml_checksValueError(f'Unknown task type - {task_type}') simple_model = Pipeline([('scaler', ScaledNumerics(train_ds.cat_features, max_num_categories=10)), ('tree-model', clf)]) else: raise runml_checksValueError( f'Unknown model type - {self.simple_model_type}, expected to be one of ' f"['random', 'constant', 'tree'] " f"but instead got {self.simple_model_type}" # pylint: disable=inconsistent-quotes ) simple_model.fit(train_ds.data[train_ds.features], train_ds.data[train_ds.label_name]) return simple_model def add_condition_gain_greater_than(self, min_allowed_gain: float = 0.1, classes: List[Hashable] = None, average: bool = False): """Add condition - require minimum allowed gain between the model and the simple model. Parameters ---------- min_allowed_gain : float , default: 0.1 Minimum allowed gain between the model and the simple model - gain is: difference in performance / (perfect score - simple score) classes : List[Hashable] , default: None Used in classification models to limit condition only to given classes. 
average : bool , default: False Used in classification models to flag if to run condition on average of classes, or on each class individually """ name = f'Model performance gain over simple model is greater than {format_percent(min_allowed_gain)}' if classes: name = name + f' for classes {str(classes)}' return self.add_condition(name, condition, include_classes=classes, min_allowed_gain=min_allowed_gain, max_gain=self.max_gain, average=average) def condition(result: Dict, include_classes=None, average=False, max_gain=None, min_allowed_gain=None) -> \ ConditionResult: scores = result['scores'] task_type = result['type'] scorers_perfect = result['scorers_perfect'] passed_condition = True if task_type in [TaskType.MULTICLASS, TaskType.BINARY] and not average: passed_metrics = {} failed_classes = defaultdict(dict) perfect_metrics = [] for metric, classes_scores in scores.items(): gains = {} metric_passed = True for clas, models_scores in classes_scores.items(): # Skip if class is not in class list if include_classes is not None and clas not in include_classes: continue # If origin model is perfect, skip the gain calculation if models_scores['Origin'] == scorers_perfect[metric]: continue gains[clas] = get_gain(models_scores['Simple'], models_scores['Origin'], scorers_perfect[metric], max_gain) # Save dict of failed classes and metrics gain if gains[clas] <= min_allowed_gain: failed_classes[clas][metric] = format_percent(gains[clas]) metric_passed = False if metric_passed and gains: avg_gain = sum(gains.values()) / len(gains) passed_metrics[metric] = format_percent(avg_gain) elif metric_passed and not gains: perfect_metrics.append(metric) if failed_classes: msg = f'Found classes with failed metric\'s gain: {dict(failed_classes)}' passed_condition = False elif passed_metrics: msg = f'All classes passed, average gain for metrics: {passed_metrics}' else: msg = f'Found metrics with perfect score, no gain is calculated: {perfect_metrics}' else: passed_metrics = {} 
failed_metrics = {} perfect_metrics = [] if task_type in [TaskType.MULTICLASS, TaskType.BINARY]: scores = average_scores(scores, include_classes) for metric, models_scores in scores.items(): # If origin model is perfect, skip the gain calculation if models_scores['Origin'] == scorers_perfect[metric]: perfect_metrics.append(metric) continue gain = get_gain(models_scores['Simple'], models_scores['Origin'], scorers_perfect[metric], max_gain) if gain <= min_allowed_gain: failed_metrics[metric] = format_percent(gain) else: passed_metrics[metric] = format_percent(gain) if failed_metrics: msg = f'Found failed metrics: {failed_metrics}' passed_condition = False elif passed_metrics: msg = f'All metrics passed, metric\'s gain: {passed_metrics}' else: msg = f'Found metrics with perfect score, no gain is calculated: {perfect_metrics}' category = ConditionCategory.PASS if passed_condition else ConditionCategory.FAIL return ConditionResult(category, msg) def average_scores(scores, include_classes): result = {} for metric, classes_scores in scores.items(): origin_score = 0 simple_score = 0 total = 0 for clas, models_scores in classes_scores.items(): # Skip if class is not in class list if include_classes is not None and clas not in include_classes: continue origin_score += models_scores['Origin'] simple_score += models_scores['Simple'] total += 1 result[metric] = { 'Origin': origin_score / total, 'Simple': simple_score / total } return result
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/model_evaluation/simple_model_comparison.py
0.911913
0.593433
simple_model_comparison.py
pypi
"""Boosting overfit check module.""" from copy import deepcopy from typing import Callable, Tuple, Union import numpy as np import plotly.graph_objects as go from sklearn.pipeline import Pipeline from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.core.errors import runml_checksValueError, ModelValidationError from runml_checks.tabular import Context, TrainTestCheck from runml_checks.utils.model import get_model_of_pipeline from runml_checks.utils.strings import format_percent __all__ = ['BoostingOverfit'] class PartialBoostingModel: """Wrapper for boosting models which limits the number of estimators being used in the prediction.""" _UNSUPORTED_MODEL_ERROR = ( 'Check is relevant for Boosting models of type ' '{supported_models}, but received model of type {model_type}' ) _SUPPORTED_CLASSIFICATION_MODELS = ( 'AdaBoostClassifier', 'GradientBoostingClassifier', 'LGBMClassifier', 'XGBClassifier', 'CatBoostClassifier' ) _SUPPORTED_REGRESSION_MODELS = ( 'AdaBoostRegressor', 'GradientBoostingRegressor', 'LGBMRegressor', 'XGBRegressor', 'CatBoostRegressor' ) _SUPPORTED_MODELS = _SUPPORTED_CLASSIFICATION_MODELS + _SUPPORTED_REGRESSION_MODELS def __init__(self, model, step): """Construct wrapper for model with `predict` and `predict_proba` methods. Parameters ---------- model boosting model to wrap. step Number of iterations/estimators to limit the model on predictions. 
""" self.model_class = get_model_of_pipeline(model).__class__.__name__ self.step = step if self.model_class in ['AdaBoostClassifier', 'GradientBoostingClassifier', 'AdaBoostRegressor', 'GradientBoostingRegressor']: self.model = deepcopy(model) if isinstance(model, Pipeline): internal_estimator = get_model_of_pipeline(self.model) internal_estimator.estimators_ = internal_estimator.estimators_[:self.step] else: self.model.estimators_ = self.model.estimators_[:self.step] else: self.model = model def predict_proba(self, x): if self.model_class in ['AdaBoostClassifier', 'GradientBoostingClassifier']: return self.model.predict_proba(x) elif self.model_class == 'LGBMClassifier': return self.model.predict_proba(x, num_iteration=self.step) elif self.model_class == 'XGBClassifier': return self.model.predict_proba(x, iteration_range=(0, self.step)) elif self.model_class == 'CatBoostClassifier': return self.model.predict_proba(x, ntree_end=self.step) else: raise ModelValidationError(self._UNSUPORTED_MODEL_ERROR.format( supported_models=self._SUPPORTED_CLASSIFICATION_MODELS, model_type=self.model_class )) def predict(self, x): if self.model_class in ['AdaBoostClassifier', 'GradientBoostingClassifier', 'AdaBoostRegressor', 'GradientBoostingRegressor']: return self.model.predict(x) elif self.model_class in ['LGBMClassifier', 'LGBMRegressor']: return self.model.predict(x, num_iteration=self.step) elif self.model_class in ['XGBClassifier', 'XGBRegressor']: return self.model.predict(x, iteration_range=(0, self.step)) elif self.model_class in ['CatBoostClassifier', 'CatBoostRegressor']: return self.model.predict(x, ntree_end=self.step) else: raise ModelValidationError(self._UNSUPORTED_MODEL_ERROR.format( supported_models=self._SUPPORTED_MODELS, model_type=self.model_class )) @classmethod def n_estimators(cls, model): model = get_model_of_pipeline(model) model_class = model.__class__.__name__ if model_class in ['AdaBoostClassifier', 'GradientBoostingClassifier', 'AdaBoostRegressor', 
'GradientBoostingRegressor']: return len(model.estimators_) elif model_class in ['LGBMClassifier', 'LGBMRegressor']: return model.n_estimators elif model_class in ['XGBClassifier', 'XGBRegressor']: return model.n_estimators elif model_class in ['CatBoostClassifier', 'CatBoostRegressor']: return model.tree_count_ else: raise ModelValidationError(cls._UNSUPORTED_MODEL_ERROR.format( supported_models=cls._SUPPORTED_MODELS, model_type=model_class )) class BoostingOverfit(TrainTestCheck): """Check for overfit caused by using too many iterations in a gradient boosted model. The check runs a pred-defined number of steps, and in each step it limits the boosting model to use up to X estimators (number of estimators is monotonic increasing). It plots the given score calculated for each step for both the train dataset and the test dataset. Parameters ---------- scorer : Union[Callable, str] , default: None Scorer used to verify the model, either function or sklearn scorer name. scorer_name : str , default: None Name to be displayed in the plot on y-axis. must be used together with 'scorer' num_steps : int , default: 20 Number of splits of the model iterations to check. """ def __init__( self, alternative_scorer: Tuple[str, Union[str, Callable]] = None, num_steps: int = 20, **kwargs ): super().__init__(**kwargs) self.user_scorer = dict([alternative_scorer]) if alternative_scorer else None self.num_steps = num_steps if not isinstance(self.num_steps, int) or self.num_steps < 2: raise runml_checksValueError('num_steps must be an integer larger than 1') def run_logic(self, context: Context) -> CheckResult: """Run check. Returns ------- CheckResult The score value on the test dataset. 
""" train_dataset = context.train test_dataset = context.test model = context.model # Get default scorer scorer = context.get_single_scorer(self.user_scorer) # Get number of estimators on model num_estimators = PartialBoostingModel.n_estimators(model) estimator_steps = _calculate_steps(self.num_steps, num_estimators) train_scores = [] test_scores = [] for step in estimator_steps: train_scores.append(_partial_score(scorer, train_dataset, model, step)) test_scores.append(_partial_score(scorer, test_dataset, model, step)) result = {'test': test_scores, 'train': train_scores} if context.with_display: fig = go.Figure() fig.add_trace(go.Scatter(x=estimator_steps, y=np.array(train_scores), mode='lines+markers', name='Training score')) fig.add_trace(go.Scatter(x=estimator_steps, y=np.array(test_scores), mode='lines+markers', name='Test score')) fig.update_layout( title_text=f'{scorer.name} score compared to number of boosting iteration', height=500 ) fig.update_xaxes(title='Number of boosting iterations') fig.update_yaxes(title=scorer.name) display_text = f"""<span> The check limits the boosting model to using up to N estimators each time, and plotting the {scorer.name} calculated for each subset of estimators for both the train dataset and the test dataset. </span>""" display = [display_text, fig] else: display = None return CheckResult(result, display=display, header='Boosting Overfit') def add_condition_test_score_percent_decline_less_than(self, threshold: float = 0.05): """Add condition. Percent of decline between the maximal score achieved in any boosting iteration and the score achieved in the last iteration ("regular" model score) is not above given threshold. 
Parameters ---------- threshold : float , default: 0.05 Maximum percentage decline allowed (value 0 and above) """ def condition(result: dict): max_score = max(result['test']) last_score = result['test'][-1] pct_diff = (max_score - last_score) / abs(max_score) details = f'Found score decline of {format_percent(-pct_diff)}' category = ConditionCategory.PASS if pct_diff < threshold else ConditionCategory.FAIL return ConditionResult(category, details) name = f'Test score over iterations is less than {format_percent(threshold)} from the best score' return self.add_condition(name, condition) def _partial_score(scorer, dataset, model, step): partial_model = PartialBoostingModel(model, step) return scorer(partial_model, dataset) def _calculate_steps(num_steps, num_estimators): """Calculate steps (integers between 1 to num_estimators) to work on.""" if num_steps >= num_estimators: return list(range(1, num_estimators + 1)) if num_steps <= 5: steps_percents = np.linspace(0, 1.0, num_steps + 1)[1:] steps_numbers = np.ceil(steps_percents * num_estimators) steps_set = {int(s) for s in steps_numbers} else: steps_percents = np.linspace(5 / num_estimators, 1.0, num_steps - 4)[1:] steps_numbers = np.ceil(steps_percents * num_estimators) steps_set = {int(s) for s in steps_numbers} # We want to forcefully take the first 5 estimators, since they have the largest affect on the model performance steps_set.update({1, 2, 3, 4, 5}) return sorted(steps_set)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/model_evaluation/boosting_overfit.py
0.957675
0.515132
boosting_overfit.py
pypi
"""The calibration score check module.""" import typing as t import plotly.graph_objects as go from sklearn.calibration import calibration_curve from sklearn.metrics import brier_score_loss from runml_checks.core import CheckResult from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.utils.typing import ClassificationModel __all__ = ['CalibrationScore'] class CalibrationScore(SingleDatasetCheck): """Calculate the calibration curve with brier score for each class.""" def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. Returns ------- CheckResult value is dictionary of a class and its brier score, displays the calibration curve graph with each class Raises ------ runml_checksValueError: If the data is not a Dataset instance with a label. """ dataset = context.get_data_by_kind(dataset_kind) context.assert_classification_task() ds_x = dataset.features_columns ds_y = dataset.label_col dataset_classes = dataset.classes model = t.cast(ClassificationModel, context.model) # Expect predict_proba to return in order of the sorted classes. 
y_pred = model.predict_proba(ds_x) briers_scores = {} if len(dataset_classes) == 2: briers_scores[0] = brier_score_loss(ds_y == dataset_classes[1], y_pred[:, 1]) else: for class_index, class_name in enumerate(dataset_classes): prob_pos = y_pred[:, class_index] clf_score = brier_score_loss(ds_y == class_name, prob_pos) briers_scores[class_name] = clf_score if context.with_display: fig = go.Figure() fig.add_trace(go.Scatter( x=[0, 1], y=[0, 1], line_width=2, line_dash='dash', name='Perfectly calibrated', )) if len(dataset_classes) == 2: # Calibration curve must get labels of {0, 1} therefore in order to support other labels, apply mapping ds_y = ds_y.apply(lambda x: 0 if x == dataset_classes[0] else 1) fraction_of_positives, mean_predicted_value = calibration_curve(ds_y, y_pred[:, 1], n_bins=10) fig.add_trace(go.Scatter( x=mean_predicted_value, y=fraction_of_positives, mode='lines+markers', name=f'(brier:{briers_scores[0]:9.4f})', )) else: for class_index, class_name in enumerate(dataset_classes): prob_pos = y_pred[:, class_index] fraction_of_positives, mean_predicted_value = \ calibration_curve(ds_y == class_name, prob_pos, n_bins=10) fig.add_trace(go.Scatter( x=mean_predicted_value, y=fraction_of_positives, mode='lines+markers', name=f'{class_name} (brier:{briers_scores[class_name]:9.4f})', )) fig.update_layout( title_text='Calibration plots (reliability curve)', height=500 ) fig.update_yaxes(title='Fraction of positives') fig.update_xaxes(title='Mean predicted value') calibration_text = 'Calibration curves (also known as reliability diagrams) compare how well the ' \ 'probabilistic predictions of a binary classifier are calibrated. It plots the true ' \ 'frequency of the positive label against its predicted probability, for binned predictions.' brier_text = 'The Brier score metric may be used to assess how well a classifier is calibrated. 
For more ' \ 'info, please visit https://en.wikipedia.org/wiki/Brier_score' display = [calibration_text, fig, brier_text] else: display = None return CheckResult(briers_scores, header='Calibration Metric', display=display)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/model_evaluation/calibration_score.py
0.956043
0.562777
calibration_score.py
pypi
"""The model inference time check module.""" import timeit import typing as t import numpy as np from runml_checks.core import CheckResult, ConditionResult from runml_checks.core.condition import ConditionCategory from runml_checks.core.errors import runml_checksValueError from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.utils.strings import format_number __all__ = ['ModelInferenceTime'] MI = t.TypeVar('MI', bound='ModelInferenceTime') class ModelInferenceTime(SingleDatasetCheck): """Measure model average inference time (in seconds) per sample. Parameters ---------- n_samples : int , default: 1000 number of samples to use for inference, but if actual dataset is smaller then all samples will be used """ def __init__(self, n_samples: int = 1000, **kwargs): super().__init__(**kwargs) self.n_samples = n_samples if n_samples == 0 or n_samples < 0: raise runml_checksValueError('n_samples cannot be le than 0!') def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. Returns ------- CheckResult value is of the type 'float' . Raises ------ runml_checksValueError If the test dataset is not a 'Dataset' instance with a label or if 'model' is not a scikit-learn-compatible fitted estimator instance. 
""" dataset = context.get_data_by_kind(dataset_kind) model = context.model df = dataset.features_columns prediction_method = model.predict # type: ignore n_samples = len(df) if len(df) < self.n_samples else self.n_samples df = df.sample(n=n_samples, random_state=np.random.randint(n_samples)) result = timeit.timeit( 'predict(*args)', globals={'predict': prediction_method, 'args': (df,)}, number=1 ) result = result / n_samples return CheckResult(value=result, display=( 'Average model inference time for one sample (in seconds): ' f'{format_number(result, floating_point=8)}' )) def add_condition_inference_time_less_than(self: MI, value: float = 0.001) -> MI: """Add condition - the average model inference time (in seconds) per sample is less than threshold. Parameters ---------- value : float , default: 0.001 condition threshold. Returns ------- MI """ def condition(average_time: float) -> ConditionResult: details = f'Found average inference time (seconds): {format_number(average_time, floating_point=8)}' category = ConditionCategory.PASS if average_time < value else ConditionCategory.FAIL return ConditionResult(category=category, details=details) return self.add_condition(condition_func=condition, name=( f'Average model inference time for one sample is less than {format_number(value, floating_point=8)}' ))
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/model_evaluation/model_inference_time.py
0.947636
0.48054
model_inference_time.py
pypi
"""Module of weak segments performance check.""" from collections import defaultdict from typing import Callable, Dict, List, Union import numpy as np import pandas as pd import plotly.express as px import sklearn from category_encoders import TargetEncoder from packaging import version from sklearn.model_selection import GridSearchCV from sklearn.tree import DecisionTreeRegressor from runml_checks import ConditionCategory, ConditionResult, Dataset from runml_checks.core import CheckResult from runml_checks.core.check_result import DisplayMap from runml_checks.core.errors import runml_checksNotSupportedError, runml_checksProcessError from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.tabular.context import _DummyModel from runml_checks.tabular.utils.task_type import TaskType from runml_checks.utils.dataframes import default_fill_na_per_column_type from runml_checks.utils.performance.partition import (convert_tree_leaves_into_filters, partition_numeric_feature_around_segment) from runml_checks.utils.single_sample_metrics import calculate_per_sample_loss from runml_checks.utils.strings import format_number, format_percent from runml_checks.utils.typing import Hashable __all__ = ['WeakSegmentsPerformance'] class WeakSegmentsPerformance(SingleDatasetCheck): """Search for segments with low performance scores. The check is designed to help you easily identify weak spots of your model and provide a deepdive analysis into its performance on different segments of your data. Specifically, it is designed to help you identify the model weakest segments in the data distribution for further improvement and visibility purposes. In order to achieve this, the check trains several simple tree based models which try to predict the error of the user provided model on the dataset. The relevant segments are detected by analyzing the different leafs of the trained trees. 
Parameters ---------- columns : Union[Hashable, List[Hashable]] , default: None Columns to check, if none are given checks all columns except ignored ones. ignore_columns : Union[Hashable, List[Hashable]] , default: None Columns to ignore, if none given checks based on columns variable n_top_features : int , default: 5 Number of features to use for segment search. Top columns are selected based on feature importance. segment_minimum_size_ratio: float , default: 0.05 Minimum size ratio for segments. Will only search for segments of size >= segment_minimum_size_ratio * data_size. alternative_scorer : Tuple[str, Union[str, Callable]] , default: None Scorer to use as performance measure, either function or sklearn scorer name. If None, a default scorer (per the model type) will be used. loss_per_sample: Union[np.array, pd.Series, None], default: None Loss per sample used to detect relevant weak segments. If pd.Series the indexes should be similar to those in the dataset object provide, if np.array the order should be based on the index order of the dataset object and if None the check calculates loss per sample by via log loss for classification and MSE for regression. n_samples : int , default: 10_000 number of samples to use for this check. n_to_show : int , default: 3 number of segments with the weakest performance to show. categorical_aggregation_threshold : float , default: 0.05 In each categorical column, categories with frequency below threshold will be merged into "Other" category. random_state : int, default: 42 random seed for all check internals. 
""" def __init__( self, columns: Union[Hashable, List[Hashable], None] = None, ignore_columns: Union[Hashable, List[Hashable], None] = None, n_top_features: int = 5, segment_minimum_size_ratio: float = 0.05, alternative_scorer: Dict[str, Callable] = None, loss_per_sample: Union[np.array, pd.Series, None] = None, classes_index_order: Union[np.array, pd.Series, None] = None, n_samples: int = 10_000, categorical_aggregation_threshold: float = 0.05, n_to_show: int = 3, random_state: int = 42, **kwargs ): super().__init__(**kwargs) self.columns = columns self.ignore_columns = ignore_columns self.n_top_features = n_top_features self.segment_minimum_size_ratio = segment_minimum_size_ratio self.n_samples = n_samples self.n_to_show = n_to_show self.random_state = random_state self.loss_per_sample = loss_per_sample self.classes_index_order = classes_index_order self.user_scorer = alternative_scorer if alternative_scorer else None self.categorical_aggregation_threshold = categorical_aggregation_threshold def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check.""" dataset = context.get_data_by_kind(dataset_kind) dataset.assert_features() dataset = dataset.sample(self.n_samples, random_state=self.random_state, drop_na_label=True) predictions = context.model.predict(dataset.features_columns) y_proba = context.model.predict_proba(dataset.features_columns) if \ context.task_type in [TaskType.MULTICLASS, TaskType.BINARY] else None if self.loss_per_sample is not None: loss_per_sample = self.loss_per_sample[list(dataset.data.index)] else: loss_per_sample = calculate_per_sample_loss(context.model, context.task_type, dataset, self.classes_index_order) dataset = dataset.select(self.columns, self.ignore_columns, keep_label=True) if len(dataset.features) < 2: raise runml_checksNotSupportedError('Check requires data to have at least two features in order to run.') encoded_dataset = self._target_encode_categorical_features_fill_na(dataset) dummy_model = 
_DummyModel(test=encoded_dataset, y_pred_test=predictions, y_proba_test=y_proba, validate_data_on_predict=False) if context.feature_importance is not None: feature_rank = context.feature_importance.sort_values(ascending=False).keys() feature_rank = np.asarray([col for col in feature_rank if col in encoded_dataset.features]) else: feature_rank = np.asarray(encoded_dataset.features) scorer = context.get_single_scorer(self.user_scorer) weak_segments = self._weak_segments_search(dummy_model, encoded_dataset, feature_rank, loss_per_sample, scorer) if len(weak_segments) == 0: raise runml_checksProcessError('WeakSegmentsPerformance was unable to train an error model to find weak ' 'segments. Try increasing n_samples or supply additional features.') avg_score = round(scorer(dummy_model, encoded_dataset), 3) display = self._create_heatmap_display(dummy_model, encoded_dataset, weak_segments, avg_score, scorer) if context.with_display else [] for idx, segment in weak_segments.copy().iterrows(): for feature in ['Feature1', 'Feature2']: if segment[feature] in encoded_dataset.cat_features: weak_segments[f'{feature} range'][idx] = \ self._format_partition_vec_for_display(segment[f'{feature} range'], segment[feature], None)[0] display_msg = 'Showcasing intersections of features with weakest detected segments.<br> The full list of ' \ 'weak segments can be observed in the check result value. 
' return CheckResult({'weak_segments_list': weak_segments, 'avg_score': avg_score, 'scorer_name': scorer.name}, display=[display_msg, DisplayMap(display)]) def _target_encode_categorical_features_fill_na(self, dataset: Dataset) -> Dataset: values_mapping = defaultdict(list) # mapping of per feature of original values to their encoded value df_aggregated = default_fill_na_per_column_type(dataset.features_columns.copy(), dataset.cat_features) for col in dataset.cat_features: categories_to_mask = [k for k, v in df_aggregated[col].value_counts().items() if v / dataset.n_samples < self.categorical_aggregation_threshold] df_aggregated.loc[np.isin(df_aggregated[col], categories_to_mask), col] = 'Other' if len(dataset.cat_features) > 0: t_encoder = TargetEncoder(cols=dataset.cat_features) df_encoded = t_encoder.fit_transform(df_aggregated, dataset.label_col) for col in dataset.cat_features: values_mapping[col] = pd.concat([df_encoded[col], df_aggregated[col]], axis=1).drop_duplicates() else: df_encoded = df_aggregated self.encoder_mapping = values_mapping return Dataset(df_encoded, cat_features=dataset.cat_features, label=dataset.label_col) def _create_heatmap_display(self, dummy_model, encoded_dataset, weak_segments, avg_score, scorer): display_tabs = {} data = encoded_dataset.data idx = -1 while len(display_tabs.keys()) < self.n_to_show and idx + 1 < len(weak_segments): idx += 1 segment = weak_segments.iloc[idx, :] feature1 = data[segment['Feature1']] if segment['Feature2'] != '': feature2 = data[segment['Feature2']] segments_f1 = partition_numeric_feature_around_segment(feature1, segment['Feature1 range']) segments_f2 = partition_numeric_feature_around_segment(feature2, segment['Feature2 range']) else: feature2 = pd.Series(np.ones(len(feature1))) segments_f1 = partition_numeric_feature_around_segment(feature1, segment['Feature1 range'], 7) segments_f2 = [0, 2] scores = np.empty((len(segments_f2) - 1, len(segments_f1) - 1), dtype=float) counts = 
np.empty((len(segments_f2) - 1, len(segments_f1) - 1), dtype=int) for f1_idx in range(len(segments_f1) - 1): for f2_idx in range(len(segments_f2) - 1): segment_data = data[ np.asarray(feature1.between(segments_f1[f1_idx], segments_f1[f1_idx + 1])) * np.asarray( feature2.between(segments_f2[f2_idx], segments_f2[f2_idx + 1]))] if segment_data.empty: scores[f2_idx, f1_idx] = np.NaN counts[f2_idx, f1_idx] = 0 else: scores[f2_idx, f1_idx] = scorer.run_on_data_and_label(dummy_model, segment_data, segment_data[encoded_dataset.label_name]) counts[f2_idx, f1_idx] = len(segment_data) f1_labels = self._format_partition_vec_for_display(segments_f1, segment['Feature1']) f2_labels = self._format_partition_vec_for_display(segments_f2, segment['Feature2']) scores_text = [[0] * scores.shape[1] for _ in range(scores.shape[0])] counts = np.divide(counts, len(data)) for i in range(len(f2_labels)): for j in range(len(f1_labels)): score = scores[i, j] if not np.isnan(score): scores_text[i][j] = f'{format_number(score)}\n({format_percent(counts[i, j])})' elif counts[i, j] == 0: scores_text[i][j] = '' else: scores_text[i][j] = f'{score}\n({format_percent(counts[i, j])})' # Plotly FigureWidget have bug with numpy nan, so replacing with python None scores = scores.astype(np.object) scores[np.isnan(scores.astype(np.float_))] = None labels = dict(x=segment['Feature1'], y=segment['Feature2'], color=f'{scorer.name} score') fig = px.imshow(scores, x=f1_labels, y=f2_labels, labels=labels, color_continuous_scale='rdylgn') fig.update_traces(text=scores_text, texttemplate='%{text}') fig.update_layout( title=f'{scorer.name} score (percent of data) {segment["Feature1"]} vs {segment["Feature2"]}', height=600 ) msg = f'Check ran on {encoded_dataset.n_samples} data samples. Average {scorer.name} ' \ f'score is {format_number(avg_score)}.' 
display_tabs[f'{segment["Feature1"]} vs {segment["Feature2"]}'] = [fig, msg] return display_tabs def _weak_segments_search(self, dummy_model, encoded_dataset, feature_rank_for_search, loss_per_sample, scorer): """Search for weak segments based on scorer.""" weak_segments = pd.DataFrame( columns=[f'{scorer.name} score', 'Feature1', 'Feature1 range', 'Feature2', 'Feature2 range', '% of data']) for i in range(min(len(feature_rank_for_search), self.n_top_features)): for j in range(i + 1, min(len(feature_rank_for_search), self.n_top_features)): feature1, feature2 = feature_rank_for_search[[i, j]] weak_segment_score, weak_segment_filter = self._find_weak_segment(dummy_model, encoded_dataset, [feature1, feature2], scorer, loss_per_sample) if weak_segment_score is None or len(weak_segment_filter.filters) == 0: continue data_size = 100 * weak_segment_filter.filter(encoded_dataset.data).shape[0] / encoded_dataset.n_samples filters = weak_segment_filter.filters if len(filters.keys()) == 1: weak_segments.loc[len(weak_segments)] = [weak_segment_score, list(filters.keys())[0], tuple(list(filters.values())[0]), '', None, data_size] else: weak_segments.loc[len(weak_segments)] = [weak_segment_score, feature1, tuple(filters[feature1]), feature2, tuple(filters[feature2]), data_size] return weak_segments.drop_duplicates().sort_values(f'{scorer.name} score') def _find_weak_segment(self, dummy_model, dataset, features_for_segment, scorer, loss_per_sample): """Find weak segment based on scorer for specified features.""" if version.parse(sklearn.__version__) < version.parse('1.0.0'): criterion = ['mse', 'mae'] else: criterion = ['squared_error', 'absolute_error'] search_space = { 'max_depth': [5], 'min_weight_fraction_leaf': [self.segment_minimum_size_ratio], 'min_samples_leaf': [10], 'criterion': criterion } def get_worst_leaf_filter(tree): leaves_filters = convert_tree_leaves_into_filters(tree, features_for_segment) min_score, min_score_leaf_filter = np.inf, None for leaf_filter in 
leaves_filters: leaf_data = leaf_filter.filter(dataset.data) leaf_score = scorer.run_on_data_and_label(dummy_model, leaf_data, leaf_data[dataset.label_name]) if leaf_score < min_score: min_score, min_score_leaf_filter = leaf_score, leaf_filter return min_score, min_score_leaf_filter def neg_worst_segment_score(clf: DecisionTreeRegressor, x, y) -> float: # pylint: disable=unused-argument return -get_worst_leaf_filter(clf.tree_)[0] grid_searcher = GridSearchCV(DecisionTreeRegressor(), scoring=neg_worst_segment_score, param_grid=search_space, n_jobs=-1, cv=3) try: grid_searcher.fit(dataset.features_columns[features_for_segment], loss_per_sample) segment_score, segment_filter = get_worst_leaf_filter(grid_searcher.best_estimator_.tree_) except ValueError: return None, None return segment_score, segment_filter def _format_partition_vec_for_display(self, partition_vec: np.array, feature_name: str, seperator: Union[str, None] = '<br>') -> List[Union[List, str]]: """Format partition vector for display. 
If seperator is None returns a list instead of a string.""" if feature_name == '': return [''] result = [] if feature_name in self.encoder_mapping.keys(): feature_map_df = self.encoder_mapping[feature_name] encodings = feature_map_df.iloc[:, 0] for lower, upper in zip(partition_vec[:-1], partition_vec[1:]): if lower == partition_vec[0]: values_in_range = np.where(np.logical_and(encodings >= lower, encodings <= upper))[0] else: values_in_range = np.where(np.logical_and(encodings > lower, encodings <= upper))[0] if seperator is None: result.append(feature_map_df.iloc[values_in_range, 1].to_list()) else: result.append(seperator.join([str(x) for x in feature_map_df.iloc[values_in_range, 1]])) else: for lower, upper in zip(partition_vec[:-1], partition_vec[1:]): result.append(f'({format_number(lower)}, {format_number(upper)}]') result[0] = '[' + result[0][1:] return result def add_condition_segments_relative_performance_greater_than(self, max_ratio_change: float = 0.20): """Add condition - check that the score of the weakest segment is greater than supplied relative threshold. Parameters ---------- max_ratio_change : float , default: 0.20 maximal ratio of change allowed between the average score and the score of the weakest segment. """ def condition(result: Dict) -> ConditionResult: weakest_segment_score = result['weak_segments_list'].iloc[0, 0] msg = f'Found a segment with {result["scorer_name"]} score of {format_number(weakest_segment_score, 3)} ' \ f'in comparison to an average score of {format_number(result["avg_score"], 3)} in sampled data.' 
if result['avg_score'] > 0 and weakest_segment_score > (1 - max_ratio_change) * result['avg_score']: return ConditionResult(ConditionCategory.PASS, msg) elif result['avg_score'] < 0 and weakest_segment_score > (1 + max_ratio_change) * result['avg_score']: return ConditionResult(ConditionCategory.PASS, msg) else: return ConditionResult(ConditionCategory.WARN, msg) return self.add_condition(f'The relative performance of weakest segment is greater than ' f'{format_percent(1 - max_ratio_change)} of average model performance.', condition)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/model_evaluation/weak_segments_performance.py
0.938717
0.54698
weak_segments_performance.py
pypi
"""The regression_error_distribution check module.""" import pandas as pd import plotly.express as px from scipy.stats import kurtosis from runml_checks.core import CheckResult, ConditionCategory, ConditionResult from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.utils.strings import format_number __all__ = ['RegressionErrorDistribution'] class RegressionErrorDistribution(SingleDatasetCheck): """Check regression error distribution. The check shows the distribution of the regression error, and enables to set conditions on the distribution kurtosis. Kurtosis is a measure of the shape of the distribution, helping us understand if the distribution is significantly "wider" from the normal distribution, which may imply a certain cause of error deforming the normal shape. Parameters ---------- n_top_samples : int , default: 3 amount of samples to show which have the largest under / over estimation errors. n_bins : int , default: 40 number of bins to use for the histogram. """ def __init__(self, n_top_samples: int = 3, n_bins: int = 40, **kwargs): super().__init__(**kwargs) self.n_top_samples = n_top_samples self.n_bins = n_bins def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. Returns ------- CheckResult value is the kurtosis value (Fisher’s definition (normal ==> 0.0)). display is histogram of error distribution and the largest prediction errors. 
Raises ------ runml_checksValueError If the object is not a Dataset instance with a label """ dataset = context.get_data_by_kind(dataset_kind) context.assert_regression_task() model = context.model x_test = dataset.features_columns y_test = dataset.label_col y_pred = model.predict(x_test) y_pred = pd.Series(y_pred, name='predicted ' + str(dataset.label_name), index=y_test.index) diff = y_test - y_pred kurtosis_value = kurtosis(diff) if context.with_display: n_largest_diff = diff.nlargest(self.n_top_samples) n_largest_diff.name = str(dataset.label_name) + ' Prediction Difference' n_largest = pd.concat([dataset.data.loc[n_largest_diff.index], y_pred.loc[n_largest_diff.index], n_largest_diff], axis=1) n_smallest_diff = diff.nsmallest(self.n_top_samples) n_smallest_diff.name = str(dataset.label_name) + ' Prediction Difference' n_smallest = pd.concat([dataset.data.loc[n_smallest_diff.index], y_pred.loc[n_smallest_diff.index], n_smallest_diff], axis=1) display = [ px.histogram( x=diff.values, nbins=self.n_bins, title='Histogram of prediction errors', labels={'x': f'{dataset.label_name} prediction error', 'y': 'Count'}, height=500 ), 'Largest over estimation errors:', n_largest, 'Largest under estimation errors:', n_smallest ] else: display = None return CheckResult(value=kurtosis_value, display=display) def add_condition_kurtosis_greater_than(self, min_kurtosis: float = -0.1): """Add condition - require min kurtosis value to be greater than the threshold. Parameters ---------- min_kurtosis : float , default: -0.1 Minimal kurtosis. """ def min_kurtosis_condition(result: float) -> ConditionResult: details = f'Found kurtosis value {format_number(result, 5)}' category = ConditionCategory.PASS if result > min_kurtosis else ConditionCategory.WARN return ConditionResult(category, details) return self.add_condition(f'Kurtosis value is greater than {format_number(min_kurtosis, 5)}', min_kurtosis_condition)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/model_evaluation/regression_error_distribution.py
0.953177
0.699588
regression_error_distribution.py
pypi
"""Module containing multi model performance report check.""" from typing import Callable, Dict, cast import pandas as pd import plotly.express as px from runml_checks.core import CheckResult from runml_checks.tabular import ModelComparisonCheck, ModelComparisonContext from runml_checks.tabular.utils.task_type import TaskType __all__ = ['MultiModelPerformanceReport'] class MultiModelPerformanceReport(ModelComparisonCheck): """Summarize performance scores for multiple models on test datasets. Parameters ---------- alternative_scorers : Dict[str, Callable] , default: None An optional dictionary of scorer name to scorer functions. If none given, using default scorers """ def __init__(self, alternative_scorers: Dict[str, Callable] = None, **kwargs): super().__init__(**kwargs) self.user_scorers = alternative_scorers def run_logic(self, multi_context: ModelComparisonContext): """Run check logic.""" first_context = multi_context[0] scorers = first_context.get_scorers(self.user_scorers, class_avg=False) if multi_context.task_type in [TaskType.MULTICLASS, TaskType.BINARY]: plot_x_axis = ['Class', 'Model'] results = [] for context in multi_context: test = context.test model = context.model label = cast(pd.Series, test.label_col) n_samples = label.groupby(label).count() results.extend( [context.model_name, class_score, scorer.name, class_name, n_samples[class_name]] for scorer in scorers # scorer returns numpy array of results with item per class for class_score, class_name in zip(scorer(model, test), test.classes) ) results_df = pd.DataFrame(results, columns=['Model', 'Value', 'Metric', 'Class', 'Number of samples']) else: plot_x_axis = 'Model' results = [ [context.model_name, scorer(context.model, context.test), scorer.name, cast(pd.Series, context.test.label_col).count()] for context in multi_context for scorer in scorers ] results_df = pd.DataFrame(results, columns=['Model', 'Value', 'Metric', 'Number of samples']) fig = px.histogram( results_df, x=plot_x_axis, y='Value', 
color='Model', barmode='group', facet_col='Metric', facet_col_spacing=0.05, hover_data=['Number of samples'], ) if multi_context.task_type in [TaskType.MULTICLASS, TaskType.BINARY]: fig.update_xaxes(title=None, tickprefix='Class ', tickangle=60) else: fig.update_xaxes(title=None) fig = ( fig.update_yaxes(title=None, matches=None) .for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) .for_each_yaxis(lambda yaxis: yaxis.update(showticklabels=True)) ) return CheckResult(results_df, display=[fig])
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/model_evaluation/multi_model_performance_report.py
0.946535
0.482368
multi_model_performance_report.py
pypi
"""The RegressionSystematicError check module.""" import plotly.graph_objects as go from sklearn.metrics import mean_squared_error from runml_checks.core import CheckResult, ConditionResult from runml_checks.core.condition import ConditionCategory from runml_checks.tabular import Context, SingleDatasetCheck from runml_checks.utils.strings import format_number __all__ = ['RegressionSystematicError'] class RegressionSystematicError(SingleDatasetCheck): """Check the regression systematic error.""" def run_logic(self, context: Context, dataset_kind) -> CheckResult: """Run check. Returns ------- CheckResult value is a dict with rmse and mean prediction error. display is box plot of the prediction error. Raises ------ runml_checksValueError If the object is not a Dataset instance with a label. """ dataset = context.get_data_by_kind(dataset_kind) context.assert_regression_task() y_test = dataset.label_col x_test = dataset.features_columns y_pred = context.model.predict(x_test) rmse = mean_squared_error(y_test, y_pred, squared=False) diff = y_test - y_pred diff_mean = diff.mean() if context.with_display: fig = ( go.Figure() .add_trace(go.Box( x=diff, orientation='h', name='Model prediction error', hoverinfo='x', boxmean=True)) .update_layout( title_text='Box plot of the model prediction error', height=500 ) ) display = [ 'Non-zero mean of the error distribution indicated the presents ' 'of systematic error in model predictions', fig ] else: display = None return CheckResult(value={'rmse': rmse, 'mean_error': diff_mean}, display=display) def add_condition_systematic_error_ratio_to_rmse_less_than(self, max_ratio: float = 0.01): """Add condition - require the absolute mean systematic error is less than (max_ratio * RMSE). 
Parameters ---------- max_ratio : float , default: 0.01 Maximum ratio """ def max_bias_condition(result: dict) -> ConditionResult: rmse = result['rmse'] mean_error = result['mean_error'] ratio = abs(mean_error) / rmse details = f'Found bias ratio {format_number(ratio)}' category = ConditionCategory.PASS if ratio < max_ratio else ConditionCategory.FAIL return ConditionResult(category, details) return self.add_condition(f'Bias ratio is less than {format_number(max_ratio)}', max_bias_condition)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/model_evaluation/regression_systematic_error.py
0.961552
0.572603
regression_systematic_error.py
pypi
"""Module containing performance report check.""" from typing import Callable, Dict, TypeVar, Union, cast import pandas as pd import plotly.express as px from runml_checks.core import CheckResult from runml_checks.core.check_utils.class_performance_utils import ( get_condition_class_performance_imbalance_ratio_less_than, get_condition_test_performance_greater_than, get_condition_train_test_relative_degradation_less_than) from runml_checks.core.checks import DatasetKind, ReduceMixin from runml_checks.tabular import Context, TrainTestCheck from runml_checks.utils.metrics import MULTICLASS_SCORERS_NON_AVERAGE, TaskType from runml_checks.utils.strings import format_percent __all__ = ['PerformanceReport'] PR = TypeVar('PR', bound='PerformanceReport') class PerformanceReport(TrainTestCheck, ReduceMixin): """Summarize given scores on a dataset and model. Parameters ---------- alternative_scorers : Dict[str, Callable], default: None An optional dictionary of scorer name to scorer functions. If none given, using default scorers reduce: Union[Callable, str], default: 'mean' An optional argument only used for the reduce_output function when using non-average scorers. Notes ----- Scorers are a convention of sklearn to evaluate a model. `See scorers documentation <https://scikit-learn.org/stable/modules/model_evaluation.html#scoring>`_ A scorer is a function which accepts (model, X, y_true) and returns a float result which is the score. For every scorer higher scores are better than lower scores. You can create a scorer out of existing sklearn metrics: .. code-block:: python from sklearn.metrics import roc_auc_score, make_scorer training_labels = [1, 2, 3] auc_scorer = make_scorer(roc_auc_score, labels=training_labels, multi_class='ovr') # Note that the labels parameter is required for multi-class classification in metrics like roc_auc_score or # log_loss that use the predict_proba function of the model, in case that not all labels are present in the test # set. 
Or you can implement your own: .. code-block:: python from sklearn.metrics import make_scorer def my_mse(y_true, y_pred): return (y_true - y_pred) ** 2 # Mark greater_is_better=False, since scorers always suppose to return # value to maximize. my_mse_scorer = make_scorer(my_mse, greater_is_better=False) """ def __init__(self, alternative_scorers: Dict[str, Callable] = None, reduce: Union[Callable, str] = 'mean', **kwargs): super().__init__(**kwargs) self.user_scorers = alternative_scorers self.reduce = reduce def run_logic(self, context: Context) -> CheckResult: """Run check. Returns ------- CheckResult value is dictionary in format 'score-name': score-value """ train_dataset = context.train test_dataset = context.test model = context.model task_type = context.task_type scorers = context.get_scorers(self.user_scorers, class_avg=False) datasets = {'Train': train_dataset, 'Test': test_dataset} if task_type in {TaskType.MULTICLASS, TaskType.BINARY}: plot_x_axis = 'Class' results = [] for dataset_name, dataset in datasets.items(): classes = dataset.classes label = cast(pd.Series, dataset.label_col) n_samples = label.groupby(label).count() results.extend( [dataset_name, class_name, scorer.name, class_score, n_samples[class_name]] for scorer in scorers # scorer returns numpy array of results with item per class for class_score, class_name in zip(scorer(model, dataset), classes) ) results_df = pd.DataFrame(results, columns=['Dataset', 'Class', 'Metric', 'Value', 'Number of samples']) else: plot_x_axis = 'Dataset' results = [ [dataset_name, scorer.name, scorer(model, dataset), cast(pd.Series, dataset.label_col).count()] for dataset_name, dataset in datasets.items() for scorer in scorers ] results_df = pd.DataFrame(results, columns=['Dataset', 'Metric', 'Value', 'Number of samples']) if context.with_display: fig = px.histogram( results_df, x=plot_x_axis, y='Value', color='Dataset', barmode='group', facet_col='Metric', facet_col_spacing=0.05, hover_data=['Number of samples'] 
) if task_type in [TaskType.MULTICLASS, TaskType.BINARY]: fig.update_xaxes(tickprefix='Class ', tickangle=60) fig = ( fig.update_xaxes(title=None, type='category') .update_yaxes(title=None, matches=None) .for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) .for_each_yaxis(lambda yaxis: yaxis.update(showticklabels=True)) ) else: fig = None return CheckResult( results_df, header='Performance Report', display=fig ) def reduce_output(self, check_result: CheckResult) -> Dict[str, float]: """Return the values of the metrics for the test dataset in {metric: value} format.""" df = check_result.value df = df[df['Dataset'] == DatasetKind.TEST.value] df = df.groupby('Metric').aggregate(self.reduce).reset_index() return dict(zip(df['Metric'], df['Value'])) def add_condition_test_performance_greater_than(self: PR, min_score: float) -> PR: """Add condition - metric scores are greater than the threshold. Parameters ---------- min_score : float Minimum score to pass the check. """ condition = get_condition_test_performance_greater_than(min_score=min_score) return self.add_condition(f'Scores are greater than {min_score}', condition) def add_condition_train_test_relative_degradation_less_than(self: PR, threshold: float = 0.1) -> PR: """Add condition - test performance is not degraded by more than given percentage in train. Parameters ---------- threshold : float , default: 0.1 maximum degradation ratio allowed (value between 0 and 1) """ condition = get_condition_train_test_relative_degradation_less_than(threshold=threshold) return self.add_condition(f'Train-Test scores relative degradation is less than {threshold}', condition) def add_condition_class_performance_imbalance_ratio_less_than( self: PR, threshold: float = 0.3, score: str = None ) -> PR: """Add condition - relative ratio difference between highest-class and lowest-class is less than threshold. 
Parameters ---------- threshold : float , default: 0.3 ratio difference threshold score : str , default: None limit score for condition Returns ------- Self instance of 'ClassPerformance' or it subtype Raises ------ runml_checksValueError if unknown score function name were passed. """ if score is None: score = next(iter(MULTICLASS_SCORERS_NON_AVERAGE)) condition = get_condition_class_performance_imbalance_ratio_less_than(threshold=threshold, score=score) return self.add_condition( name=f'Relative ratio difference between labels \'{score}\' score is less than {format_percent(threshold)}', condition_func=condition )
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/checks/model_evaluation/performance_report.py
0.955345
0.567277
performance_report.py
pypi
"""The avocado dataset contains historical data on avocado prices and sales volume in multiple US markets.""" import typing as t from urllib.request import urlopen import joblib import pandas as pd import sklearn from category_encoders import OneHotEncoder from sklearn.compose import ColumnTransformer from sklearn.ensemble import RandomForestRegressor from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from runml_checks.tabular.dataset import Dataset __all__ = ['load_data', 'load_fitted_model'] _MODEL_URL = 'https://figshare.com/ndownloader/files/35259829' _FULL_DATA_URL = 'https://figshare.com/ndownloader/files/35259799' _TRAIN_DATA_URL = 'https://figshare.com/ndownloader/files/35259769' _TEST_DATA_URL = 'https://figshare.com/ndownloader/files/35259814' _MODEL_VERSION = '1.0.2' _target = 'AveragePrice' _CAT_FEATURES = ['region', 'type'] _NUM_FEATURES = ['Total Volume', '4046', '4225', 'Total Bags', 'Small Bags', 'Large Bags', 'XLarge Bags'] def load_data(data_format: str = 'Dataset', as_train_test: bool = True) -> \ t.Union[t.Tuple, t.Union[Dataset, pd.DataFrame]]: """Load and returns the Avocado dataset (regression). The avocado dataset contains historical data on avocado prices and sales volume in multiple US markets https://www.kaggle.com/neuromusic/avocado-prices. This dataset is licensed under the Open Data Commons Open Database License (ODbL) v1.0 (https://opendatacommons.org/licenses/odbl/1-0/). The typical ML task in this dataset is to build a model that predicts the average price of Avocados. Dataset Shape: .. list-table:: Dataset Shape :widths: 50 50 :header-rows: 1 * - Property - Value * - Samples Total - 18.2K * - Dimensionality - 14 * - Features - real, string * - Targets - real 0.44 - 3.25 Description: .. 
list-table:: Dataset Description :widths: 50 50 50 :header-rows: 1 * - Column name - Column Role - Description * - Date - Datetime - The date of the observation * - Total Volume - Feature - Total number of avocados sold * - 4046 - Feature - Total number of avocados with PLU 4046 (small avocados) sold * - 4225 - Feature - Total number of avocados with PLU 4225 (large avocados) sold * - 4770 - Feature - Total number of avocados with PLU 4770 (xlarge avocados) sold * - Total Bags - Feature - * - Small Bags - Feature - * - Large Bags - Feature - * - XLarge Bags - Feature - * - type - Feature - Conventional or organic * - year - Feature - * - region - Feature - The city or region of the observation * - AveragePrice - Label - The average price of a single avocado Parameters ---------- data_format : str , default: Dataset Represent the format of the returned value. Can be 'Dataset'|'Dataframe' 'Dataset' will return the data as a Dataset object 'Dataframe' will return the data as a pandas Dataframe object as_train_test : bool , default: True If True, the returned data is splitted into train and test exactly like the toy model was trained. The first return value is the train data and the second is the test data. In order to get this model, call the load_fitted_model() function. Otherwise, returns a single object. Returns ------- dataset : Union[runml_checks.Dataset, pd.DataFrame] the data object, corresponding to the data_format attribute. train_data, test_data : Tuple[Union[runml_checks.Dataset, pd.DataFrame],Union[runml_checks.Dataset, pd.DataFrame] tuple if as_train_test = True. Tuple of two objects represents the dataset splitted to train and test sets. 
""" if not as_train_test: dataset = pd.read_csv(_FULL_DATA_URL) if data_format == 'Dataset': dataset = Dataset(dataset, label='AveragePrice', cat_features=_CAT_FEATURES, datetime_name='Date') return dataset else: train = pd.read_csv(_TRAIN_DATA_URL) test = pd.read_csv(_TEST_DATA_URL) if data_format == 'Dataset': train = Dataset(train, label='AveragePrice', cat_features=_CAT_FEATURES, datetime_name='Date') test = Dataset(test, label='AveragePrice', cat_features=_CAT_FEATURES, datetime_name='Date') return train, test def load_fitted_model(pretrained=True): """Load and return a fitted regression model to predict the AveragePrice in the avocado dataset. Returns ------- model : Joblib the model/pipeline that was trained on the Avocado dataset. """ if sklearn.__version__ == _MODEL_VERSION and pretrained: with urlopen(_MODEL_URL) as f: model = joblib.load(f) else: model = _build_model() train, _ = load_data() model.fit(train.data[train.features], train.data[train.label_name]) return model def _build_model(): """Build the model to fit.""" return Pipeline(steps=[ ('preprocessor', ColumnTransformer(transformers=[('num', Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('scaler', StandardScaler())]), _NUM_FEATURES), ('cat', OneHotEncoder(), _CAT_FEATURES)])), ('classifier', RandomForestRegressor(random_state=0, max_depth=7, n_estimators=30)) ])
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/datasets/regression/avocado.py
0.911548
0.705924
avocado.py
pypi
"""The data set contains features for binary prediction of the income of an adult (the adult dataset).""" import typing as t from urllib.request import urlopen import joblib import pandas as pd import sklearn from category_encoders import OrdinalEncoder from sklearn.compose import ColumnTransformer from sklearn.ensemble import RandomForestClassifier from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from runml_checks.tabular.dataset import Dataset __all__ = ['load_data', 'load_fitted_model'] _MODEL_URL = 'https://figshare.com/ndownloader/files/35122753' _FULL_DATA_URL = 'https://ndownloader.figshare.com/files/34516457' _TRAIN_DATA_URL = 'https://ndownloader.figshare.com/files/34516448' _TEST_DATA_URL = 'https://ndownloader.figshare.com/files/34516454' _MODEL_VERSION = '1.0.2' _FEATURES = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country'] _target = 'income' _CAT_FEATURES = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country'] _NUM_FEATURES = sorted(list(set(_FEATURES) - set(_CAT_FEATURES))) def load_data(data_format: str = 'Dataset', as_train_test: bool = True) -> \ t.Union[t.Tuple, t.Union[Dataset, pd.DataFrame]]: """Load and returns the Adult dataset (classification). The data has 48842 records with 14 features and one binary target column, referring to whether the person's income is greater than 50K. This is a copy of UCI ML Adult dataset. https://archive.ics.uci.edu/ml/datasets/adult References: * Ron Kohavi, "Scaling Up the Accuracy of Naive-Bayes Classifiers: a Decision-Tree Hybrid", Proceedings of the Second International Conference on Knowledge Discovery and Data Mining, 1996 The typical ML task in this dataset is to build a model that determines whether a person makes over 50K a year. Dataset Shape: .. 
list-table:: Dataset Shape :widths: 50 50 :header-rows: 1 * - Property - Value * - Samples Total - 48842 * - Dimensionality - 14 * - Features - real, string * - Targets - 2 * - Samples per class - '>50K' - 23.93%, '<=50K' - 76.07% Description: .. list-table:: Dataset Description :widths: 50 50 50 :header-rows: 1 * - Column name - Column Role - Description * - Age - Feature - The age of the person. * - workclass - Feature - [Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked] * - fnlwgt - Feature - Final weight. * - education - Feature - [Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool] * - education-num - Feature - Number of years of education * - marital-status - Feature - [Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse] * - occupation - Feature - [Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces] * - relationship - Feature - [Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried] * - race - Feature - [White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black] * - sex - Feature - [Male, Female] * - capital-gain - Feature - The capital gain of the person * - capital-loss - Feature - The capital loss of the person * - hours-per-week - Feature - The number of hours worked per week * - native-country - Feature - [United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, 
Trinadad&Tobago, Peru, Hong, Holand-Netherlands] * - target - Target - The target variable, whether the person makes over 50K a year. Parameters ---------- data_format : str, default: 'Dataset' Represent the format of the returned value. Can be 'Dataset'|'Dataframe' 'Dataset' will return the data as a Dataset object 'Dataframe' will return the data as a pandas Dataframe object as_train_test : bool, default: True If True, the returned data is splitted into train and test exactly like the toy model was trained. The first return value is the train data and the second is the test data. In order to get this model, call the load_fitted_model() function. Otherwise, returns a single object. Returns ------- dataset : Union[runml_checks.Dataset, pd.DataFrame] the data object, corresponding to the data_format attribute. train, test : Tuple[Union[runml_checks.Dataset, pd.DataFrame],Union[runml_checks.Dataset, pd.DataFrame] tuple if as_train_test = True. Tuple of two objects represents the dataset splitted to train and test sets. """ if not as_train_test: dataset = pd.read_csv(_FULL_DATA_URL, names=_FEATURES + [_target]) if data_format == 'Dataset': dataset = Dataset(dataset, label=_target, cat_features=_CAT_FEATURES) return dataset elif data_format == 'Dataframe': return dataset else: raise ValueError('data_format must be either "Dataset" or "Dataframe"') else: train = pd.read_csv(_TRAIN_DATA_URL, names=_FEATURES + [_target]) test = pd.read_csv(_TEST_DATA_URL, skiprows=1, names=_FEATURES + [_target]) test[_target] = test[_target].str[:-1] if data_format == 'Dataset': train = Dataset(train, label=_target, cat_features=_CAT_FEATURES) test = Dataset(test, label=_target, cat_features=_CAT_FEATURES) return train, test elif data_format == 'Dataframe': return train, test else: raise ValueError('data_format must be either "Dataset" or "Dataframe"') def load_fitted_model(pretrained=True): """Load and return a fitted classification model. 
Returns ------- model : Joblib The model/pipeline that was trained on the adult dataset. """ if sklearn.__version__ == _MODEL_VERSION and pretrained: with urlopen(_MODEL_URL) as f: model = joblib.load(f) else: model = _build_model() train, _ = load_data() model.fit(train.data[train.features], train.data[train.label_name]) return model def _build_model(): """Build the model to fit.""" numeric_transformer = SimpleImputer() categorical_transformer = Pipeline( steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', OrdinalEncoder())] ) preprocessor = ColumnTransformer( transformers=[ ('num', numeric_transformer, _NUM_FEATURES), ('cat', categorical_transformer, _CAT_FEATURES), ] ) model = Pipeline( steps=[ ('preprocessing', preprocessor), ('model', RandomForestClassifier(max_depth=5, n_jobs=-1, random_state=0)) ] ) return model
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/datasets/classification/adult.py
0.910598
0.680786
adult.py
pypi
"""The data set contains features for binary prediction of breast cancer.""" import typing as t from urllib.request import urlopen import joblib import pandas as pd import sklearn from sklearn.ensemble import AdaBoostClassifier from runml_checks.tabular.dataset import Dataset __all__ = ['load_data', 'load_fitted_model'] _MODEL_URL = 'https://figshare.com/ndownloader/files/35122759' _FULL_DATA_URL = 'https://ndownloader.figshare.com/files/33325472' _TRAIN_DATA_URL = 'https://ndownloader.figshare.com/files/33325556' _TEST_DATA_URL = 'https://ndownloader.figshare.com/files/33325559' _MODEL_VERSION = '1.0.2' _target = 'target' _CAT_FEATURES = [] def load_data(data_format: str = 'Dataset', as_train_test: bool = True) -> \ t.Union[t.Tuple, t.Union[Dataset, pd.DataFrame]]: """Load and returns the Breast Cancer dataset (classification). The data has 569 patient records with 30 features and one binary target column, referring to the presence of breast cancer in the patient. This is a copy of UCI ML Breast Cancer Wisconsin (Diagnostic) datasets. https://goo.gl/U2Uwz2 Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. Separating plane described above was obtained using Multisurface Method-Tree (MSM-T) [K. P. Bennett, “Decision Tree Construction Via Linear Programming.” Proceedings of the 4th Midwest Artificial Intelligence and Cognitive Science Society, pp. 97-101, 1992], a classification method which uses linear programming to construct a decision tree. Relevant features were selected using an exhaustive search in the space of 1-4 features and 1-3 separating planes. The actual linear program used to obtain the separating plane in the 3-dimensional space is that described in: [ K. P. Bennett and O. L. Mangasarian: “Robust Linear Programming Discrimination of Two Linearly Inseparable Sets”, Optimization Methods and Software 1, 1992, 23-34]. 
This database is also available through the UW CS ftp server: ftp ftp.cs.wisc.edu cd math-prog/cpo-dataset/machine-learn/WDBC/ References: * W.N. Street, W.H. Wolberg and O.L. Mangasarian. Nuclear feature extraction for breast tumor diagnosis. IS&T/SPIE 1993 International Symposium on Electronic Imaging: Science and Technology, volume 1905, pages 861-870, San Jose, CA, 1993. * O.L. Mangasarian, W.N. Street and W.H. Wolberg. Breast cancer diagnosis and prognosis via linear programming. Operations Research, 43(4), pages 570-577, July-August 1995. * W.H. Wolberg, W.N. Street, and O.L. Mangasarian. Machine learning techniques to diagnose breast cancer from fine-needle aspirates. Cancer Letters 77 (1994) 163-171. The typical ML task in this dataset is to build a model that classifies between benign and malignant samples. Ten real-valued features are computed for each cell nucleus: #. radius (mean of distances from center to points on the perimeter) #. texture (standard deviation of gray-scale values) #. perimeter #. area #. smoothness (local variation in radius lengths) #. compactness (perimeter^2 / area - 1.0) #. concavity (severity of concave portions of the contour) #. concave points (number of concave portions of the contour) #. symmetry #. fractal dimension ("coastline approximation" - 1) Dataset Shape: .. list-table:: Dataset Shape :widths: 50 50 :header-rows: 1 * - Property - Value * - Samples Total - 569 * - Dimensionality - 30 * - Features - real * - Targets - boolean Description: .. 
list-table:: Dataset Description :widths: 50 50 50 :header-rows: 1 * - mean radius - Feature - mean radius * - mean texture - Feature - mean texture * - mean perimeter - Feature - mean perimeter * - mean area - Feature - mean area * - mean smoothness - Feature - mean smoothness * - mean compactness - Feature - mean compactness * - mean concavity - Feature - mean concavity * - mean concave points - Feature - mean concave points * - mean symmetry - Feature - mean symmetry * - mean fractal dimension - Feature - mean fractal dimension * - radius error - Feature - radius error * - texture error - Feature - texture error * - perimeter error - Feature - perimeter error * - area error - Feature - area error * - smoothness error - Feature - smoothness error * - compactness error - Feature - compactness error * - concavity error - Feature - concavity error * - concave points error - Feature - concave points error * - symmetry error - Feature - symmetry error * - fractal dimension error - Feature - fractal dimension error * - worst radius - Feature - worst radius * - worst texture - Feature - worst texture * - worst perimeter - Feature - worst perimeter * - worst area - Feature - worst area * - worst smoothness - Feature - worst smoothness * - worst compactness - Feature - worst compactness * - worst concavity - Feature - worst concavity * - worst concave points - Feature - worst concave points * - worst symmetry - Feature - worst symmetry * - worst fractal dimension - Feature - worst fractal dimension * - target - Label - The class (Benign, Malignant) Parameters ---------- data_format : str, default: 'Dataset' Represent the format of the returned value. Can be 'Dataset'|'Dataframe' 'Dataset' will return the data as a Dataset object 'Dataframe' will return the data as a pandas Dataframe object as_train_test : bool, default: True If True, the returned data is splitted into train and test exactly like the toy model was trained. 
The first return value is the train data and the second is the test data. In order to get this model, call the load_fitted_model() function. Otherwise, returns a single object. Returns ------- dataset : Union[runml_checks.Dataset, pd.DataFrame] the data object, corresponding to the data_format attribute. train, test : Tuple[Union[runml_checks.Dataset, pd.DataFrame],Union[runml_checks.Dataset, pd.DataFrame] tuple if as_train_test = True. Tuple of two objects represents the dataset splitted to train and test sets. """ if not as_train_test: dataset = pd.read_csv(_FULL_DATA_URL) if data_format == 'Dataset': dataset = Dataset(dataset, label=_target, cat_features=_CAT_FEATURES) return dataset elif data_format == 'Dataframe': return dataset else: raise ValueError('data_format must be either "Dataset" or "Dataframe"') else: train = pd.read_csv(_TRAIN_DATA_URL) test = pd.read_csv(_TEST_DATA_URL) if data_format == 'Dataset': train = Dataset(train, label=_target, cat_features=_CAT_FEATURES) test = Dataset(test, label=_target, cat_features=_CAT_FEATURES) return train, test elif data_format == 'Dataframe': return train, test else: raise ValueError('data_format must be either "Dataset" or "Dataframe"') def load_fitted_model(pretrained=True): """Load and return a fitted classification model to predict the flower type in the iris dataset. Returns ------- model : Joblib The model/pipeline that was trained on the iris dataset. """ if sklearn.__version__ == _MODEL_VERSION and pretrained: with urlopen(_MODEL_URL) as f: model = joblib.load(f) else: model = _build_model() train, _ = load_data() model.fit(train.data[train.features], train.data[train.label_name]) return model def _build_model(): """Build the model to fit.""" return AdaBoostClassifier(random_state=0)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/datasets/classification/breast_cancer.py
0.932928
0.690716
breast_cancer.py
pypi
"""The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant.""" import typing as t from urllib.request import urlopen import joblib import pandas as pd import sklearn from sklearn.ensemble import RandomForestClassifier from runml_checks.tabular.dataset import Dataset __all__ = ['load_data', 'load_fitted_model'] _MODEL_URL = 'https://figshare.com/ndownloader/files/35122762' _FULL_DATA_URL = 'https://figshare.com/ndownloader/files/32652977' _TRAIN_DATA_URL = 'https://figshare.com/ndownloader/files/32653172' _TEST_DATA_URL = 'https://figshare.com/ndownloader/files/32653130' _MODEL_VERSION = '1.0.2' _target = 'target' _CAT_FEATURES = [] def load_data(data_format: str = 'Dataset', as_train_test: bool = True) -> \ t.Union[t.Tuple, t.Union[Dataset, pd.DataFrame]]: """Load and returns the Iris dataset (classification). The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. One class is linearly separable from the other 2; the latter are NOT linearly separable from each other. References ---------- * Fisher, R.A. “The use of multiple measurements in taxonomic problems” Annual Eugenics, 7, Part II, 179-188 (1936); also in “Contributions to Mathematical Statistics” (John Wiley, NY, 1950). * Duda, R.O., & Hart, P.E. (1973) Pattern Classification and Scene Analysis. (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218. * And many more.. The typical ML task in this dataset is to build a model that classifies the type of flower. Dataset Shape: .. list-table:: Dataset Shape :widths: 50 50 :header-rows: 1 * - Property - Value * - Samples Total - 150 * - Dimensionality - 4 * - Features - real * - Targets - 3 * - Samples per class - 50 Description: .. 
list-table:: Dataset Description :widths: 50 50 50 :header-rows: 1 * - Column name - Column Role - Description * - sepal length (cm) - Feature - The length of the flower's sepal (in cm) * - sepal width (cm) - Feature - The width of the flower's sepal (in cm) * - petal length (cm) - Feature - The length of the flower's petal (in cm) * - petal width (cm) - Feature - The width of the flower's petal (in cm) * - target - Label - The class (Setosa,Versicolour,Virginica) Parameters ---------- data_format : str , default: Dataset Represent the format of the returned value. Can be 'Dataset'|'Dataframe' 'Dataset' will return the data as a Dataset object 'Dataframe' will return the data as a pandas Dataframe object as_train_test : bool , default: True If True, the returned data is splitted into train and test exactly like the toy model was trained. The first return value is the train data and the second is the test data. In order to get this model, call the load_fitted_model() function. Otherwise, returns a single object. Returns ------- dataset : Union[runml_checks.Dataset, pd.DataFrame] the data object, corresponding to the data_format attribute. train, test : Tuple[Union[runml_checks.Dataset, pd.DataFrame],Union[runml_checks.Dataset, pd.DataFrame] tuple if as_train_test = True. Tuple of two objects represents the dataset splitted to train and test sets. 
""" if not as_train_test: dataset = pd.read_csv(_FULL_DATA_URL) if data_format == 'Dataset': dataset = Dataset(dataset, label=_target, cat_features=_CAT_FEATURES) return dataset else: train = pd.read_csv(_TRAIN_DATA_URL) test = pd.read_csv(_TEST_DATA_URL) if data_format == 'Dataset': train = Dataset(train, label=_target, cat_features=_CAT_FEATURES, label_type='classification_label') test = Dataset(test, label=_target, cat_features=_CAT_FEATURES, label_type='classification_label') return train, test def load_fitted_model(pretrained=True): """Load and return a fitted classification model to predict the flower type in the iris dataset. Returns ------- model : Joblib the model/pipeline that was trained on the iris dataset. """ if sklearn.__version__ == _MODEL_VERSION and pretrained: with urlopen(_MODEL_URL) as f: model = joblib.load(f) else: model = _build_model() train, _ = load_data() model.fit(train.data[train.features], train.data[train.label_name]) return model def _build_model(): """Build the model to fit.""" return RandomForestClassifier(random_state=0)
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/datasets/classification/iris.py
0.943263
0.66792
iris.py
pypi
"""The phishing dataset contains a slightly synthetic dataset of urls - some regular and some used for phishing.""" import typing as t from urllib.request import urlopen import joblib import pandas as pd import sklearn from category_encoders import OneHotEncoder from sklearn.compose import ColumnTransformer from sklearn.ensemble import RandomForestClassifier from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from runml_checks.tabular.dataset import Dataset __all__ = ['load_data', 'load_fitted_model'] _MODEL_URL = 'https://figshare.com/ndownloader/files/35122765' _FULL_DATA_URL = 'https://figshare.com/ndownloader/files/33079757' _TRAIN_DATA_URL = 'https://ndownloader.figshare.com/files/33079781' _TEST_DATA_URL = 'https://ndownloader.figshare.com/files/33079787' _MODEL_VERSION = '1.0.2' _target = 'target' _CAT_FEATURES = ['ext'] _NON_FEATURES = ['month', 'has_ip', 'urlIsLive'] _NUM_FEATURES = ['urlLength', 'numDigits', 'numParams', 'num_%20', 'num_@', 'entropy', 'hasHttp', 'hasHttps', 'dsr', 'dse', 'bodyLength', 'numTitles', 'numImages', 'numLinks', 'specialChars', 'scriptLength', 'sbr', 'bscr', 'sscr'] _DATE_COL = 'scrape_date' def load_data(data_format: str = 'Dataset', as_train_test: bool = True) -> \ t.Union[t.Tuple, t.Union[Dataset, pd.DataFrame]]: """Load and returns the phishing url dataset (classification). The phishing url dataset contains slightly synthetic dataset of urls - some regular and some used for phishing. The dataset is based on the `great project <https://github.com/Rohith-2/url_classification_dl>`_ by `Rohith Ramakrishnan <https://www.linkedin.com/in/rohith-ramakrishnan-54094a1a0/>`_ and others, accompanied by a `blog post <https://medium.com/nerd-for-tech/url-feature-engineering-and-classification-66c0512fb34d>`_. The authors have released it under an open license per our request, and for that we are very grateful to them. 
This dataset is licensed under the `Creative Commons Zero v1.0 Universal (CC0 1.0) <https://creativecommons.org/publicdomain/zero/1.0/>`_. The typical ML task in this dataset is to build a model that predicts the if the url is part of a phishing attack. Dataset Shape: .. list-table:: Dataset Shape :widths: 50 50 :header-rows: 1 * - Property - Value * - Samples Total - 11.35K * - Dimensionality - 25 * - Features - real, string * - Targets - boolean Description: .. list-table:: Dataset Description :widths: 50 50 50 :header-rows: 1 * - Column name - Column Role - Description * - target - Label - 0 if the URL is benign, 1 if it is related to phishing * - month - Data - The month this URL was first encountered, as an int * - scrape_date - Date - The exact date this URL was first encountered * - ext - Feature - The domain extension * - urlLength - Feature - The number of characters in the URL * - numDigits - Feature - The number of digits in the URL * - numParams - Feature - The number of query parameters in the URL * - num_%20 - Feature - The number of '%20' substrings in the URL * - num_@ - Feature - The number of @ characters in the URL * - entropy - Feature - The entropy of the URL * - has_ip - Feature - True if the URL string contains an IP address * - hasHttp - Feature - True if the url's domain supports http * - hasHttps - Feature - True if the url's domain supports https * - urlIsLive - Feature - The URL was live at the time of scraping * - dsr - Feature - The number of days since domain registration * - dse - Feature - The number of days since domain registration expired * - bodyLength - Feature - The number of characters in the URL's web page * - numTitles - Feature - The number of HTML titles (H1/H2/...) 
in the page * - numImages - Feature - The number of images in the page * - numLinks - Feature - The number of links in the page * - specialChars - Feature - The number of special characters in the page * - scriptLength - Feature - The number of characters in scripts embedded in the page * - sbr - Feature - The ratio of scriptLength to bodyLength (`= scriptLength / bodyLength`) * - bscr - Feature - The ratio of bodyLength to specialChars (`= specialChars / bodyLength`) * - sscr - Feature - The ratio of scriptLength to specialChars (`= scriptLength / specialChars`) Parameters ---------- data_format : str , default: Dataset Represent the format of the returned value. Can be 'Dataset'|'Dataframe' 'Dataset' will return the data as a Dataset object 'Dataframe' will return the data as a pandas Dataframe object as_train_test : bool , default: True If True, the returned data is splitted into train and test exactly like the toy model was trained. The first return value is the train data and the second is the test data. In order to get this model, call the load_fitted_model() function. Otherwise, returns a single object. Returns ------- dataset : Union[runml_checks.Dataset, pd.DataFrame] the data object, corresponding to the data_format attribute. train, test : Tuple[Union[runml_checks.Dataset, pd.DataFrame],Union[runml_checks.Dataset, pd.DataFrame] tuple if as_train_test = True. Tuple of two objects represents the dataset splitted to train and test sets. 
""" if not as_train_test: dataset = pd.read_csv(_FULL_DATA_URL, index_col=0) if data_format == 'Dataset': dataset = Dataset(dataset, label=_target, cat_features=_CAT_FEATURES, datetime_name=_DATE_COL) return dataset else: train = pd.read_csv(_TRAIN_DATA_URL, index_col=0) test = pd.read_csv(_TEST_DATA_URL, index_col=0) if data_format == 'Dataset': train = Dataset(train, label=_target, cat_features=_CAT_FEATURES, datetime_name=_DATE_COL) test = Dataset(test, label=_target, cat_features=_CAT_FEATURES, datetime_name=_DATE_COL) return train, test def load_fitted_model(pretrained=True): """Load and return a fitted regression model to predict the target in the phishing dataset. Returns ------- model : Joblib the model/pipeline that was trained on the phishing dataset. """ if sklearn.__version__ == _MODEL_VERSION and pretrained: with urlopen(_MODEL_URL) as f: model = joblib.load(f) else: model = _build_model() train, _ = load_data() model.fit(train.data[train.features], train.data[train.label_name]) return model class UrlDatasetProcessor: """A custom processing pipeline for the phishing URLs dataset.""" def _cols_to_scale(self, df: pd.DataFrame) -> t.List[object]: return [ i for i, x in df.dtypes.items() if pd.api.types.is_numeric_dtype(x) and i != _target ] def _shared_preprocess(self, df: pd.DataFrame) -> pd.DataFrame: df = df.copy() df[_DATE_COL] = pd.to_datetime( df[_DATE_COL], format='%Y-%m-%d') df = df.set_index(keys=_DATE_COL, drop=True) df = df.drop(_NON_FEATURES, axis=1) df = pd.get_dummies(df, columns=['ext']) return df def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame: """Fit this preprossor on the input dataframe and transform it.""" df = self._shared_preprocess(df) self.scaler = sklearn.preprocessing.StandardScaler() self.scale_cols = self._cols_to_scale(df) df[self.scale_cols] = self.scaler.fit_transform(df[self.scale_cols]) return df def transform(self, df: pd.DataFrame) -> pd.DataFrame: """Transform the input dataframe using this fitted 
preprossor.""" df = self._shared_preprocess(df) try: df[self.scale_cols] = self.scaler.transform(df[self.scale_cols]) return df except AttributeError as e: raise Exception( 'UrlDatasetProcessor is unfitted! Call fit_transform() first!' ) from e def get_url_preprocessor(): """Return a data processor object for the phishing URL dataset.""" return UrlDatasetProcessor() def _build_model(): """Build the model to fit.""" return Pipeline(steps=[ ('preprocessing', ColumnTransformer(transformers=[('num', SimpleImputer(), _NUM_FEATURES), ('cat', Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', OneHotEncoder())]), _CAT_FEATURES)])), ('model', RandomForestClassifier(criterion='entropy', n_estimators=40, random_state=0))])
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/datasets/classification/phishing.py
0.918763
0.584004
phishing.py
pypi
"""The data set contains features for binary prediction of whether a loan will be approved or not.""" import typing as t import warnings from urllib.request import urlopen import joblib import numpy as np import pandas as pd import sklearn from category_encoders import OrdinalEncoder from sklearn.compose import ColumnTransformer with warnings.catch_warnings(): warnings.simplefilter('ignore') from sklearn.experimental import enable_hist_gradient_boosting # noqa # pylint: disable=unused-import from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.pipeline import Pipeline from runml_checks.tabular.dataset import Dataset __all__ = ['load_data', 'load_fitted_model'] from runml_checks.utils.function import run_available_kwargs _MODEL_URL = 'https://ndownloader.figshare.com/files/35692190' _FULL_DATA_URL = 'https://ndownloader.figshare.com/files/35685218' _TRAIN_DATA_URL = 'https://ndownloader.figshare.com/files/35684222' _TEST_DATA_URL = 'https://ndownloader.figshare.com/files/35684816' _MODEL_VERSION = '1.0.2' _target = 'loan_status' _datetime_name = 'issue_d' _index_name = 'id' _CAT_FEATURES = ['addr_state', 'application_type', 'home_ownership', 'initial_list_status', 'purpose', 'term', 'verification_status', 'sub_grade'] _NUM_FEATURES = ['fico_range_low', 'total_acc', 'pub_rec', 'revol_util', 'annual_inc', 'int_rate', 'dti', 'mort_acc', 'loan_amnt', 'installment', 'pub_rec_bankruptcies', 'fico_range_high', 'revol_bal', 'open_acc', 'emp_length', 'time_to_earliest_cr_line'] _FEATURES = _CAT_FEATURES + _NUM_FEATURES def load_data(data_format: str = 'Dataset', as_train_test: bool = True) -> t.Union[ t.Tuple, t.Union[Dataset, pd.DataFrame]]: """Load and returns part of the Lending club dataset (classification). The partial data has 21668 records with 26 features and one binary target column, referring to whether the specified loan was approved. The partial data set contains the records from the years 2017,2018 for the months of June, July and August. 
The train set are the records from 2017 and the test set consists of the records from 2018. This is a partial copy of the dataset supplied in: https://www.kaggle.com/datasets/wordsforthewise/lending-club The typical ML task in this dataset is to build a model that determines whether a loan will be approved. For further details regarding the dataset features see https://figshare.com/articles/dataset/Lending_club_dataset_description/20016077 Parameters ---------- data_format : str, default: 'Dataset' Represent the format of the returned value. Can be 'Dataset'|'Dataframe' 'Dataset' will return the data as a Dataset object 'Dataframe' will return the data as a pandas Dataframe object as_train_test : bool, default: True If True, the returned data is splitted into train and test exactly like the toy model was trained. The first return value is the train data and the second is the test data. In order to get this model, call the load_fitted_model() function. Otherwise, returns a single object. Returns ------- dataset : Union[runml_checks.Dataset, pd.DataFrame] the data object, corresponding to the data_format attribute. train, test : Tuple[Union[runml_checks.Dataset, pd.DataFrame],Union[runml_checks.Dataset, pd.DataFrame] tuple if as_train_test = True. Tuple of two objects represents the dataset splitted to train and test sets. 
""" if not as_train_test: dataset = pd.read_csv(_FULL_DATA_URL, index_col=False) if data_format == 'Dataset': dataset = Dataset(dataset, label=_target, cat_features=_CAT_FEATURES, index_name=_index_name, datetime_name=_datetime_name) return dataset elif data_format == 'Dataframe': return dataset else: raise ValueError('data_format must be either "Dataset" or "Dataframe"') else: train = pd.read_csv(_TRAIN_DATA_URL, index_col=False) test = pd.read_csv(_TEST_DATA_URL, index_col=False) if data_format == 'Dataset': train = Dataset(train, label=_target, cat_features=_CAT_FEATURES, index_name=_index_name, datetime_name=_datetime_name) test = Dataset(test, label=_target, cat_features=_CAT_FEATURES, index_name=_index_name, datetime_name=_datetime_name) return train, test elif data_format == 'Dataframe': return train, test else: raise ValueError('data_format must be either "Dataset" or "Dataframe"') def load_fitted_model(pretrained=True): """Load and return a fitted classification model. Returns ------- model : Joblib The model/pipeline that was trained on the adult dataset. """ if sklearn.__version__ == _MODEL_VERSION and pretrained: with urlopen(_MODEL_URL) as f: model = joblib.load(f) else: model = _build_model() train, _ = load_data() model.fit(train.features_columns, train.data[_target]) return model def _build_model(): """Build the model to fit.""" categorical_transformer = Pipeline(steps=[('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value', unknown_value=np.nan, dtype=np.float64))]) preprocessor = ColumnTransformer( transformers=[('num', 'passthrough', _NUM_FEATURES), ('cat', categorical_transformer, _CAT_FEATURES), ]) model = Pipeline(steps=[('preprocessing', preprocessor), ('model', run_available_kwargs(HistGradientBoostingClassifier, max_depth=5, max_iter=200, random_state=42, categorical_features=[False] * len( _NUM_FEATURES) + [True] * len( _CAT_FEATURES)))]) return model
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/datasets/classification/lending_club.py
0.875388
0.541045
lending_club.py
pypi
"""Tabular objects validation utilities.""" import typing as t import numpy as np import pandas as pd from runml_checks import tabular from runml_checks.core import errors from runml_checks.utils.typing import BasicModel __all__ = [ 'model_type_validation', 'validate_model', 'ensure_dataframe_type', 'ensure_predictions_shape', 'ensure_predictions_proba', ] supported_models_link = ('https://docs.runml_checks.com/en/stable/user-guide/supported_models.html' '?utm_source=display_output&utm_medium=referral&utm_campaign=exception_link') supported_models_html = f'<a href="{supported_models_link}" target="_blank">supported model types</a>' def model_type_validation(model: t.Any): """Receive any object and check if it's an instance of a model we support. Parameters ---------- model: t.Any Raises ------ runml_checksValueError If the object is not of a supported type """ if not isinstance(model, BasicModel): raise errors.ModelValidationError( f'Model supplied does not meets the minimal interface requirements. Read more about {supported_models_html}' ) def validate_model( data: 'tabular.Dataset', model: t.Any ): """Check model is able to predict on the dataset. Parameters ---------- data : t.Union['base.Dataset', pd.DataFrame] model : t.Any Raises ------ runml_checksValueError if dataset does not match model. """ error_message = ( 'In order to evaluate model correctness we need not empty dataset ' 'with the same set of features that was used to fit the model. {0}' ) if isinstance(data, tabular.Dataset): features = data.data[data.features] else: features = data if features is None: raise errors.runml_checksValueError(error_message.format( 'But function received dataset without feature columns.' )) if len(features) == 0: raise errors.runml_checksValueError(error_message.format( 'But function received empty dataset.' 
)) try: model.predict(features.head(1)) except Exception as exc: raise errors.ModelValidationError( f'Got error when trying to predict with model on dataset: {str(exc)}' ) def ensure_dataframe_type(obj: t.Any) -> pd.DataFrame: """Ensure that given object is of type DataFrame or Dataset and return it as DataFrame. else raise error. Parameters ---------- obj : t.Any Object to ensure it is DataFrame or Dataset Returns ------- pd.DataFrame """ if isinstance(obj, pd.DataFrame): return obj elif isinstance(obj, tabular.Dataset): return obj.data else: raise errors.runml_checksValueError( f'dataset must be of type DataFrame or Dataset, but got: {type(obj).__name__}' ) def ensure_predictions_shape(pred: np.ndarray, data: pd.DataFrame) -> np.ndarray: """Ensure the predictions are in the right shape and if so return them. else raise error.""" if pred.shape != (len(data), ): raise errors.ValidationError(f'Prediction array excpected to be of shape {(len(data), )} ' f'but was: {pred.shape}') return pred def ensure_predictions_proba(pred_proba: np.ndarray, data: pd.DataFrame) -> np.ndarray: """Ensure the predictions are in the right shape and if so return them. else raise error.""" if len(pred_proba) != len(data): raise errors.ValidationError(f'Prediction propabilities excpected to be of length {len(data)} ' f'but was: {len(pred_proba)}') return pred_proba
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/utils/validation.py
0.906259
0.566678
validation.py
pypi
import warnings
from typing import Callable, Dict, List, Union

from runml_checks.tabular import Suite
from runml_checks.tabular.checks import (BoostingOverfit, CalibrationScore, CategoryMismatchTrainTest,
                                         ConflictingLabels, ConfusionMatrixReport, DataDuplicates,
                                         DatasetsSizeComparison, DateTrainTestLeakageDuplicates,
                                         DateTrainTestLeakageOverlap, FeatureLabelCorrelation,
                                         FeatureLabelCorrelationChange, IdentifierLabelCorrelation,
                                         IndexTrainTestLeakage, IsSingleValue, MixedDataTypes, MixedNulls,
                                         ModelInferenceTime, NewLabelTrainTest, OutlierSampleDetection,
                                         PerformanceReport, RegressionErrorDistribution, RegressionSystematicError,
                                         RocReport, SimpleModelComparison, SpecialCharacters,
                                         StringLengthOutOfBounds, StringMismatch, StringMismatchComparison,
                                         TrainTestFeatureDrift, TrainTestLabelDrift, TrainTestPredictionDrift,
                                         TrainTestSamplesMix, UnusedFeatures, WeakSegmentsPerformance,
                                         WholeDatasetDrift)

# 'data_integrity' added: it is the public replacement for the deprecated
# 'single_dataset_integrity' and was previously missing from the export list.
__all__ = ['single_dataset_integrity', 'data_integrity', 'train_test_leakage', 'train_test_validation',
           'model_evaluation', 'full_suite']

from runml_checks.utils.typing import Hashable


def single_dataset_integrity(**kwargs) -> Suite:
    """
    Create a suite that is meant to detect integrity issues within a single dataset (Deprecated).

    .. deprecated:: 0.7.0
            `single_dataset_integrity` is deprecated and will be removed in runml_checks 0.8 version, it is replaced by
            `data_integrity` suite.
    """
    warnings.warn(
        'the single_dataset_integrity suite is deprecated, use the data_integrity suite instead',
        DeprecationWarning
    )
    return data_integrity(**kwargs)


def data_integrity(columns: Union[Hashable, List[Hashable]] = None,
                   ignore_columns: Union[Hashable, List[Hashable]] = None,
                   n_top_columns: int = None,
                   n_samples: int = None,
                   random_state: int = 42,
                   n_to_show: int = 5,
                   **kwargs) -> Suite:
    """Suite for detecting integrity issues within a single dataset.

    Included checks: IsSingleValue, SpecialCharacters, MixedNulls, MixedDataTypes, StringMismatch,
    DataDuplicates, StringLengthOutOfBounds, ConflictingLabels, OutlierSampleDetection,
    FeatureLabelCorrelation, IdentifierLabelCorrelation
    (see :mod:`runml_checks.tabular.checks.data_integrity` for the API reference of each check).

    Parameters
    ----------
    columns : Union[Hashable, List[Hashable]] , default: None
        The columns to be checked. If None, all columns will be checked except the ones in `ignore_columns`.
    ignore_columns : Union[Hashable, List[Hashable]] , default: None
        The columns to be ignored. If None, no columns will be ignored.
    n_top_columns : int , optional
        number of columns to show ordered by feature importance (date, index, label are first) (check dependent)
    n_samples : int , default: None
        number of samples to use for checks that sample data. If None, using the default n_samples per check.
    random_state : int, default: 42
        random seed for all checks.
    n_to_show : int , default: 5
        number of top results to show (check dependent)
    **kwargs : dict
        additional arguments to pass to the checks.

    Returns
    -------
    Suite
        A suite for detecting integrity issues within a single dataset.

    Examples
    --------
    >>> from runml_checks.tabular.suites import data_integrity
    >>> suite = data_integrity(columns=['a', 'b', 'c'], n_samples=1_000_000)
    >>> result = suite.run(dataset)
    >>> result.show()

    See Also
    --------
    :ref:`quick_data_integrity`
    """
    # Forward every explicitly-passed (non-None) named argument to each check,
    # letting explicit **kwargs entries win over the named parameters.
    args = locals()
    args.pop('kwargs')
    non_none_args = {k: v for k, v in args.items() if v is not None}
    kwargs = {**non_none_args, **kwargs}
    return Suite(
        'Data Integrity Suite',
        IsSingleValue(**kwargs).add_condition_not_single_value(),
        SpecialCharacters(**kwargs).add_condition_ratio_of_special_characters_less_or_equal(),
        MixedNulls(**kwargs).add_condition_different_nulls_less_equal_to(),
        MixedDataTypes(**kwargs).add_condition_rare_type_ratio_not_in_range(),
        StringMismatch(**kwargs).add_condition_no_variants(),
        DataDuplicates(**kwargs).add_condition_ratio_less_or_equal(),
        StringLengthOutOfBounds(**kwargs).add_condition_ratio_of_outliers_less_or_equal(),
        ConflictingLabels(**kwargs).add_condition_ratio_of_conflicting_labels_less_or_equal(),
        OutlierSampleDetection(**kwargs),
        FeatureLabelCorrelation(**kwargs).add_condition_feature_pps_less_than(),
        IdentifierLabelCorrelation(**kwargs).add_condition_pps_less_or_equal()
    )


def train_test_leakage(**kwargs) -> Suite:
    """Create a suite for detecting data leakage between the training dataset and the test dataset (Deprecated).

    .. deprecated:: 0.7.0
            `train_test_leakage` is deprecated and will be removed in runml_checks 0.8 version, it is replaced by
            `train_test_validation` suite.
    """
    warnings.warn(
        'the train_test_leakage suite is deprecated, use the train_test_validation suite instead',
        DeprecationWarning
    )
    return train_test_validation(**kwargs)


def train_test_validation(columns: Union[Hashable, List[Hashable]] = None,
                          ignore_columns: Union[Hashable, List[Hashable]] = None,
                          n_top_columns: int = None,
                          n_samples: int = None,
                          random_state: int = 42,
                          n_to_show: int = 5,
                          **kwargs) -> Suite:
    """Suite for validating correctness of train-test split, including distribution, \
    leakage and integrity checks.

    Included checks: DatasetsSizeComparison, NewLabelTrainTest, CategoryMismatchTrainTest,
    StringMismatchComparison, DateTrainTestLeakageDuplicates, DateTrainTestLeakageOverlap,
    IndexTrainTestLeakage, TrainTestSamplesMix, FeatureLabelCorrelationChange,
    TrainTestFeatureDrift, TrainTestLabelDrift, WholeDatasetDrift
    (see :mod:`runml_checks.tabular.checks.train_test_validation` for the API reference of each check).

    Parameters
    ----------
    columns : Union[Hashable, List[Hashable]] , default: None
        The columns to be checked. If None, all columns will be checked except the ones in `ignore_columns`.
    ignore_columns : Union[Hashable, List[Hashable]] , default: None
        The columns to be ignored. If None, no columns will be ignored.
    n_top_columns : int , optional
        number of columns to show ordered by feature importance (date, index, label are first) (check dependent)
    n_samples : int , default: None
        number of samples to use for checks that sample data. If None, using the default n_samples per check.
    random_state : int, default: 42
        random seed for all checks.
    n_to_show : int , default: 5
        number of top results to show (check dependent)
    **kwargs : dict
        additional arguments to pass to the checks.

    Returns
    -------
    Suite
        A suite for validating correctness of train-test split, including distribution, \
        leakage and integrity checks.

    Examples
    --------
    >>> from runml_checks.tabular.suites import train_test_validation
    >>> suite = train_test_validation(columns=['a', 'b', 'c'], n_samples=1_000_000)
    >>> result = suite.run(train_dataset, test_dataset)
    >>> result.show()

    See Also
    --------
    :ref:`quick_train_test_validation`
    """
    args = locals()
    args.pop('kwargs')
    non_none_args = {k: v for k, v in args.items() if v is not None}
    kwargs = {**non_none_args, **kwargs}
    return Suite(
        'Train Test Validation Suite',
        DatasetsSizeComparison(**kwargs).add_condition_test_train_size_ratio_greater_than(),
        NewLabelTrainTest(**kwargs).add_condition_new_labels_number_less_or_equal(),
        CategoryMismatchTrainTest(**kwargs).add_condition_new_category_ratio_less_or_equal(),
        StringMismatchComparison(**kwargs).add_condition_no_new_variants(),
        DateTrainTestLeakageDuplicates(**kwargs).add_condition_leakage_ratio_less_or_equal(),
        DateTrainTestLeakageOverlap(**kwargs).add_condition_leakage_ratio_less_or_equal(),
        IndexTrainTestLeakage(**kwargs).add_condition_ratio_less_or_equal(),
        TrainTestSamplesMix(**kwargs).add_condition_duplicates_ratio_less_or_equal(),
        FeatureLabelCorrelationChange(**kwargs).add_condition_feature_pps_difference_less_than()
        .add_condition_feature_pps_in_train_less_than(),
        TrainTestFeatureDrift(**kwargs).add_condition_drift_score_less_than(),
        TrainTestLabelDrift(**kwargs).add_condition_drift_score_less_than(),
        WholeDatasetDrift(**kwargs).add_condition_overall_drift_value_less_than(),
    )


def model_evaluation(alternative_scorers: Dict[str, Callable] = None,
                     columns: Union[Hashable, List[Hashable]] = None,
                     ignore_columns: Union[Hashable, List[Hashable]] = None,
                     n_top_columns: int = None,
                     n_samples: int = None,
                     random_state: int = 42,
                     n_to_show: int = 5,
                     **kwargs) -> Suite:
    """Suite for evaluating the model's performance over different metrics, segments, error analysis, examining \
    overfitting, comparing to baseline, and more.

    Included checks: PerformanceReport, RocReport, ConfusionMatrixReport, TrainTestPredictionDrift,
    SimpleModelComparison, WeakSegmentsPerformance, CalibrationScore, RegressionSystematicError,
    RegressionErrorDistribution, UnusedFeatures, BoostingOverfit, ModelInferenceTime
    (see :mod:`runml_checks.tabular.checks.model_evaluation` for the API reference of each check).

    Parameters
    ----------
    alternative_scorers : Dict[str, Callable], default: None
        An optional dictionary of scorer name to scorer functions.
        If none given, use default scorers
    columns : Union[Hashable, List[Hashable]] , default: None
        The columns to be checked. If None, all columns will be checked except the ones in `ignore_columns`.
    ignore_columns : Union[Hashable, List[Hashable]] , default: None
        The columns to be ignored. If None, no columns will be ignored.
    n_top_columns : int , optional
        number of columns to show ordered by feature importance (date, index, label are first) (check dependent)
    n_samples : int , default: None
        number of samples to use for checks that sample data. If None, using the default n_samples per check.
    random_state : int, default: 42
        random seed for all checks.
    n_to_show : int , default: 5
        number of top results to show (check dependent)
    **kwargs : dict
        additional arguments to pass to the checks.

    Returns
    -------
    Suite
        A suite for evaluating the model's performance.

    Examples
    --------
    >>> from runml_checks.tabular.suites import model_evaluation
    >>> suite = model_evaluation(columns=['a', 'b', 'c'], n_samples=1_000_000)
    >>> result = suite.run(train_dataset, test_dataset, model)
    >>> result.show()

    See Also
    --------
    :ref:`quick_full_suite`
    """
    args = locals()
    args.pop('kwargs')
    non_none_args = {k: v for k, v in args.items() if v is not None}
    kwargs = {**non_none_args, **kwargs}
    return Suite(
        'Model Evaluation Suite',
        PerformanceReport(**kwargs).add_condition_train_test_relative_degradation_less_than(),
        RocReport(**kwargs).add_condition_auc_greater_than(),
        ConfusionMatrixReport(**kwargs),
        TrainTestPredictionDrift(**kwargs).add_condition_drift_score_less_than(),
        SimpleModelComparison(**kwargs).add_condition_gain_greater_than(),
        WeakSegmentsPerformance(**kwargs).add_condition_segments_relative_performance_greater_than(),
        CalibrationScore(**kwargs),
        RegressionSystematicError(**kwargs).add_condition_systematic_error_ratio_to_rmse_less_than(),
        RegressionErrorDistribution(**kwargs).add_condition_kurtosis_greater_than(),
        UnusedFeatures(**kwargs).add_condition_number_of_high_variance_unused_features_less_or_equal(),
        BoostingOverfit(**kwargs).add_condition_test_score_percent_decline_less_than(),
        ModelInferenceTime(**kwargs).add_condition_inference_time_less_than(),
    )


def full_suite(**kwargs) -> Suite:
    """Create a suite that includes many of the implemented checks, for a quick overview of your model and data."""
    return Suite(
        'Full Suite',
        model_evaluation(**kwargs),
        train_test_validation(**kwargs),
        data_integrity(**kwargs),
    )
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/tabular/suites/default_suites.py
0.896115
0.584864
default_suites.py
pypi
"""ASV-style benchmarks that time and memory-profile every deepchecks vision check on MNIST and COCO."""
import inspect
from typing import Callable

import torch

from deepchecks.core.errors import DeepchecksBaseError
from deepchecks.vision import Context, SingleDatasetCheck, TrainTestCheck, checks
from deepchecks.vision.datasets.classification import mnist
from deepchecks.vision.datasets.detection import coco
from deepchecks.vision.vision_data import VisionData

# Benchmark on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")


def create_static_predictions(train: VisionData, test: VisionData, model):
    """Pre-compute model predictions for every sample of both datasets.

    Returns a pair of dicts (train, test) mapping sample index -> prediction,
    so the benchmarked checks don't pay for inference. A dataset passed as
    None yields None in its slot.
    """
    static_preds = []
    for vision_data in [train, test]:
        if vision_data is not None:
            static_pred = {}
            for i, batch in enumerate(vision_data):
                predictions = vision_data.infer_on_batch(batch, model, device)
                # batch_sampler yields the original sample indexes of batch i,
                # keeping predictions aligned with dataset order.
                indexes = list(vision_data.data_loader.batch_sampler)[i]
                static_pred.update(dict(zip(indexes, predictions)))
        else:
            static_pred = None
        static_preds.append(static_pred)
    train_preds, tests_preds = static_preds
    return train_preds, tests_preds


def run_check_fn(check_class) -> Callable:
    """Build a benchmark method that instantiates and runs `check_class` on a cached dataset."""
    def run(self, cache, dataset_name):
        train_ds, test_ds, train_pred, test_pred = cache[dataset_name]
        check = check_class()
        try:
            if isinstance(check, SingleDatasetCheck):
                check.run(train_ds, train_predictions=train_pred, device=device)
            elif isinstance(check, TrainTestCheck):
                check.run(train_ds, test_ds, train_predictions=train_pred,
                          test_predictions=test_pred, device=device)
        except DeepchecksBaseError:
            # Deliberate: a failing check should not abort the timing run;
            # we only benchmark execution, not correctness.
            pass
    return run


def setup_mnist() -> tuple:
    """Load the MNIST datasets and pre-computed predictions: (train_ds, test_ds, train_preds, test_preds)."""
    mnist_model = mnist.load_model()
    train_ds = mnist.load_dataset(train=True, object_type='VisionData')
    test_ds = mnist.load_dataset(train=False, object_type='VisionData')
    train_preds, tests_preds = create_static_predictions(train_ds, test_ds, mnist_model)
    return train_ds, test_ds, train_preds, tests_preds


def setup_coco() -> tuple:
    """Load the COCO datasets and pre-computed predictions: (train_ds, test_ds, train_preds, test_preds)."""
    coco_model = coco.load_model()
    train_ds = coco.load_dataset(train=True, object_type='VisionData')
    test_ds = coco.load_dataset(train=False, object_type='VisionData')
    train_preds, tests_preds = create_static_predictions(train_ds, test_ds, coco_model)
    return train_ds, test_ds, train_preds, tests_preds


class BenchmarkVision:
    """ASV benchmark suite; `time_*` / `peakmem_*` methods are attached dynamically below."""

    # Per-benchmark timeout in seconds (ASV convention).
    timeout = 120
    params = ['mnist', 'coco']
    param_names = ['dataset_name']

    def setup_cache(self):
        # Computed once by ASV and passed to every benchmark method.
        cache = {}
        cache['mnist'] = setup_mnist()
        cache['coco'] = setup_coco()
        return cache


# Register one timing and one peak-memory benchmark per vision check class.
for name, check_class in inspect.getmembers(checks):
    if inspect.isclass(check_class):
        run_fn = run_check_fn(check_class)
        setattr(BenchmarkVision, f'time_{name}', run_fn)
        setattr(BenchmarkVision, f'peakmem_{name}', run_fn)
/runml_checks-1.0.0-py3-none-any.whl/benchmarks/vision_bench.py
0.641198
0.414366
vision_bench.py
pypi
from .runnerlog import RunnerLog as Logger class Assertion: '''Assertion class provides the assert statement in test case''' def __init__(self, order = None): self.failureException = AssertionError self.order = order def assertTrue(self, expr=True, msg=None): '''Fail the test unless the expression is true.''' if expr != True: Logger.check_fail(msg, self.order) raise self.failureException, (msg + " deviceOrder=%s"%(self.order if self.order else 0)) def assertFalse(self, expr=False, msg=None): '''Fail the test unless the expression is false.''' if expr != False: Logger.check_fail(msg, self.order) raise self.failureException, (msg + " deviceOrder=%s"%(self.order if self.order else 0)) def assertEqual(self, first, second, msg=None): ''' Fail if the two objects are unequal as determined by the == operator. ''' if not (first == second): if msg: Logger.check_fail(msg, self.order) raise self.failureException, (msg + " deviceOrder=%s"%(self.order if self.order else 0)) else: Logger.check_fail('%s != %s'%(first, second), self.order) raise self.failureException, '%s != %s'%(first, second) + " deviceOrder=%s"%(self.order if self.order else 0) def assertNotEqual(self, first, second, msg=None): ''' Fail if the two objects are equal as determined by the == operator. ''' if first == second: if msg: Logger.check_fail(msg, self.order) raise self.failureException, (msg + " deviceOrder=%s"%(self.order if self.order else 0)) else: Logger.check_fail('%s != %s'%(first, second), self.order) raise self.failureException, '%s != %s'%(first, second) + " deviceOrder=%s"%(self.order if self.order else 0) def assertMsg(self, msg): Logger.check_fail(msg, self.order) raise self.failureException, (msg + " deviceOrder=%d"%(self.order if self.order else 0))
/runner-easyuiautomator-1.3.tar.gz/runner-easyuiautomator-1.3/runner/common/assertion.py
0.501709
0.186003
assertion.py
pypi
import logging
import traceback
from time import perf_counter
from typing import List, Optional, Sequence

import pandas as pd  # type: ignore

from mate.alerts import Alert, AlertTarget, FeatureAlertKind, InferenceException
from mate.checks import is_out_of_bounds, is_outlier
from mate.db import (
    Feature,
    FeatureAlert,
    FeatureValue,
    Inference,
    NumericalStats,
    StringStats,
    get_current_mate,
    get_features,
    get_statistics,
)
from mate.stats import CustomStats, FeatureType, Statistics

MATE_STATISTICS_PATH_VAR = "MATE_STATISTICS_PATH"
MATE_CONSTRAINTS_PATH_VAR = "MATE_CONSTRAINTS_PATH"

logger = logging.getLogger("mate")


class RunningMate(object):
    """Context manager that monitors a single model-inference run.

    On construction it checks the incoming single-row DataFrame against the
    stored baseline statistics of the named mate and collects FeatureAlerts.
    Used as ``with RunningMate(...) as mate: ...`` it times the wrapped
    inference, records the runtime, and dispatches alerts (including any
    exception raised inside the block) to the configured targets on exit.
    """

    mate_name: str
    statistics: Optional[Statistics]
    feature_alerts: List[FeatureAlert]
    targets: Sequence[AlertTarget]

    def __init__(
        self,
        mate_name: str,
        mate_version: int,
        df: pd.DataFrame,
        targets: Sequence[AlertTarget],
        custom_stats: Optional[List[CustomStats]] = None,
        should_save_all_feature_values: bool = False,
    ):
        self.mate_name = mate_name
        self.mate_version = mate_version
        self.targets = targets
        self.custom_stats = custom_stats
        self.should_save_all_feature_values = should_save_all_feature_values
        # Always initialized so __exit__ is safe even when no mate version exists
        # (previously these attributes were only set inside the if-branch).
        self.feature_alerts: List[FeatureAlert] = []
        self.inference: Optional[Inference] = None

        self.mate = get_current_mate(self.mate_name)
        if self.mate:
            self.features = get_features(self.mate)
            self.inference = Inference.create(mate=self.mate)
            logger.info("\n")
            logger.info(f"Using mate version {self.mate_version}.")
            if self.should_save_all_feature_values:
                for feature in self.features:
                    # NOTE(review): col.item() assumes df holds exactly one row per
                    # feature column — confirm against callers.
                    col = df[feature.name]
                    FeatureValue.create(
                        value=col.item(),
                        feature=feature,
                        inference=self.inference,
                    )
            self.feature_alerts.extend(self._check_statistics(df))
        else:
            logger.info("\n")
            logger.info(f"Mate version {self.mate_version} not found.")

    def _check_statistics(self, df: pd.DataFrame) -> List[FeatureAlert]:
        """Run baseline and custom-stat checks over every known feature; return created alerts."""
        result = []
        for feature in self.features:
            col = df[feature.name]
            result.extend(self._check_feature_statistics(feature, col))
            if self.custom_stats:
                for stat in self.custom_stats:
                    if feature.name == stat.feature:
                        feature_value, created = FeatureValue.get_or_create(
                            feature=feature, value=col.item(), inference=self.inference
                        )
                        result.append(
                            FeatureAlert.create(
                                name=col.name,
                                kind=stat.name,
                                feature_value=feature_value,
                                feature=feature,
                                inference=self.inference,
                            )
                        )
        logger.info(f"Found {len(result)} statistical alerts.")
        return result

    def _check_feature_statistics(
        self, feature: Feature, col: pd.Series
    ) -> List[FeatureAlert]:
        """Check one feature value against its stored numerical/string baseline stats."""
        result: List[FeatureAlert] = []
        num_missing = None
        if feature.inferred_type in [
            FeatureType.INTEGER.value,
            FeatureType.FRACTION.value,
        ]:
            numerical_statistic = get_statistics(NumericalStats, feature)
            # outlier check (deviation from baseline mean/std)
            if is_outlier(
                float(col.item()), numerical_statistic.mean, numerical_statistic.std_dev
            ):
                feature_value, created = FeatureValue.get_or_create(
                    feature=feature, value=col.item(), inference=self.inference
                )
                result.append(
                    FeatureAlert.create(
                        name=col.name,
                        kind=FeatureAlertKind.OUTLIER.value,
                        feature_value=feature_value,
                        feature=feature,
                        inference=self.inference,
                    )
                )
            # bound check (outside baseline min/max)
            if is_out_of_bounds(
                float(col.item()),
                float(numerical_statistic.min),
                float(numerical_statistic.max),
            ):
                feature_value, created = FeatureValue.get_or_create(
                    feature=feature, value=col.item(), inference=self.inference
                )
                result.append(
                    FeatureAlert.create(
                        name=col.name,
                        kind=FeatureAlertKind.BOUND.value,
                        feature_value=feature_value,
                        feature=feature,
                        inference=self.inference,
                    )
                )
            num_missing = numerical_statistic.num_missing
        if feature.inferred_type == FeatureType.STRING.value:
            string_statistic = get_statistics(StringStats, feature)
            num_missing = string_statistic.num_missing
        # null check: alert only if the baseline never saw missing values
        if num_missing is not None and num_missing == 0:
            if col.isnull().any():
                feature_value, created = FeatureValue.get_or_create(
                    feature=feature, value=col.item(), inference=self.inference
                )
                result.append(
                    FeatureAlert.create(
                        name=col.name,
                        kind=FeatureAlertKind.NULL.value,
                        feature_value=feature_value,
                        feature=feature,
                        inference=self.inference,
                    )
                )
        return result

    def __enter__(self):
        self.time_start = perf_counter()
        # Fixed: return self so `with RunningMate(...) as mate:` binds the instance
        # (previously it bound None).
        return self

    def __exit__(self, type, value, tb):
        time_end = perf_counter()
        runtime = time_end - self.time_start
        # Only persist the runtime when an Inference row was created (mate found).
        if self.inference is not None:
            self.inference.runtime = runtime
            self.inference.save()
        logger.info(f"Elapsed inference time in seconds: {runtime}")
        inference_exception = None
        if type is not None:
            logger.exception(f"Exception occurred during model inference: {value}")
            inference_exception = InferenceException(
                message=value,
                traceback="\n".join(traceback.format_exception(type, value, tb)),
            )
        alert = Alert(self.mate_name, self.feature_alerts, inference_exception)
        if len(alert.features) > 0 or alert.exception is not None:
            for target in self.targets:
                target.send_alert(alert)
/running_mate-0.0.6-py3-none-any.whl/mate/run.py
0.781664
0.196055
run.py
pypi
import json
import logging
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Dict, List, Optional, Union

import requests  # type: ignore

from mate.db import FeatureAlert

logger = logging.getLogger("mate")


class FeatureAlertKind(Enum):
    """Kinds of statistical feature alerts."""

    OUTLIER = "outlier"  # median absolute deviation is greater than 4.0
    BOUND = "bound"  # outside of lower or upper bounds
    TYPE = "type"  # incorrect data type
    NULL = "null"  # null when feature is non-nullable


@dataclass
class InferenceException:
    """Exception raised during model inference, captured for alerting."""

    message: str
    traceback: str


@dataclass
class Alert:
    """An alert for one inference run: feature alerts and/or an inference exception."""

    mate_name: str
    features: List[FeatureAlert]
    exception: Optional[InferenceException]


@dataclass
class AlertOut:
    """Serializable alert payload sent to webhook targets."""

    mate_name: str
    mate_version: int
    features: List[Dict[str, str]]


class AlertTarget(ABC):
    """Destination for alerts; subclasses implement formatting and delivery."""

    @abstractmethod
    def _alert_webhook_url(self) -> str:
        pass

    @abstractmethod
    def _format_alert(self, alert: Alert) -> Union[str, Dict]:
        pass

    def send_alert(self, alert: Alert):
        pass


class AlertWebhookTarget(AlertTarget):
    """Deliver alerts as JSON to an arbitrary HTTP webhook."""

    def __init__(self, webhook_url: str):
        self.webhook_url = webhook_url

    def _alert_webhook_url(self) -> str:
        return self.webhook_url

    def _format_alert(self, alert: Alert) -> Dict[str, str]:
        features: List[Dict[str, str]] = []
        # Fixed: mate_version was previously only assigned inside the loop, so an
        # exception-only alert (no feature alerts) raised UnboundLocalError here.
        mate_version = None
        for feature_alert in alert.features:
            features.append(
                {
                    "feature_name": feature_alert.name,
                    "feature_alert_kind": feature_alert.kind,
                    "feature_value": feature_alert.feature_value.value,
                }
            )
            mate_version = feature_alert.feature.mate.version
        alert_out = AlertOut(
            mate_name=alert.mate_name, mate_version=mate_version, features=features
        )
        return asdict(alert_out)

    def send_alert(self, alert: Alert):
        if (alert.exception is None) and (len(alert.features) == 0):
            logger.error("Alert has no exception/feature alerts. Not sending")
            return
        formatted_alert = self._format_alert(alert)
        logger.info(formatted_alert)
        resp = requests.post(self._alert_webhook_url(), json=formatted_alert)
        if resp.ok:
            logger.info(f"Sent alert to {type(self).__name__}")
        else:
            logger.error(
                f"Failed to send alert to {type(self).__name__}. "
                + f"Code: {resp.status_code}. Body: {resp.text}"
            )


class TerminalAlertTarget(AlertTarget):
    """
    Output alert to the terminal
    """

    def _alert_webhook_url(self) -> str:
        pass

    def _format_alert(self, alert: Alert) -> Union[str, Dict]:
        alert_message = f"\nFeature Alerts for the '{alert.mate_name}' mate:\n"
        for feature_alert in alert.features:
            feature_message = (
                f"\n  Feature Name: {feature_alert.name}"
                + f"\n  Feature Alert Kind: {feature_alert.kind}"
                + f"\n  Feature Value: {feature_alert.feature_value.value}\n"
            )
            alert_message += feature_message
        return alert_message

    def send_alert(self, alert: Alert):
        print(self._format_alert(alert))


class SlackAlertTarget(AlertWebhookTarget):
    """
    Output alert to a Slack Channel via a Slack Webhook

    Parameters:
        slack_webhook_path (str):
            - Slack Webhook path
            - FORMAT: /XXXXX/XXXXXX/XXXXXXXXXXXXXXXXXXXX)
            - EXAMPLE: Given https://hooks.slack.com/services/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
              the slack_webhook_path would be /TG8BB9UKZ/B02LE8DAU5V/KJ1Uyc1ZZcK1pEal44X5GWdL
    """

    def __init__(self, slack_webhook_path: str):
        self.slack_webhook_path = slack_webhook_path

    def _alert_webhook_url(self) -> str:
        return f"https://hooks.slack.com/services{self.slack_webhook_path}"

    def _format_alert(self, alert: Alert) -> Dict[str, str]:
        # Slack expects the payload under a "text" key; reuse the parent
        # formatter instead of duplicating it (also inherits its empty-features fix).
        return {"text": json.dumps(super()._format_alert(alert))}
/running_mate-0.0.6-py3-none-any.whl/mate/alerts.py
0.809276
0.170404
alerts.py
pypi
"""Peewee models and helpers for the mate SQLite monitoring database."""
import datetime
import logging
import os
import pathlib
from typing import List, Union

from peewee import (  # type: ignore
    CharField,
    DatabaseProxy,
    DateTimeField,
    FloatField,
    ForeignKeyField,
    IntegerField,
    Model,
    SqliteDatabase,
)

logger = logging.getLogger("mate")

# Proxy lets the concrete database (in-memory vs on-disk) be chosen at runtime in init_db().
db = DatabaseProxy()


class Mate(Model):
    """A monitored model; each (name, version) pair is one row."""

    name = CharField()
    version = IntegerField()
    item_count = IntegerField(null=True)  # number of baseline rows; filled in later
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "mate"


class Inference(Model):
    """One inference run of a mate."""

    mate = ForeignKeyField(Mate)
    # NOTE(review): runtime is stored as text although it holds a float duration — confirm intent.
    runtime = CharField(null=True)
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "inference"


class Feature(Model):
    """A single input feature of a mate, with its inferred type."""

    name = CharField()
    inferred_type = CharField()
    mate = ForeignKeyField(Mate)
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "feature"


class NumericalStats(Model):
    """Baseline statistics for a numeric feature."""

    num_present = IntegerField()
    num_missing = IntegerField()
    mean = FloatField()
    sum = IntegerField()
    std_dev = FloatField()
    min = IntegerField()
    max = IntegerField()
    feature = ForeignKeyField(Feature)
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "numerical_stats"


class StringStats(Model):
    """Baseline statistics for a string feature."""

    num_present = IntegerField()
    num_missing = IntegerField()
    distinct_count = IntegerField()
    feature = ForeignKeyField(Feature)
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "string_stats"


class FeatureValue(Model):
    """An observed feature value for one inference."""

    value = CharField(null=True)
    feature = ForeignKeyField(Feature)
    inference = ForeignKeyField(Inference)
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "feature_value"


class FeatureAlert(Model):
    """An alert raised for a feature value during an inference."""

    name = CharField()
    kind = CharField()
    feature_value = ForeignKeyField(FeatureValue)
    feature = ForeignKeyField(Feature)
    inference = ForeignKeyField(Inference)

    class Meta:
        database = db
        table_name = "feature_alert"


def connect_db():
    """Initialize the database connection and create all tables if any is missing."""
    db = init_db()
    db.connect()
    if (
        not db.table_exists("mate")
        or not db.table_exists("inference")
        or not db.table_exists("feature")
        or not db.table_exists("numerical_stats")
        or not db.table_exists("string_stats")
        or not db.table_exists("feature_alert")
        or not db.table_exists("feature_value")
    ):
        db.create_tables(
            [
                Mate,
                Inference,
                Feature,
                NumericalStats,
                StringStats,
                FeatureValue,
                FeatureAlert,
            ]
        )


def get_current_mate(name: str) -> Union[Mate, None]:
    """Return the highest-versioned Mate with the given name, or None if absent."""
    mate = Mate.select().where(Mate.name == name).order_by(Mate.version.desc()).limit(1)
    if mate:
        return mate[0]
    return None


def get_features(mate: Mate) -> List[Feature]:
    """Return all features belonging to the given mate."""
    features = Feature.select().where(Feature.mate == mate)
    return features


def get_feature_values(mate: Mate) -> List[FeatureValue]:
    """Return all recorded feature values for the given mate."""
    feature_values = FeatureValue.select().join(Feature).where(Feature.mate == mate)
    return feature_values


def get_statistics(Stats: Model, feature: Feature) -> Model:
    """Return the stats row (NumericalStats or StringStats) for the given feature."""
    return Stats.get(feature=feature)


def init_db() -> SqliteDatabase:
    """Bind the database proxy: in-memory SQLite under TESTING=1, else ./mate.db on disk."""
    test_mode = int(os.getenv("TESTING", "0"))
    if test_mode:
        database = SqliteDatabase(":memory:")
        logger.info("Using in-memory SQLite")
    else:
        BASE = pathlib.Path.cwd()
        database = SqliteDatabase(BASE / "mate.db")
        logger.info("Using disk-based SQLite")
    db.initialize(database)
    return db


def version_or_create_mate(name: str) -> Mate:
    """Create and save a Mate row named `name`, bumping the version if one already exists."""
    current_mate = get_current_mate(name)
    if current_mate:
        version = int(current_mate.version) + 1
        logger.info(f"Model found. Creating new version: {version}.")
    else:
        version = 1
        logger.info("Model not found. Creating new mate.")
    mate = Mate(name=name, version=version)
    mate.save()
    return mate
/running_mate-0.0.6-py3-none-any.whl/mate/db.py
0.661923
0.190799
db.py
pypi
"""Generate and persist baseline statistics for a mate from a DataFrame."""
import pandas as pd  # type: ignore

from mate.db import Feature, NumericalStats, StringStats, get_current_mate
from mate.stats import (
    CommonStatistics,
    FeatureStatistics,
    FeatureType,
    NumericalStatistics,
    Statistics,
    StringStatistics,
)


def generate_baseline_stats(df: pd.DataFrame, name: str):
    """Compute per-feature baseline statistics for *df* and store them.

    Looks up the current Mate called *name*; silently does nothing if it
    does not exist. For each column a Feature row is created, plus a
    NumericalStats or StringStats row depending on the inferred type.
    """
    statistics = _gen_statistics(df)
    mate = get_current_mate(name)
    if mate:
        mate.item_count = statistics.item_count
        mate.save()
        for feature in statistics.features:
            feature_stats = Feature(
                name=feature.name, inferred_type=feature.inferred_type, mate=mate
            )
            feature_stats.save()
            if feature.numerical_statistics:
                NumericalStats.create(
                    num_present=feature.numerical_statistics.common.num_present,
                    num_missing=feature.numerical_statistics.common.num_missing,
                    mean=feature.numerical_statistics.mean,
                    sum=feature.numerical_statistics.sum,
                    std_dev=feature.numerical_statistics.std_dev,
                    min=feature.numerical_statistics.min,
                    max=feature.numerical_statistics.max,
                    feature=feature_stats,
                )
            if feature.string_statistics:
                StringStats.create(
                    num_present=feature.string_statistics.common.num_present,
                    num_missing=feature.string_statistics.common.num_missing,
                    distinct_count=feature.string_statistics.distinct_count,
                    feature=feature_stats,
                )


def _create_statistics_feature(feature_series: pd.Series) -> FeatureStatistics:
    """Build a FeatureStatistics for one column (series) of the baseline."""
    feature_name = feature_series.name
    feature_type = _infer_feature_type(feature_series)
    feature = FeatureStatistics(name=feature_name, inferred_type=feature_type.value)
    n_missing = feature_series.isna().sum()
    n_present = len(feature_series) - n_missing
    common = CommonStatistics(n_present, n_missing)
    if feature_type in [FeatureType.INTEGER, FeatureType.FRACTION]:
        feature.numerical_statistics = NumericalStatistics(
            common=common,
            mean=feature_series.mean(),
            sum=feature_series.sum(),
            std_dev=feature_series.std(),
            min=feature_series.min(),
            max=feature_series.max(),
        )
    elif feature_type == FeatureType.STRING:
        feature.string_statistics = StringStatistics(
            common=common, distinct_count=len(feature_series.dropna().unique())
        )
    return feature


def _gen_statistics(df: pd.DataFrame) -> Statistics:
    """Compute the Statistics container for the whole DataFrame."""
    statistics = Statistics(
        item_count=len(df),
        features=[
            # BUGFIX: DataFrame.iteritems() was removed in pandas 2.0;
            # DataFrame.items() is the drop-in replacement (available since
            # pandas 0.25, so this stays backward compatible).
            _create_statistics_feature(feature_series)
            for name, feature_series in df.items()
        ],
    )
    return statistics


def _infer_feature_type(feature_series: pd.Series) -> FeatureType:
    """Map a pandas dtype (plus object-column sniffing) to a FeatureType."""
    dtype_name = str(feature_series.dtype)
    # {"int8", "int16", "int32", "int64", "intp"}
    # {"uint8", "uint16", "uint32", "uint64", "uintp"}
    if dtype_name.startswith("int") or dtype_name.startswith("uint"):
        feature_type = FeatureType.INTEGER
    # {"float16", "float32", "float64", "float96", "float128"}:
    elif dtype_name.startswith("float"):
        feature_type = FeatureType.FRACTION
    # {"string", "<U16/32/...", ">U16/32/...", "=U16/32/..."}
    elif (dtype_name == "string") or (dtype_name[:2] in {"<U", ">U", "=U"}):
        feature_type = FeatureType.STRING
    elif dtype_name == "object":
        feature_type = FeatureType.UNKNOWN
        # attempt to infer if object dtype is actually a string
        types = set(map(type, feature_series.dropna()))
        if types == {str}:
            feature_type = FeatureType.STRING
    else:
        # Bools, datetimes, etc are all treated as unknown
        feature_type = FeatureType.UNKNOWN
    return feature_type
/running_mate-0.0.6-py3-none-any.whl/mate/generators.py
0.518302
0.415195
generators.py
pypi
"""Layered YAML configuration: includes, overrides, and class resolution."""
from typing import Any, Dict
import yaml
from running.suite import BenchmarkSuite
from running.runtime import Runtime
from running.modifier import Modifier
from pathlib import Path
import functools
import copy
import logging
import os


def load_class(cls, config):
    """Instantiate ``cls.from_config`` for every entry of a config dict."""
    return {k: cls.from_config(k, v) for (k, v) in config.items()}


# Top-level config keys whose values are resolved into class instances by
# Configuration.resolve_class().
KEY_CLASS_MAPPING = {
    "suites": BenchmarkSuite,
    "modifiers": Modifier,
    "runtimes": Runtime
}


class Configuration(object):
    """An immutable-ish bag of parsed configuration key/value pairs.

    ``includes`` and ``overrides`` are processing directives consumed by
    ``from_file`` and must never survive into a constructed Configuration —
    hence the assertions in ``__init__``.
    """

    def __init__(self, kv_pairs: Dict[str, Any]):
        assert "includes" not in kv_pairs
        assert "overrides" not in kv_pairs
        self.__items = kv_pairs

    def save_to_file(self, fd):
        """Serialize the raw key/value pairs as YAML to an open file."""
        yaml.dump(self.__items, fd)

    def resolve_class(self):
        """Resolve the values by instantiating instances of classes.

        For example, self.values["suites"] is a Dict[str, Dict[str, str]],
        where the inner dictionary contains the string representation of a
        benchmark suite. After this function returns, self.values["suites"]
        becomes a Dict[str, BenchmarkSuite].

        Change the KEY_CLASS_MAPPING to change which classes get resolved.
        """
        for cls_name, cls in KEY_CLASS_MAPPING.items():
            if cls_name in self.__items:
                self.__items[cls_name] = load_class(
                    cls, self.__items[cls_name])
        # "benchmarks" maps suite name -> list of benchmark specs; each spec
        # is resolved through the (already resolved) suite object.
        if "benchmarks" in self.__items:
            for suite_name, bms in self.__items["benchmarks"].items():
                suite = self.__items["suites"][suite_name]
                benchmarks = []
                for b in bms:
                    benchmarks.append(suite.get_benchmark(b))
                self.__items["benchmarks"][suite_name] = benchmarks

    def get(self, name: str) -> Any:
        """Return the value for *name*, or None if absent."""
        return self.__items.get(name)

    def override(self, selector: str, new_value: Any):
        """Set a (possibly nested) value addressed by a dotted *selector*.

        Numeric path components index into lists; everything else is a dict
        key. Only the final component is assigned.
        """
        current: Any  # Union[Dict[str, Any], List[Any]]
        current = self.__items
        parts = list(selector.split("."))
        for index, p in enumerate(parts):
            if index == len(parts) - 1:
                if p.isnumeric():
                    current[int(p)] = new_value
                else:
                    current[p] = new_value
            else:
                if p.isnumeric():
                    current = current[int(p)]
                else:
                    current = current[p]

    def combine(self, other: "Configuration") -> "Configuration":
        """Combine top-level items of self.values.

        Arrays are concatenated and dictionaries are updated.
        """
        # Deep-copy so neither operand is mutated by the merge.
        new_values = copy.deepcopy(self.__items)
        for k, v in other.__items.items():
            if k in new_values:
                if type(new_values[k]) is list:
                    new_values[k].extend(copy.deepcopy(other.__items[k]))
                else:
                    if type(new_values[k]) is not dict:
                        raise TypeError(
                            "Key `{}` has been defined in one of the "
                            "included files, and the value of `{}`, {}, "
                            "is not an array or a dictionary. "
                            "Please use overrides instead.".format(
                                k, k, repr(v)
                            ))
                    new_values[k].update(copy.deepcopy(other.__items[k]))
            else:
                new_values[k] = copy.deepcopy(other.__items[k])
        return Configuration(new_values)

    @staticmethod
    def parse_file(path: Path) -> Any:
        """Parse a YAML file and return the raw object (may be None)."""
        # NOTE(review): from_file() re-implements this parsing inline rather
        # than calling parse_file(); consider unifying.
        with path.open("r") as fd:
            try:
                config = yaml.safe_load(fd)
                return config
            except yaml.YAMLError as e:
                raise SyntaxError(
                    "Not able to parse the configuration file, {}".format(e))

    @staticmethod
    def from_file(in_folder: Path, p: str) -> "Configuration":
        """Load a configuration file, recursively processing ``includes``.

        *p* may be absolute or relative to *in_folder*; environment
        variables in it are expanded. ``overrides`` in a file are applied
        to the combined result of its includes, then the file's own items
        are merged on top.
        """
        expand_p = os.path.expandvars(p)
        logging.info("Loading config {}, expanding to {}, relative to {}".format(
            p, expand_p, in_folder))
        path = Path(expand_p)
        if path.is_absolute():
            logging.info(" is absolute")
        else:
            path = in_folder.joinpath(p)
            logging.info(" resolved to {}".format(path))
        if not path.exists():
            raise ValueError(
                "Configuration not found at path '{}'".format(path))
        if not path.is_file():
            raise ValueError(
                "Configuration at path '{}' is not a file".format(path))
        with path.open("r") as fd:
            try:
                config = yaml.safe_load(fd)
            except yaml.YAMLError as e:
                raise SyntaxError(
                    "Not able to parse the configuration file, {}".format(e))
        if config is None:
            raise ValueError("Parsed configuration file is None")
        if "includes" in config:
            # Includes are loaded relative to THIS file's folder and folded
            # left-to-right, so later includes win on conflicts.
            includes = [Configuration.from_file(
                path.parent, p) for p in config["includes"]]
            base = functools.reduce(
                lambda left, right: left.combine(right), includes)
            if "overrides" in config:
                for selector, new_value in config["overrides"].items():
                    base.override(selector, new_value)
                del config["overrides"]
            del config["includes"]
            final_config = Configuration(config)
            final_config = base.combine(final_config)
        else:
            if "overrides" in config:
                raise KeyError(
                    'You specified "overrides" but not "includes". This does not make sense.')
            final_config = Configuration(config)
        return final_config
/running-ng-0.4.1.tar.gz/running-ng-0.4.1/src/running/config.py
0.674158
0.290477
config.py
pypi
"""Benchmark suite definitions for running-ng."""
from pathlib import Path
from typing import Any, Dict, Optional, Union, List, Sequence
from running.benchmark import JavaBenchmark, BinaryBenchmark, Benchmark, JavaScriptBenchmark, JuliaBenchmark
from running.runtime import OpenJDK, Runtime
from running.modifier import JVMArg, Modifier
import logging
from running.util import register, split_quoted

__DRY_RUN = False
# Fallback heap size (MB) when no minheap value is configured for a benchmark.
# BUGFIX: this used to be spelled __DEFAULT_MINHEAP (two leading
# underscores).  Any reference to such a name inside a class body is
# name-mangled by Python to _ClassName__DEFAULT_MINHEAP, so every
# get_minheap fallback (DaCapo, Octane, JuliaGCBenchmarks) raised
# NameError at runtime.  A single leading underscore avoids the mangling.
_DEFAULT_MINHEAP = 4096


def is_dry_run():
    """Return whether the dry-run flag is set."""
    global __DRY_RUN
    return __DRY_RUN


def set_dry_run(val: bool):
    """Set the module-wide dry-run flag."""
    global __DRY_RUN
    __DRY_RUN = val


def parse_timing_iteration(t: Optional[str], suite_name: str) -> Union[str, int]:
    """Parse a timing_iteration value: an int if possible, else the raw string.

    Raises KeyError when the value is missing/empty.
    """
    if not t:
        raise KeyError(
            "You need to specify the timing_iteration for a {} suite".format(suite_name))
    assert t is not None
    try:
        t_parsed = int(t)
        return t_parsed
    except ValueError:
        return t


class BenchmarkSuite(object):
    """Base class for all benchmark suites; subclasses self-register."""

    # Populated by the @register decorator: maps config "type" -> subclass.
    CLS_MAPPING: Dict[str, Any]
    CLS_MAPPING = {}

    def __init__(self, name: str, **kwargs):
        self.name = name

    def __str__(self) -> str:
        return "Benchmark Suite {}".format(self.name)

    @staticmethod
    def from_config(name: str, config: Dict[str, str]) -> Any:
        """Instantiate the subclass selected by config["type"]."""
        return BenchmarkSuite.CLS_MAPPING[config["type"]](name=name, **config)

    def get_benchmark(self, _bm_spec: Union[str, Dict[str, Any]]) -> Any:
        raise NotImplementedError()

    def get_minheap(self, _bm: Benchmark) -> int:
        raise NotImplementedError

    def is_passed(self, _output: bytes) -> bool:
        raise NotImplementedError


@register(BenchmarkSuite)
class BinaryBenchmarkSuite(BenchmarkSuite):
    """A suite of arbitrary native programs described by path + args."""

    def __init__(self, programs: Dict[str, Dict[str, str]], **kwargs):
        super().__init__(**kwargs)
        self.programs: Dict[str, Dict[str, Any]]
        self.programs = {
            k: {
                'path': Path(v['path']),
                'args': split_quoted(v['args'])
            }
            for k, v in programs.items()
        }
        self.timeout = kwargs.get("timeout")

    def get_benchmark(self, bm_spec: Union[str, Dict[str, Any]]) -> 'BinaryBenchmark':
        assert type(bm_spec) is str
        bm_name = bm_spec
        return BinaryBenchmark(
            self.programs[bm_name]['path'],
            self.programs[bm_name]['args'],
            suite_name=self.name,
            name=bm_name,
            timeout=self.timeout
        )

    def get_minheap(self, bm: Benchmark) -> int:
        logging.warning("minheap is not respected for BinaryBenchmarkSuite")
        assert isinstance(bm, BinaryBenchmark)
        return 0

    def is_passed(self, _output: bytes) -> bool:
        # FIXME no generic way to know
        return True


class JavaBenchmarkSuite(BenchmarkSuite):
    """Intermediate base class for JVM-based suites."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_minheap(self, _bm: Benchmark) -> int:
        raise NotImplementedError()


@register(BenchmarkSuite)
class DaCapo(JavaBenchmarkSuite):
    """The DaCapo benchmark suite (2006 / 9.12 / evaluation releases)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.release: str
        self.release = kwargs["release"]
        if self.release not in ["2006", "9.12", "evaluation"]:
            raise ValueError(
                "DaCapo release {} not recongized".format(self.release))
        self.path: Path
        self.path = Path(kwargs["path"])
        if not self.path.exists():
            logging.warning("DaCapo jar {} not found".format(self.path))
        # minheap selects one named table out of minheap_values.
        self.minheap: Optional[str]
        self.minheap = kwargs.get("minheap")
        self.minheap_values: Dict[str, Dict[str, int]]
        self.minheap_values = kwargs.get("minheap_values", {})
        if not isinstance(self.minheap_values, dict):
            raise TypeError(
                "The minheap_values of {} should be a dictionary".format(self.name))
        if self.minheap:
            if not isinstance(self.minheap, str):
                raise TypeError(
                    "The minheap of {} should be a string that selects from a minheap_values".format(self.name))
            if self.minheap not in self.minheap_values:
                # BUGFIX: the message previously formatted self.name twice,
                # hiding which minheap key was actually invalid.
                raise KeyError(
                    "{} is not a valid entry of {}.minheap_values".format(self.minheap, self.name))
        self.timing_iteration = parse_timing_iteration(
            kwargs.get("timing_iteration"), "DaCapo")
        if isinstance(self.timing_iteration, str) and self.timing_iteration != "converge":
            raise TypeError("The timing iteration of the DaCapo benchmark suite `{}` is {}, which neither an integer nor 'converge'".format(
                self.path, repr(self.timing_iteration)
            ))
        self.callback: Optional[str]
        self.callback = kwargs.get("callback")
        self.timeout: Optional[int]
        self.timeout = kwargs.get("timeout")
        self.wrapper: Optional[Union[Dict[str, str], str]]
        self.wrapper = kwargs.get("wrapper")
        self.companion: Optional[Union[Dict[str, str], str]]
        self.companion = kwargs.get("companion")
        # user overriding the default size for the entire suite
        self.size: Optional[str]
        self.size = kwargs.get("size")

    def __str__(self) -> str:
        return "{} DaCapo {} {}".format(super().__str__(), self.release, self.path)

    @staticmethod
    def parse_timing_iteration(v: Any):
        """Parse a per-benchmark timing_iteration: int or 'converge'."""
        try:
            timing_iteration = int(v)
        except ValueError:
            if v != "converge":
                raise TypeError("The timing iteration {} is neither an integer nor 'converge'".format(
                    repr(v)
                ))
            timing_iteration = v
        return timing_iteration

    def get_benchmark(self, bm_spec: Union[str, Dict[str, Any]]) -> 'JavaBenchmark':
        """Build a JavaBenchmark from a name or a per-benchmark dict spec."""
        timing_iteration = self.timing_iteration
        timeout = self.timeout
        size = self.size
        if type(bm_spec) is str:
            bm_name = bm_spec
            name = bm_spec
        else:
            assert type(bm_spec) is dict
            if "bm_name" not in bm_spec or "name" not in bm_spec:
                raise KeyError(
                    "When a dictionary is used to speicfy a benchmark, you need to provide both `name` and `bm_name`")
            bm_name = bm_spec["bm_name"]
            name = bm_spec["name"]
            if "timing_iteration" in bm_spec:
                timing_iteration = DaCapo.parse_timing_iteration(
                    bm_spec["timing_iteration"])
            # user overriding the size for that benchmark
            if "size" in bm_spec:
                size = bm_spec["size"]
            if "timeout" in bm_spec:
                timeout = bm_spec["timeout"]
        if self.callback:
            cp = [str(self.path)]
            program_args = ["Harness", "-c", self.callback]
        else:
            cp = []
            program_args = ["-jar", str(self.path)]
        # Timing iteration
        if type(timing_iteration) is int:
            program_args.extend(["-n", str(timing_iteration)])
        else:
            assert timing_iteration == "converge"
            # 2006 uses a single-dash flag; later releases use double-dash.
            if self.release == "2006":
                program_args.append("-converge")
            else:
                program_args.append("--converge")
        # Input size
        if size:
            program_args.extend(["-s", size])
        # Name of the benchmark
        program_args.append(bm_name)

        # https://github.com/anupli/running-ng/issues/111
        # https://mmtk.zulipchat.com/#narrow/stream/262677-ANU-Research/topic/Using.20new.20dacapo/near/270150954
        def strategy(runtime: Runtime) -> Sequence[Modifier]:
            modifiers = []
            if isinstance(runtime, OpenJDK):
                if runtime.release >= 9:
                    modifiers.append(JVMArg(
                        name="add_exports",
                        val="--add-exports java.base/jdk.internal.ref=ALL-UNNAMED"
                    ))
            return modifiers

        return JavaBenchmark(
            jvm_args=[],
            program_args=program_args,
            cp=cp,
            wrapper=self.get_wrapper(bm_name),
            companion=self.get_companion(bm_name),
            suite_name=self.name,
            name=name,
            timeout=timeout,
            runtime_specific_modifiers_strategy=strategy
        )

    def get_minheap(self, bm: Benchmark) -> int:
        """Look up the configured minheap for *bm*, with a default fallback."""
        assert isinstance(bm, JavaBenchmark)
        name = bm.name
        if not self.minheap:
            logging.warning(
                "No minheap_value of {} is selected".format(self))
            return _DEFAULT_MINHEAP
        minheap = self.minheap_values[self.minheap]
        if name not in minheap:
            logging.warning(
                "Minheap for {} of {} not set".format(name, self))
            return _DEFAULT_MINHEAP
        return minheap[name]

    def is_passed(self, output: bytes) -> bool:
        return b"PASSED" in output

    def get_wrapper(self, bm_name: str) -> Optional[str]:
        """Return the wrapper command for *bm_name* (shared or per-benchmark)."""
        if self.wrapper is None:
            return None
        elif type(self.wrapper) == str:
            return self.wrapper
        elif type(self.wrapper) == dict:
            return self.wrapper.get(bm_name)
        else:
            raise TypeError("wrapper of {} must be either null, "
                            "a string (the same wrapper for all benchmarks), "
                            "or a dictionary (different wrappers for"
                            "differerent benchmarks)".format(self.name))

    def get_companion(self, bm_name: str) -> Optional[str]:
        """Return the companion command for *bm_name* (shared or per-benchmark)."""
        if self.companion is None:
            return None
        elif type(self.companion) == str:
            return self.companion
        elif type(self.companion) == dict:
            return self.companion.get(bm_name)
        else:
            raise TypeError("companion of {} must be either null, "
                            "a string (the same companion for all benchmarks), "
                            "or a dictionary (different companions for"
                            "differerent benchmarks)".format(self.name))


@register(BenchmarkSuite)
class SPECjbb2015(JavaBenchmarkSuite):
    """SPECjbb2015; only composite mode is supported."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.release: str
        self.release = kwargs["release"]
        if self.release not in ["1.03"]:
            raise ValueError(
                "SPECjbb2015 release {} not recongized".format(self.release))
        self.path: Path
        self.path = Path(kwargs["path"]).resolve()
        # The props file is expected alongside the jar's parent directory.
        self.propsfile = (self.path / ".." / "config" / "specjbb2015.props").resolve()
        if not self.path.exists():
            logging.info("SPECjbb2015 jar {} not found".format(self.path))

    def __str__(self) -> str:
        return "{} SPECjbb2015 {} {}".format(super().__str__(), self.release, self.path)

    def get_benchmark(self, bm_spec: Union[str, Dict[str, Any]]) -> 'JavaBenchmark':
        assert type(bm_spec) is str
        if bm_spec != "composite":
            raise ValueError("Only composite mode is supported for now")
        program_args = [
            "-jar", str(self.path),
            "-p", str(self.propsfile),
            "-m", "COMPOSITE",
            "-skipReport"
        ]
        return JavaBenchmark(
            jvm_args=[],
            program_args=program_args,
            cp=[],
            suite_name=self.name,
            name="composite"
        )

    def get_minheap(self, _bm: Benchmark) -> int:
        return 2048  # SPEC recommends running with minimum 2GB of heap

    def is_passed(self, output: bytes) -> bool:
        # FIXME
        return True


@register(BenchmarkSuite)
class Octane(BenchmarkSuite):
    """The Octane JavaScript benchmark suite, run through a wrapper script."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.path: Path
        self.path = Path(kwargs["path"]).resolve()
        if not self.path.exists():
            logging.info("Octane folder {} not found".format(self.path))
        self.wrapper: Path
        self.wrapper = Path(kwargs["wrapper"]).resolve()
        if not self.wrapper.exists():
            logging.info("Octane folder {} not found".format(self.wrapper))
        timing_iteration = parse_timing_iteration(
            kwargs.get("timing_iteration"), "Octane")
        self.timing_iteration: int
        if isinstance(timing_iteration, str):
            raise TypeError(
                "timing_iteration for Octane has to be an integer")
        else:
            self.timing_iteration = timing_iteration
        self.minheap: Optional[str]
        self.minheap = kwargs.get("minheap")
        self.minheap_values: Dict[str, Dict[str, int]]
        self.minheap_values = kwargs.get("minheap_values", {})
        if not isinstance(self.minheap_values, dict):
            raise TypeError(
                "The minheap_values of {} should be a dictionary".format(self.name))
        if self.minheap:
            if not isinstance(self.minheap, str):
                raise TypeError(
                    "The minheap of {} should be a string that selects from a minheap_values".format(self.name))
            if self.minheap not in self.minheap_values:
                # BUGFIX: report the offending minheap key, not the suite name.
                raise KeyError(
                    "{} is not a valid entry of {}.minheap_values".format(self.minheap, self.name))
        self.timeout: Optional[int]
        self.timeout = kwargs.get("timeout")

    def __str__(self) -> str:
        return "{} Octane {}".format(super().__str__(), self.path)

    def get_benchmark(self, bm_spec: Union[str, Dict[str, Any]]) -> 'JavaScriptBenchmark':
        assert type(bm_spec) is str
        program_args = [
            str(self.path),
            bm_spec,
            str(self.timing_iteration)
        ]
        return JavaScriptBenchmark(
            js_args=[],
            program=str(self.wrapper),
            program_args=program_args,
            suite_name=self.name,
            name=bm_spec,
            timeout=self.timeout
        )

    def get_minheap(self, bm: Benchmark) -> int:
        assert isinstance(bm, JavaScriptBenchmark)
        name = bm.name
        if not self.minheap:
            logging.warning(
                "No minheap_value of {} is selected".format(self))
            return _DEFAULT_MINHEAP
        minheap = self.minheap_values[self.minheap]
        if name not in minheap:
            logging.warning(
                "Minheap for {} of {} not set".format(name, self))
            return _DEFAULT_MINHEAP
        return minheap[name]

    def is_passed(self, output: bytes) -> bool:
        return b"PASSED" in output


@register(BenchmarkSuite)
class SPECjvm98(JavaBenchmarkSuite):
    """SPECjvm98, driven through SpecApplication."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.release: str
        self.release = kwargs["release"]
        if self.release not in ["1.03_05"]:
            raise ValueError(
                "SPECjvm98 release {} not recongized".format(self.release))
        self.path: Path
        self.path = Path(kwargs["path"]).resolve()
        if not self.path.exists():
            logging.info("SPECjvm98 {} not found".format(self.path))
        if not (self.path / "SpecApplication.class").exists():
            logging.info(
                "SpecApplication.class not found under SPECjvm98 {}".format(self.path))
        timing_iteration = parse_timing_iteration(
            kwargs.get("timing_iteration"), "SPECjvm98")
        self.timing_iteration: int
        if isinstance(timing_iteration, str):
            raise TypeError(
                "timing_iteration for SPECjvm98 has to be an integer")
        else:
            self.timing_iteration = timing_iteration

    def __str__(self) -> str:
        return "{} SPECjvm98 {} {}".format(super().__str__(), self.release, self.path)

    def get_benchmark(self, bm_spec: Union[str, Dict[str, Any]]) -> 'JavaBenchmark':
        assert type(bm_spec) is str
        program_args = [
            "SpecApplication",
            "-i{}".format(self.timing_iteration),
            bm_spec
        ]
        return JavaBenchmark(
            jvm_args=[],
            program_args=program_args,
            cp=[str(self.path)],
            suite_name=self.name,
            name=bm_spec,
            override_cwd=self.path
        )

    def get_minheap(self, _bm: Benchmark) -> int:
        # FIXME allow user to measure and specify minimum heap sizes
        return 32  # SPEC recommends running with minimum 32MB of heap

    def is_passed(self, output: bytes) -> bool:
        # FIXME
        return b"**NOT VALID**" not in output


@register(BenchmarkSuite)
class JuliaGCBenchmarks(BenchmarkSuite):
    """Julia GC benchmarks, checked out from a benchmarks repository."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.path: Path
        self.path = Path(kwargs["path"])
        if not self.path.exists():
            logging.warning(
                "JuliaGCBenchmarks does not exist at {}".format(self.path))
        self.minheap: Optional[str]
        self.minheap = kwargs.get("minheap")
        self.minheap_values: Dict[str, Dict[str, int]]
        self.minheap_values = kwargs.get("minheap_values", {})
        if not isinstance(self.minheap_values, dict):
            raise TypeError(
                "The minheap_values of {} should be a dictionary".format(self.name))
        if self.minheap:
            if not isinstance(self.minheap, str):
                raise TypeError(
                    "The minheap of {} should be a string that selects from a minheap_values".format(self.name))
            if self.minheap not in self.minheap_values:
                # BUGFIX: report the offending minheap key, not the suite name.
                raise KeyError(
                    "{} is not a valid entry of {}.minheap_values".format(self.minheap, self.name))

    def __str__(self) -> str:
        return "{} JuliaGCBenchmarks {}".format(super().__str__(), self.path)

    def get_minheap(self, bm: Benchmark) -> int:
        name = bm.name
        if not self.minheap:
            logging.warning(
                "No minheap_value of {} is selected".format(self))
            return _DEFAULT_MINHEAP
        minheap = self.minheap_values[self.minheap]
        if name not in minheap:
            logging.warning(
                "Minheap for {} of {} not set".format(name, self))
            return _DEFAULT_MINHEAP
        return minheap[name]

    def get_benchmark(self, bm_spec: Union[str, Dict[str, Any]]) -> 'JuliaBenchmark':
        assert type(bm_spec) is str
        return JuliaBenchmark(
            julia_args=[],
            suite_name=self.name,
            name=bm_spec,
            suite_path=self.path,
            program_args=[],
        )

    def is_passed(self, output: bytes) -> bool:
        # FIXME
        return True
/running-ng-0.4.1.tar.gz/running-ng-0.4.1/src/running/suite.py
0.850608
0.214445
suite.py
pypi
"""Logarithmic exploration of a power-of-two parameter space."""
from typing import Optional, Iterable, Callable
import subprocess


def fillin(callback: Callable[[int, Iterable[int]], None], levels: int,
           start: Optional[int] = None):
    """Fill the parameter space

    The parameter space is 0, 1, 2, ..., 2^levels — right-INCLUSIVE, so
    2^levels + 1 points in total. (The first round explicitly covers both
    endpoints via ``range(0, 2**levels + 1, ...)``.)

    The advantage of using this function is that it fills in the points in
    the space logarithmically, starting with the ends and middle, and
    progressing until all 2^levels + 1 points are explored.

    Parameters
    ----------
    callback : Callable[[int, Iterable[int]], None]
        The arguments represent a list of fractions. For example,
        callback(8, [2, 6]) means 2/8 and 6/8.
    levels : int
        The log value of the size of the parameter space.
    start: Optional[int]
        Instead of starting from 0, start from a specified value.
        NOTE: the value must equal one of the round bases 2^(levels-1-i)
        (i.e. a power of two below 2^(levels-1)); otherwise no round ever
        matches and the callback is never invoked.
    """
    commenced = False
    if start is None:
        # Round 0: both endpoints and the midpoint.
        callback(2**levels, range(0, 2 ** levels + 1, 2**(levels-1)))
        commenced = True
    i = 1
    while i < levels:
        base = 2 ** (levels - 1 - i)  # larger i, smaller base
        step = 2 ** (levels - i)  # larger i, smaller step
        if start is not None and base == start:
            commenced = True
        if commenced:
            # Each round visits the odd multiples of `base`, which are
            # exactly the points not produced by any earlier round.
            callback(2**levels, range(base, 2 ** levels, step))
        i += 1


def cmd_program(prog) -> Callable[[int, Iterable[int]], None]:
    """Wrap an external program as a fillin callback.

    The program is invoked as ``prog END N1 N2 ...`` and its stdout is
    echoed verbatim.
    """
    def callback(end, ns):
        cmd = [prog]
        cmd.append(str(end))
        cmd.extend(map(str, ns))
        output = subprocess.check_output(cmd)
        print(output.decode("utf-8"), end="")
    return callback


def setup_parser(subparsers):
    """Register the ``fillin`` subcommand and its arguments."""
    f = subparsers.add_parser("fillin")
    f.set_defaults(which="fillin")
    f.add_argument("PROG")
    f.add_argument("LEVELS", type=int)
    f.add_argument("START", type=int, nargs='?', default=None)


def run(args):
    """Execute the ``fillin`` subcommand; return False if not selected."""
    if args.get("which") != "fillin":
        return False
    callback = cmd_program(args.get("PROG"))
    fillin(callback, args.get("LEVELS"), args.get("START"))
    return True
/running-ng-0.4.1.tar.gz/running-ng-0.4.1/src/running/command/fillin.py
0.925091
0.585042
fillin.py
pypi
"""Preprocess gzip'd MMTk statistics logs according to a configuration."""
from copy import deepcopy
from pathlib import Path
import gzip
import enum
from typing import Any, Callable, Dict, List
import functools
import re
from running.config import Configuration
import os

MMTk_HEADER = "============================ MMTk Statistics Totals ============================"
MMTk_FOOTER = "------------------------------ End MMTk Statistics -----------------------------"


class EditingMode(enum.Enum):
    """State machine states while scanning an MMTk statistics block."""
    NotEditing = 1
    Names = 2
    Values = 3
    TotalTime = 4


def setup_parser(subparsers):
    """Register the ``preproc`` subcommand and its arguments."""
    f = subparsers.add_parser("preproc")
    f.set_defaults(which="preproc")
    f.add_argument("CONFIG", type=Path)
    f.add_argument("SOURCE", type=Path)
    f.add_argument("TARGET", type=Path)


def filter_stats(predicate: Callable[[str], bool]):
    """Return a transform keeping only columns whose name passes *predicate*."""
    def inner(stats: Dict[str, float]):
        return {k: v for (k, v) in stats.items() if predicate(k)}
    return inner


def reduce_stats(pattern: str, new_column: str, func):
    """Return a transform folding all columns matching *pattern* into one."""
    compiled = re.compile(pattern)

    def inner(stats: Dict[str, float]):
        to_reduce = [v for (k, v) in stats.items() if compiled.match(k)]
        if not to_reduce:
            return stats
        new_stats = deepcopy(stats)
        new_stats[new_column] = functools.reduce(func, to_reduce)
        return new_stats
    return inner


def sum_work_perf_event(event_name):
    """Sum per-work-packet totals of a perf event into work.<event>.total."""
    pattern = "work\\.\\w+\\.{}\\.total".format(event_name)
    new_column = "work.{}.total".format(event_name)
    return reduce_stats(pattern, new_column, lambda x, y: x + y)


def ratio_work_perf_event(event_name: str):
    """Add per-work-packet ratios relative to the aggregated event total."""
    pattern = "work\\.\\w+\\.{}\\.total".format(event_name)
    aggregated_column = "work.{}.total".format(event_name)
    compiled = re.compile(pattern)

    def inner(stats: Dict[str, float]):
        new_stats = deepcopy(stats)
        for (k, v) in stats.items():
            if compiled.match(k):
                new_column = k.replace(".total", ".ratio")
                new_stats[new_column] = v / stats[aggregated_column]
        return new_stats
    return inner


def ratio_event(event_name: str):
    """Add stw/other ratios for an event that is split into the two phases."""
    def inner(stats: Dict[str, float]):
        new_stats = deepcopy(stats)
        stw_key = "{}.stw".format(event_name)
        other_key = "{}.other".format(event_name)
        if stw_key in stats and other_key in stats:
            gc = stats[stw_key]
            mu = stats[other_key]
            total = gc + mu
            # BUGFIX: guard against a zero total (both phases zero), which
            # previously raised ZeroDivisionError.
            if total:
                new_stats["{}.ratio".format(stw_key)] = gc / total
                new_stats["{}.ratio".format(other_key)] = mu / total
        return new_stats
    return inner


def calc_ipc(stats: Dict[str, float]):
    """Add instructions-per-cycle columns for the mu and gc phases."""
    new_stats = deepcopy(stats)
    for phase in ["mu", "gc"]:
        inst = stats.get("PERF_COUNT_HW_INSTRUCTIONS.{}".format(phase))
        cycles = stats.get("PERF_COUNT_HW_CPU_CYCLES.{}".format(phase))
        if inst is not None and cycles is not None:
            if cycles == 0:
                assert inst == 0
                continue
            new_stats["INSTRUCTIONS_PER_CYCLE.{}".format(
                phase)] = inst / cycles
    return new_stats


def calc_work_ipc(stats: Dict[str, float]):
    """Add per-work-packet instructions-per-cycle columns."""
    pattern = "work\\.\\w+\\.PERF_COUNT_HW_INSTRUCTIONS\\.total"
    compiled = re.compile(pattern)
    new_stats = deepcopy(stats)
    for (k, v) in stats.items():
        if compiled.match(k):
            cycles = k.replace("PERF_COUNT_HW_INSTRUCTIONS",
                               "PERF_COUNT_HW_CPU_CYCLES")
            # BUGFIX: mirror calc_ipc — skip packets whose cycle counter is
            # missing or zero instead of raising KeyError/ZeroDivisionError.
            cycles_value = stats.get(cycles)
            if not cycles_value:
                continue
            ipc = k.replace("PERF_COUNT_HW_INSTRUCTIONS.total",
                            "INSTRUCTIONS_PER_CYCLE.ratio")
            new_stats[ipc] = v / cycles_value
    return new_stats


def stat_sort_helper(key: str, value: float):
    """Sort key: group dotted columns by their second-to-last component,
    descending by value within a group."""
    if len(key.split(".")) > 1:
        return key.split(".")[-2], -value
    else:
        return key, -value


def process_lines(configuration: Configuration, lines: List[str]):
    """Rewrite the MMTk statistics block of a log according to the config.

    Lines outside the header/footer pair are copied verbatim; inside, the
    name row and value row are parsed into a dict, piped through the
    configured preprocessing transforms, re-sorted and re-emitted.
    """
    new_lines = []
    editing = EditingMode.NotEditing
    names = []
    funcs: List[Any]
    funcs = []
    if configuration.get("preprocessing") is None:
        funcs = []
    else:
        for f in configuration.get("preprocessing"):
            if f["name"] == "sum_work_perf_event":
                for v in f["val"].split(","):
                    funcs.append(sum_work_perf_event(v))
            elif f["name"] == "ratio_work_perf_event":
                for v in f["val"].split(","):
                    funcs.append(ratio_work_perf_event(v))
            elif f["name"] == "calc_work_ipc":
                funcs.append(calc_work_ipc)
            elif f["name"] == "ratio_event":
                for v in f["val"].split(","):
                    funcs.append(ratio_event(v))
            elif f["name"] == "filter_stats":
                patterns_to_keep = f["val"].split(",")
                # BUGFIX: bind the pattern list as a default argument.  The
                # previous closure captured `patterns_to_keep` by reference,
                # so several filter_stats entries would all end up using the
                # LAST entry's patterns (late-binding closure bug).
                funcs.append(filter_stats(
                    lambda n, patterns=patterns_to_keep: any(
                        [p in n for p in patterns])))
            elif f["name"] == "calc_ipc":
                funcs.append(calc_ipc)
            else:
                raise ValueError("Not supported preprocessing functionality")
    for line in lines:
        if line.strip() == MMTk_HEADER:
            new_lines.append(line)
            editing = EditingMode.Names
            continue
        elif line.strip() == MMTk_FOOTER:
            assert editing == EditingMode.TotalTime
            new_lines.append(line)
            editing = EditingMode.NotEditing
            continue
        if editing == EditingMode.Names:
            names = line.strip().split("\t")
            editing = EditingMode.Values
        elif editing == EditingMode.Values:
            values = map(float, line.strip().split("\t"))
            stats = dict(zip(names, values))
            # Apply the transforms left-to-right over the stats dict.
            new_stats = functools.reduce(
                lambda accum, val: val(accum), funcs, stats)
            if len(new_stats):
                new_stat_list = list(new_stats.items())
                new_stat_list.sort(key=lambda x: stat_sort_helper(x[0], x[1]))
                new_names, new_values = list(zip(*new_stat_list))
                new_lines.append("{}\n".format("\t".join(new_names)))
                new_lines.append("{}\n".format(
                    "\t".join(map(str, new_values))))
            else:
                # Keep the block well-formed even when filtering removed
                # every column.
                new_lines.append("empty_after_preprocessing\n")
                new_lines.append("0\n")
            editing = EditingMode.TotalTime
        elif editing == EditingMode.TotalTime:
            new_lines.append(line)
        else:
            new_lines.append(line)
    return new_lines


def process_one_file(configuration: Configuration, original: Path, targetfile: Path):
    """Preprocess one gzip'd log file into *targetfile*."""
    # XXX DO NOT COPY the content of the log file
    # Tab might not be preserved (especially around line breaks)
    # https://unix.stackexchange.com/questions/324676/output-tab-character-on-terminal-window
    with gzip.open(original, "rt") as old:
        with gzip.open(targetfile, "wt") as new:
            new.writelines(process_lines(configuration, old.readlines()))


def process(configuration: Configuration, source: Path, target: Path):
    """Preprocess every *.log.gz under *source* into *target*."""
    for file in source.glob("*.log.gz"):
        process_one_file(configuration, file, target / file.name)


def run(args):
    """Execute the ``preproc`` subcommand; return False if not selected."""
    if args.get("which") != "preproc":
        return False
    configuration = Configuration.from_file(
        Path(os.getcwd()), args.get("CONFIG"))
    source = args.get("SOURCE")
    target = args.get("TARGET")
    target.mkdir(parents=True, exist_ok=True)
    process(configuration, source, target)
    return True
/running-ng-0.4.1.tar.gz/running-ng-0.4.1/src/running/command/log_preprocessor.py
0.563258
0.230584
log_preprocessor.py
pypi
import logging from typing import DefaultDict, Dict, List, Any, Optional, Set, Tuple, BinaryIO, TYPE_CHECKING from running.suite import BenchmarkSuite, is_dry_run from running.benchmark import Benchmark, SubprocessrExit from running.config import Configuration from pathlib import Path from running.util import parse_config_str, system, get_logged_in_users, config_index_to_chr, config_str_encode import socket from datetime import datetime from running.runtime import Runtime import tempfile import subprocess import os from running.command.fillin import fillin import math import yaml if TYPE_CHECKING: from running.plugin.runbms import RunbmsPlugin from running.__version__ import __VERSION__ configuration: Configuration minheap_multiplier: float remote_host: Optional[str] skip_oom: Optional[int] skip_timeout: Optional[int] skip_log_compression: bool = False plugins: Dict[str, Any] resume: Optional[str] def setup_parser(subparsers): f = subparsers.add_parser("runbms") f.set_defaults(which="runbms") f.add_argument("LOG_DIR", type=Path) f.add_argument("CONFIG", type=Path) f.add_argument("N", type=int, nargs='?') f.add_argument("n", type=int, nargs='*') f.add_argument("-i", "--invocations", type=int) f.add_argument("-s", "--slice", type=str) f.add_argument("-p", "--id-prefix") f.add_argument("-m", "--minheap-multiplier", type=float) f.add_argument("--skip-oom", type=int) f.add_argument("--skip-timeout", type=int) f.add_argument("--resume", type=str) f.add_argument("--workdir", type=Path) f.add_argument("--skip-log-compression", action="store_true", help="Skip compressing log files") def getid() -> str: host = socket.gethostname() now = datetime.now() timestamp = now.strftime("%Y-%m-%d-%a-%H%M%S") return "{}-{}".format(host, timestamp) def spread(spread_factor: int, N: int, n: int) -> float: """Spread the numbers For example, when we have N = 8, n = 0, 1, ..., 7, N, we can have fractions of form n / N with equal distances between them: 0/8, 1/8, 2/8, ..., 7/8, 8/8 Sometimes 
it's desirable to transform such sequence with finer steppings at the start than at the end. The idea is that when the values are small, even small changes can greatly affect the behaviour of a system (such as the heap sizes of GC), so we want to explore smaller values at a finer granularity. For each n, we define n' = n + spread_factor / (N - 1) * (1 + 2 + ... + n-2 + n-1) For a consecutive pair (n-1, n), we can see that they are additionally (n-1) / (N-1) * spread_factor apart. For a special case when n = N, we have N' = N + N/2 * spread_factor, and (N'-1, N') is spread_factor apart. Let's concretely use spread_factor = 1 as as example. Let N and n be the same. So we have spread_factor / (N - 1) = 1/7 n = 0, n' = 0, n = 1, n' = 1. n = 2, n' = 2 + 1/7 n = 3, n' = 3 + 1/7 + 2/7 = 3 + 3/7 ... n = 7, n' = 7 + 1/7 + 2/7 + ... + 6/7 = 7 + 21/7 = 10 n = 8, n' = 8 + 1/7 + 2/7 + ... + 7/7 = 8 + 28/7 = 12 Parameters ---------- N : int Denominator spread_factor : int How much coarser is the spacing at the end relative to start spread_factor = 0 doesn't change the sequence at all n: int Nominator """ sum_1_n_minus_1 = (n*n - n) / 2 return n + spread_factor / (N - 1) * sum_1_n_minus_1 def hfac_str(hfac: float) -> str: return str(int(hfac*1000)) def get_heapsize(hfac: float, minheap: int) -> int: return round(minheap * hfac * minheap_multiplier) def get_hfacs(heap_range: int, spread_factor: int, N: int, ns: List[int]) -> List[float]: start = 1.0 end = float(heap_range) divisor = spread(spread_factor, N, N)/(end-start) return [spread(spread_factor, N, n)/divisor + start for n in ns] def run_benchmark_with_config(c: str, b: Benchmark, runbms_dir: Path, size: Optional[int], fd: Optional[BinaryIO]) -> Tuple[bytes, SubprocessrExit]: runtime, mods = parse_config_str(configuration, c) mod_b = b.attach_modifiers(mods) mod_b = mod_b.attach_modifiers(b.get_runtime_specific_modifiers(runtime)) if size is not None: mod_b = mod_b.attach_modifiers(runtime.get_heapsize_modifiers(size)) if 
fd: prologue = get_log_prologue(runtime, mod_b) fd.write(prologue.encode("ascii")) output, companion_out, exit_status = mod_b.run(runtime, cwd=runbms_dir) if fd: fd.write(output) if companion_out: fd.write(b"*****\n") fd.write(companion_out) if fd: epilogue = get_log_epilogue(runtime, mod_b) fd.write(epilogue.encode("ascii")) return output, exit_status def get_filename_no_ext(bm: Benchmark, hfac: Optional[float], size: Optional[int], config: str) -> str: # If we have / in benchmark names, replace with -. safe_bm_name = bm.name.replace("/", "-") return "{}.{}.{}.{}.{}".format( safe_bm_name, # plotty uses "^(\w+)\.(\d+)\.(\d+)\.([a-zA-Z0-9_\-\.\:\,]+)\.log\.gz$" # to match filenames hfac_str(hfac) if hfac is not None else "0", size if size is not None else "0", config_str_encode(config), bm.suite_name, ) def get_filename(bm: Benchmark, hfac: Optional[float], size: Optional[int], config: str) -> str: return get_filename_no_ext(bm, hfac, size, config) + ".log" def get_filename_completed(bm: Benchmark, hfac: Optional[float], size: Optional[int], config: str) -> str: return "{}.gz".format(get_filename(bm, hfac, size, config)) def get_log_epilogue(runtime: Runtime, bm: Benchmark) -> str: return "" def hz_to_ghz(hzstr: str) -> str: return "{:.2f} GHz".format(int(hzstr) / 1000 / 1000) def get_log_prologue(runtime: Runtime, bm: Benchmark) -> str: output = "\n-----\n" output += "mkdir -p PLOTTY_WORKAROUND; timedrun; " output += bm.to_string(runtime) output += "\n" output += "running-ng v{}\n".format(__VERSION__) output += system("date") + "\n" output += system("w") + "\n" output += system("vmstat 1 2") + "\n" output += system("top -bcn 1 -w512 |head -n 12") + "\n" output += "Environment variables: \n" for k, v in sorted(os.environ.items()): output += "\t{}={}\n".format(k, v) output += "OS: " output += system("uname -a") output += "CPU: " output += system("cat /proc/cpuinfo | grep 'model name' | head -1") output += "number of cores: " cores = system("cat /proc/cpuinfo | grep 
MHz | wc -l") output += cores has_cpufreq = Path("/sys/devices/system/cpu/cpu0/cpufreq").is_dir() if has_cpufreq: for i in range(0, int(cores)): output += "Frequency of cpu {}: ".format(i) output += hz_to_ghz( system("cat /sys/devices/system/cpu/cpu{}/cpufreq/scaling_cur_freq".format(i))) output += "\n" output += "Governor of cpu {}: ".format(i) output += system("cat /sys/devices/system/cpu/cpu{}/cpufreq/scaling_governor".format(i)) output += "Scaling_min_freq of cpu {}: ".format(i) output += hz_to_ghz( system("cat /sys/devices/system/cpu/cpu{}/cpufreq/scaling_min_freq".format(i))) output += "\n" return output def run_one_benchmark( invocations: int, suite: BenchmarkSuite, bm: Benchmark, hfac: Optional[float], configs: List[str], runbms_dir: Path, log_dir: Path ): p: "RunbmsPlugin" bm_name = bm.name print(bm_name, end=" ") size: Optional[int] # heap size measured in MB if hfac is not None: print(hfac_str(hfac), end=" ") size = get_heapsize(hfac, suite.get_minheap(bm)) print(size, end=" ") else: size = None for p in plugins.values(): p.start_benchmark(hfac, size, bm) oomed_count: DefaultDict[str, int] oomed_count = DefaultDict(int) timeout_count: DefaultDict[str, int] timeout_count = DefaultDict(int) logged_in_users: Set[str] logged_in_users = get_logged_in_users() if len(logged_in_users) > 1: logging.warning("More than one user logged in: {}".format( " ".join(logged_in_users))) ever_ran = [False] * len(configs) for i in range(0, invocations): for p in plugins.values(): p.start_invocation(hfac, size, bm, i) print(i, end="", flush=True) for j, c in enumerate(configs): config_passed = False for p in plugins.values(): p.start_config(hfac, size, bm, i, c, j) if skip_oom is not None and oomed_count[c] >= skip_oom: print(".", end="", flush=True) continue if skip_timeout is not None and timeout_count[c] >= skip_timeout: print(".", end="", flush=True) continue if resume: log_filename_completed = get_filename_completed( bm, hfac, size, c) if (log_dir / 
log_filename_completed).exists(): print(config_index_to_chr(j), end="", flush=True) continue log_filename = get_filename(bm, hfac, size, c) logging.debug("Running with log filename {}".format(log_filename)) runtime, _ = parse_config_str(configuration, c) if is_dry_run(): output, exit_status = run_benchmark_with_config( c, bm, runbms_dir, size, None ) assert exit_status is SubprocessrExit.Dryrun else: fd: BinaryIO with (log_dir / log_filename).open("ab") as fd: output, exit_status = run_benchmark_with_config( c, bm, runbms_dir, size, fd ) ever_ran[j] = True if runtime.is_oom(output): oomed_count[c] += 1 if exit_status is SubprocessrExit.Timeout: timeout_count[c] += 1 print(".", end="", flush=True) elif exit_status is SubprocessrExit.Error: print(".", end="", flush=True) elif exit_status is SubprocessrExit.Normal: if suite.is_passed(output): config_passed = True print(config_index_to_chr(j), end="", flush=True) else: print(".", end="", flush=True) elif exit_status is SubprocessrExit.Dryrun: print(".", end="", flush=True) else: raise ValueError("Not a valid SubprocessrExit value") for p in plugins.values(): p.end_config(hfac, size, bm, i, c, j, config_passed) for p in plugins.values(): p.end_invocation(hfac, size, bm, i) for p in plugins.values(): p.end_benchmark(hfac, size, bm) for j, c in enumerate(configs): log_filename = get_filename(bm, hfac, size, c) # Check that this is not a dry-run and we have actually executed this # config for a particular benchmark/hfac (method parameters) if not is_dry_run() and ever_ran[j]: if not skip_log_compression: subprocess.check_call([ "gzip", log_dir / log_filename ]) print() def run_one_hfac( invocations: int, hfac: Optional[float], suites: Dict[str, BenchmarkSuite], benchmarks: Dict[str, List[Benchmark]], configs: List[str], runbms_dir: Path, log_dir: Path ): p: "RunbmsPlugin" for p in plugins.values(): p.start_hfac(hfac) for suite_name, bms in benchmarks.items(): suite = suites[suite_name] for bm in bms: 
run_one_benchmark(invocations, suite, bm, hfac, configs, runbms_dir, log_dir) rsync(log_dir) for p in plugins.values(): p.end_hfac(hfac) def ensure_remote_dir(log_dir): if not is_dry_run() and remote_host is not None: log_dir = log_dir.resolve() system("ssh {} mkdir -p {}".format(remote_host, log_dir)) def rsync(log_dir): if not is_dry_run() and remote_host is not None: log_dir = log_dir.resolve() system("rsync -ae ssh {}/ {}:{}".format(log_dir, remote_host, log_dir)) def run(args): if args.get("which") != "runbms": return False with tempfile.TemporaryDirectory(prefix="runbms-") as runbms_dir: logging.info("Temporary directory: {}".format(runbms_dir)) if args.get("workdir"): args.get("workdir").mkdir(parents=True, exist_ok=True) runbms_dir = str(args.get("workdir").resolve()) # Processing command lines args global resume resume = args.get("resume") if resume: run_id = resume else: prefix = args.get("id_prefix") run_id = getid() if prefix: run_id = "{}-{}".format(prefix, run_id) print("Run id: {}".format(run_id)) log_dir = args.get("LOG_DIR") / run_id if not is_dry_run(): log_dir.mkdir(parents=True, exist_ok=True) with (log_dir / "runbms_args.yml").open("w") as fd: yaml.dump(args, fd) N = args.get("N") ns = args.get("n") slice = args.get("slice") slice = [float(s) for s in slice.split(",")] if slice else [] global skip_oom skip_oom = args.get("skip_oom") global skip_timeout skip_timeout = args.get("skip_timeout") global skip_log_compression skip_log_compression = args.get("skip_log_compression") # Load from configuration file global configuration configuration = Configuration.from_file( Path(os.getcwd()), args.get("CONFIG")) # Save metadata if not is_dry_run(): with (log_dir / "runbms.yml").open("w") as fd: configuration.save_to_file(fd) configuration.resolve_class() # Read from configuration, override with command line arguments if # needed invocations = configuration.get("invocations") if args.get("invocations"): invocations = args.get("invocations") global 
minheap_multiplier minheap_multiplier = configuration.get("minheap_multiplier") if args.get("minheap_multiplier"): minheap_multiplier = args.get("minheap_multiplier") heap_range = configuration.get("heap_range") spread_factor = configuration.get("spread_factor") suites = configuration.get("suites") benchmarks = configuration.get("benchmarks") if benchmarks is None: benchmarks = {} configs = configuration.get("configs") global remote_host remote_host = configuration.get("remote_host") if not is_dry_run() and remote_host is not None: ensure_remote_dir(log_dir) global plugins plugins = configuration.get("plugins") if plugins is None: plugins = {} else: from running.plugin.runbms import RunbmsPlugin if type(plugins) is not dict: raise TypeError("plugins must be a dictionary") plugins = {k: RunbmsPlugin.from_config( k, v) for k, v in plugins.items()} for p in plugins.values(): p.set_run_id(run_id) p.set_runbms_dir(runbms_dir) p.set_log_dir(log_dir) def run_hfacs(hfacs): logging.info("hfacs: {}".format( ", ".join([ hfac_str(hfac) for hfac in hfacs ]) )) for hfac in hfacs: run_one_hfac(invocations, hfac, suites, benchmarks, configs, Path(runbms_dir), log_dir) print() def run_N_ns(N, ns): hfacs = get_hfacs(heap_range, spread_factor, N, ns) run_hfacs(hfacs) if slice: run_hfacs(slice) return True if N is None: run_one_hfac(invocations, None, suites, benchmarks, configs, Path(runbms_dir), log_dir) return True if len(ns) == 0: fillin(run_N_ns, round(math.log2(N))) else: run_N_ns(N, ns) return True
/running-ng-0.4.1.tar.gz/running-ng-0.4.1/src/running/command/runbms.py
0.858021
0.214177
runbms.py
pypi
from pathlib import Path from typing import Any, Dict, Optional, TYPE_CHECKING if TYPE_CHECKING: from running.benchmark import Benchmark class RunbmsPlugin(object): CLS_MAPPING: Dict[str, Any] CLS_MAPPING = {} def __init__(self, **kwargs): self.name = kwargs["name"] self.run_id = None self.runbms_dir: Optional[Path] self.runbms_dir = None self.log_dir: Optional[Path] self.log_dir = None def set_run_id(self, run_id: Path): self.run_id = run_id def set_runbms_dir(self, runbms_dir: str): self.runbms_dir = Path(runbms_dir).resolve() def set_log_dir(self, log_dir: Path): self.log_dir = log_dir @staticmethod def from_config(name: str, config: Dict[str, str]) -> Any: return RunbmsPlugin.CLS_MAPPING[config["type"]](name=name, **config) def __str__(self) -> str: return "RunbmsPlugin {}".format(self.name) def start_hfac(self, _hfac: Optional[float]): pass def end_hfac(self, _hfac: Optional[float]): pass def start_benchmark(self, _hfac: Optional[float], _size: Optional[int], _bm: "Benchmark"): pass def end_benchmark(self, _hfac: Optional[float], _size: Optional[int], _bm: "Benchmark"): pass def start_invocation(self, _hfac: Optional[float], _size: Optional[int], _bm: "Benchmark", _invocation: int): pass def end_invocation(self, _hfac: Optional[float], _size: Optional[int], _bm: "Benchmark", _invocation: int): pass def start_config(self, _hfac: Optional[float], _size: Optional[int], _bm: "Benchmark", _invocation: int, _config: str, _config_index: int): pass def end_config(self, _hfac: Optional[float], _size: Optional[int], _bm: "Benchmark", _invocation: int, _config: str, _config_index: int, _passed: bool): pass # !!! 
Do NOT remove this import nor change its position # This is to make sure that the plugin classes are correctly registered from running.plugin.runbms.copyfile import CopyFile if TYPE_CHECKING: from running.plugin.runbms.zulip import Zulip else: try: from running.plugin.runbms.zulip import Zulip except: from running.util import register @register(RunbmsPlugin) class Zulip(RunbmsPlugin): def __init__(self, **kwargs): raise RuntimeError("Trying to create an instance of the Zulip " "plugin for runbms, but the import failed. " "This is most likely due to the required " "dependencies not being installed. Try pip " "install running-ng[zulip] to install the extra dependencies.")
/running-ng-0.4.1.tar.gz/running-ng-0.4.1/src/running/plugin/runbms/__init__.py
0.847968
0.172329
__init__.py
pypi
# Quickstart This guide will show you how to use `running-ng` to compare two different builds of JVMs. **Note that for each occurrence in the form `/path/to/*`, you need to replace it with the real path of the respective item in the filesystem.** ## Installation Please follow the [installation guide](./install.md) to install `running-ng`. You will need Python 3.6+. Then, create a file `two_builds.yml` with the following content. ```yaml includes: - "$RUNNING_NG_PACKAGE_DATA/base/runbms.yml" ``` The [YAML file](./references/index.md) represents a dictionary (key-value pairs) that defines the experiments you are running. The `includes` directive here will populate the dictionary with some default values shipped with `running-ng`. **If you use moma machines, please substitute `runbms.yml` with `runbms-anu.yml`.** ## Prepare Benchmarks Add the following to `two_builds.yml`. ```yaml benchmarks: dacapochopin-29a657f: - avrora - batik - biojava - cassandra - eclipse - fop - graphchi - h2 - h2o - jme - jython - luindex - lusearch - pmd - sunflow - tradebeans - tradesoap - tomcat - xalan - zxing ``` This specify a list of benchmarks used in this experiment from the [benchmark suite](./references/suite.md) `dacapochopin-29a657f`. The benchmark suite is defined in `$RUNNING_NG_PACKAGE_DATA/base/dacapo.yml`. By default, the minimum heap sizes of `dacapochopin-29a657f` benchmarks are measured with AdoptOpenJDK 15 using G1 GC. If you are using OpenJDK 11 or 17, you can override the value of `suites.dacapochopin-29a657f.minheap` to `temurin-17-G1` or `temurin-11-G1`. That is, you can, for example, add `"suites.dacapochopin-29a657f.minheap": "temurin-17-G1"` to `overrides`. Then, add the following to `two_builds.yml`. 
```yaml overrides: "suites.dacapochopin-29a657f.timing_iteration": 5 "suites.dacapochopin-29a657f.callback": "probe.DacapoChopinCallback" ``` That is, we want to run five iterations for each invocation, and use `DacapoChopinCallback` because it is the appropriate callback for this release of DaCapo. ## Prepare Your Builds In this guide, we assume you use [`mmtk-openjdk`](https://github.com/mmtk/mmtk-openjdk). Please follow its build guide. I assume you produced two different builds you want to compare. Add the following to `two_builds.yml`. ```yaml runtimes: build1: type: OpenJDK release: 11 home: "/path/to/build1/jdk" # make sure /path/to/build1/jdk/bin/java exists build2: type: OpenJDK release: 11 home: "/path/to/build2/jdk" # make sure /path/to/build2/jdk/bin/java exists ``` This defines two builds of [runtimes](./references/runtime.md). I recommend that you use absolute paths for the builds, although relative paths will work, and will be relative to where you run `running`. I strongly recommend you rename the builds (both the name in the configuration file and the folder name) to something more sensible, preferably with the commit hash for easy troubleshooting and performance debugging later. ## Prepare Probes Please clone [`probes`](https://github.com/anupli/probes), and run `make`. Add the following to `two_builds.yml`. ```yaml modifiers: probes_cp: type: JVMClasspath val: "/path/to/probes/out /path/to/probes/out/probes.jar" probes: type: JVMArg val: "-Djava.library.path=/path/to/probes/out -Dprobes=RustMMTk" ``` This defines two [modifiers](./references/modifier.md), which will be used later to modify the JVM command line arguments. Please only use absolute paths for all the above. ## Prepare Configs Finally, add he following to `two_builds.yml`. ```yaml configs: - "build1|ms|s|c2|mmtk_gc-SemiSpace|tph|probes_cp|probes" - "build2|ms|s|c2|mmtk_gc-SemiSpace|tph|probes_cp|probes" ``` The syntax is described [here](./references/index.md#configs). 
## Sanity Checks The basic form of usage looks like this. ```console running runbms /path/to/log two_builds.yml 8 ``` That is, run the experiments as specified by `two_builds.yml`, store the results in `/path/to/log`, and explore eight different heap sizes (with careful arrangement of which size to run first and which to run later). See [here](./commands/runbms.md) for a complete reference of `runbms`. ### Dry run A dry run (by supplying `-d` to `running` **NOT** `runbms`) allows you to see the commands to be executed. ```console running -d runbms /path/to/log two_builds.yml 8 -i 1 ``` Make sure it looks like what you want. ### Single Invocation Now, actually run the experiment, but only for one invocation (by supplying `-i 1` to `runbms`). ```console running runbms /path/to/log two_builds.yml 8 -i 1 ``` This allows you to see any issue before wasting several days only realizing that something didn't work. ## Run It Once you are happy with everything, run the experiments. ```console running runbms /path/to/log two_builds.yml 8 -p "two_builds" ``` Don't forget to give the results folder a prefix so that you can later tell what the experiment was for. ### Analysing Results This is outside the scope of this quickstart guide.
/running-ng-0.4.1.tar.gz/running-ng-0.4.1/docs/src/quickstart.md
0.436502
0.930774
quickstart.md
pypi
# Configuration File Syntax The configuration file is in YAML format. You can find a good YAML tutorial [here](https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html). Below is the documentation for all the top-level keys that are common to all commands. ## `benchmarks` A YAML list of benchmarks to run in each specified [benchmark suite](#suites). For example: ```yaml benchmarks: dacapo2006: - eclipse dacapobach: - avrora - fop ``` specifies `running` to run the `eclipse` benchmark from the `dacapo2006` benchmark suite; and the `avrora` and `fop` benchmarks from the `dacapobach` benchmark suite. These benchmark suites have to be defined previously (usually through an [`includes` key](#includes)). Note that each benchmark of a benchmark suite can either be a string or a suite-specific dictionary. For example, for the DaCapo benchmark suite, the following two snippets are equivalent. ```yaml benchmarks: dacapo2006: - eclipse ``` ```yaml benchmarks: dacapo2006: - {name: eclipse, bm_name: eclipse, size: default} ``` ## `configs` A YAML list of configuration strings to be used to run the benchmarks. These are specified as a [`runtime`](#runtimes) followed by a `'|'` separated list of [modifiers](./modifier.md), i.e. `"<runtime>|<modifier>|...|<modifier>"`. For example: ```yaml configs: - "openjdk11|ms|s|c2" - "openjdk15|ms|s" ``` specifies `running` to use the `openjdk11` `runtime` with `ms`, `s`, and `c2` modifiers; and the `openjdk15` `runtime` with the `ms`, and `s` modifiers. In the example above, we assume that both the `runtimes` and modifiers have been previously defined (in either the current configuration file or in an [`includes` file](#includes)). ## `includes` A YAML list of paths to YAML files that are to be included into the current configuration file for definitions of some keys. This is primarily used to provide re-usability and extensibility of configuration files. 
A pre-processor step in `running` takes care of including all the specified files. A flattened version of the final configuration file is also generated and placed in the results folder for reproducibility. The paths can be either absolute or relative. Relative paths are solved relative to the current file. For example, if `$HOME/configs/foo.yml` has an `include` line `../bar.yml`, the line is interpreted as `$HOME/bar.yml`. Similarly, ```yaml includes: - "./base/suites.yml" - "./base/modifiers.yml" ``` includes the `suites.yml` and `modifiers.yml` files located at `./base` respectively. Any environment variable in the paths are also resolved before any further processing. This include a special environment variable `$RUNNING_NG_PACKAGE_DATA` that allows you to refer to various configuration files shipping with running-ng, regardless how you installed running-ng. For example, in a global `pip` installation, `$RUNNING_NG_PACKAGE_DATA` will look like `/usr/local/lib/python3.10/dist-packages/running/config`. ## `overrides` Under construction 🚧. ## `modifiers` A YAML dictionary of program arguments or environment variables that are to be used with [config strings](#configs). Cannot use `-` in the key for a modifier. Each modifier requires a `type` key with other keys being specific to that `type`. For more information regarding the different `type`s of modifiers, please refer to [this page](./modifier.md). **Warning preview feature ⚠️**. We can exclude certain benchmarks from using a specific modifier by using an `exclude` key along with a YAML list of benchmarks to be excluded from each benchmark suite. For example: ```yaml modifiers: s: type: JVMArg val: "-server" c2: type: JVMArg val: "-XX:-TieredCompilation -Xcomp" excludes: dacapo2006: - eclipse ``` specifies two modifiers, `s` and `c2`, both of `type` [`JVMArg`](./modifier.md#JVMArg) with their respective values. 
Here, the `eclipse` benchmark from the `dacapo2006` benchmark suite has been excluded from the `c2` modifier. ### Value Options These are special modifiers whose values can be specified through their use in a [configuration string](#configs). Concrete values are specified as `-` separated values after the modifier's name in a configuration string. These values will be indexed by the modifier through syntax similar to Python format strings. This is best understood via an example: ```yaml modifiers: env_var: type: EnvVar var: "FOO{0}" val: "{1}" [...] configs: - "openjdk11|env_var-42-43" ``` specifies to run the `openjdk11` [`runtime`](#runtimes) with the environment variable `FOO42` set to `43`. Note that value options are not limited only to environment variables, and can be used for all modifier `type`s. ## `runtimes` A YAML dictionary of runtime definitions that are to be used with [config strings](#configs). Each runtime requires a `type` key with other keys being specific to that `type`. For more information regarding the different `type`s of runtimes, please refer to [this page](./runtime.md). ## `suites` A YAML dictionary of benchmark suite definitions that are to be used as keys of `benchmarks`. Each benchmark suite requires a `type` key with other keys being specific to that `type`. For more information regarding the different `type`s of benchmark suites, please refer to [this page](./suite.md).
/running-ng-0.4.1.tar.gz/running-ng-0.4.1/docs/src/references/index.md
0.598664
0.961207
index.md
pypi
# Benchmark Suite ## `BinaryBenchmarkSuite` (preview ⚠️) A `BinaryBenchmarkSuite` is a suite of programs which can be used to run binary benchmarks such as for C/C++ benchmarking. ### Keys `programs`: A yaml list of benchmarks in the format: ```yaml programs: <BM_NAME_1>: path: /full/path/to/benchmark/binary_1 args: "Any arguments to binary_1" <BM_NAME_2>: path: /full/path/to/benchmark/binary_2 args: "Any arguments to binary_2" [...] ``` A possible use-case could use wrapper shell scripts around the benchmark to output timing and other information in a tab-separated table. ## `DaCapo` [DaCapo benchmark suite](https://www.dacapobench.org/). ### Keys `release`: one of the possible values `["2006", "9.12", "evaluation"]`. The value is required. `path`: path to the DaCapo `jar`. The value is required. `minheap`: a string that selects one of the `minheap_values` sets to use. `minheap_values`: a dictionary containing multiple named sets of minimal heap sizes that is enough for a benchmark from the suite to run without triggering `OutOfMemoryError`. Each size is measured in MiB. The default value is an empty dictionary. The minheap values are used only when running `runbms` with a valid `N` value. If the minheap value for a benchmark is not specified, a default of `4096` is used. An example looks like this. ```yaml minheap_values: adoptopenjdk-15-G1: avrora: 7 batik: 253 temurin-17-G1: avrora: 7 batik: 189 ``` `timing_iteration`: specifying the timing iteration. It can either be a number, which is passed to DaCapo as `-n`, or a string `converge`. The default value is 3. `callback`: the class (possibly within some packages) for the DaCapo callback. The value is passed to DaCapo as `-c`. The default value is `null`. `timeout`: timeout for one invocation of a benchmark in seconds. The default value is `null`. `wrapper` (preview ⚠️): specifying a wrapper (i.e., extra stuff on the command line before `java`) when running benchmarks. The default value is `null`, a no-op. 
There are two possible ways to specify `wrapper`. First, a single string with [shell-like syntax](https://docs.python.org/3/library/shlex.html#shlex.split). Multiple arguments are space separated. This wrapper is used for all benchmarks in the benchmark suite. Second, a dictionary of strings with shell-like syntax to specify possibly different wrappers for different benchmarks. If a benchmark doesn't have a wrapper in the dictionary, it is treated as `null`. `companion` (preview ⚠️): the syntax is similar to `wrapper`. The companion program will start before the main program. The main program will start two seconds after the companion program to make sure the companion is fully initialized. Once the main program finishes, we will wait for the companion program to finish. Therefore, companion programs should have appropriate timeouts or detect when main program finishes. Here is an example of using `companion` to launch `bpftrace` in the background to count the system calls. ```yaml includes: - "$RUNNING_NG_PACKAGE_DATA/base/runbms.yml" overrides: "suites.dacapo2006.timing_iteration": 1 "suites.dacapo2006.companion": "sudo bpftrace -e 'tracepoint:raw_syscalls:sys_enter { @syscall[args->id] = count(); @process[comm] = count();} interval:s:10 { printf(\"Goodbye world!\\n\"); exit(); }'" "invocations": 1 benchmarks: dacapo2006: - fop configs: - "temurin-17" ``` In the log file, the output from the main program and the output from the companion program is separated by `*****`. `size`: specifying the size of input data. Note that the names of the sizes are subject to change depending on the DaCapo releases. The default value is `null`, which means DaCapo will use the default size unless you override that for individual benchmarks. ### Benchmark Specification Some of the suite-wide keys can be overridden in a per-benchmark-basis. The keys currently supported are `timing_iteration`, `size`, and `timeout`. 
Note that, within a suite, your choice of `name` should uniquely identify a particular way of running a benchmark of name `bm_name`. The `name` is used to get the minheap value, etc., which can depend of the size of input data and/or the timing iteration. Therefore, it is highly recommended that you give a `name` different from the `bm_name`. Note that, you might need to adjust various other values, including but not limit to the minheap value dictionary and the modifier exclusion dictionary. The following is an example. ```yaml benchmarks: dacapo2006: - {name: eclipse_large, bm_name: eclipse, size: large} ``` ## `SPECjbb2015` (preview ⚠️) [SPECjbb2015](https://www.spec.org/jbb2015/). ### Keys `release`: one of the possible values `["1.03"]`. The value is required. `path`: path to the `jar`. The value is required. Note that the property file should reside in `path/../config/specjbb2015.props` per the standard folder structure of the ISO image provided by SPEC. ### Benchmark Specification Only strings are allowed, which should correspond to the the mode of the SPECjbb2015 controller. Right now, only `"composite"` is supported. ## `SPECjvm98` (preview ⚠️) [SPECjvm98](https://www.spec.org/jvm98/). Note that you will need to prepend probes to the classpaths, so that the [modified](https://github.com/anupli/probes/blob/master/SpecApplication.java) `SpecApplication` can be used. Here is an example configuration file. ```yaml includes: - "/home/zixianc/running-ng/src/running/config/base/runbms.yml" modifiers: probes_cp: type: JVMClasspathPrepend val: "/home/zixianc/MMTk-Dev/evaluation/probes /home/zixianc/MMTk-Dev/evaluation/probes/probes.jar" benchmarks: specjvm98: - _213_javac configs: - "adoptopenjdk-8|probes_cp" ``` ### Keys `release`: one of the possible values `["1.03_05"]`. The value is required. `path`: path to the SPECjvm98 folder, where you can find `SpecApplication.class`. The value is required. `timing_iteration`: specifying the timing iteration. 
It can only be a number, which is passed to SpecApplication as `-i`. The value is required. ### Benchmark Specification Only strings are allowed, which should correspond to benchmark program of SPECjvm98. The following are the benchmarks: - _200_check - _201_compress - _202_jess - _209_db - _213_javac - _222_mpegaudio - _227_mtrt - _228_jack ## `Octane` (preview ⚠️) ### Keys `path`: path to the Octane benchmark folder. The value is required. `wrapper`: path to the Octane wrapper written by Wenyu Zhao. The value is required. `timing_iteration`: specifying the timing iteration using an integer. The value is required. `minheap`: a string that selects one of the `minheap_values` sets to use. `minheap_values`: a dictionary containing multiple named sets of minimal heap sizes that is enough for a benchmark from the suite to run without triggering `Fatal javascript OOM in ...`. Each size is measured in MiB. The default value is an empty dictionary. The minheap values are used only when running `runbms` with a valid `N` value. If the minheap value for a benchmark is not specified, a default of `4096` is used. An example looks like this. ```yaml minheap_values: d8: octane: box2d: 5 codeload: 159 crypto: 3 ``` ## `JuliaGCBenchmarks` (preview ⚠️) GC benchmarks for Julia: https://github.com/JuliaCI/GCBenchmarks ### Keys `path`: path to the GCBenchmarks folder. The value is required. `minheap`: a string that selects one of the `minheap_values` sets to use. `minheap_values`: a dictionary containing multiple named sets of minimal heap sizes that is enough for a benchmark from the suite to run without triggering `Out of Memory!`. 
An example looks like this: ```yaml minheap_values: julia-mmtk-immix: multithreaded/binary_tree/tree_immutable: 225 multithreaded/binary_tree/tree_mutable: 384 multithreaded/bigarrays/objarray: 9225 serial/TimeZones: 5960 serial/append: 1563 serial/bigint/pollard: 198 serial/linked/list: 4325 serial/linked/tree: 216 serial/strings/strings: 2510 slow/bigint/pidigits: 198 slow/rb_tree/rb_tree: 8640 ```
/running-ng-0.4.1.tar.gz/running-ng-0.4.1/docs/src/references/suite.md
0.400984
0.928474
suite.md
pypi
from .exceptions import DistanceOutOfBoundsError

# Purdy-points model constants used in the points formula below.
C_K1 = 0.0654
C_K2 = 0.00258
C_A = 85
C_B = 950

# Scoring table: (distance in meters, velocity in m/s) pairs, sorted by
# ascending distance.  _interpolate() linearly interpolates *time* (not
# velocity) between adjacent rows for distances that fall in between.
PORTUGESE_TABLE = (
    (40, 11),
    (50, 10.9960),
    (60, 10.9830),
    (70, 10.9620),
    (80, 10.934),
    (90, 10.9000),
    (100, 10.8600),
    (110, 10.8150),
    (120, 10.765),
    (130, 10.7110),
    (140, 10.6540),
    (150, 10.5940),
    (160, 10.531),
    (170, 10.4650),
    (180, 10.3960),
    (200, 10.2500),
    (220, 10.096),
    (240, 9.9350),
    (260, 9.7710),
    (280, 9.6100),
    (300, 9.455),
    (320, 9.3070),
    (340, 9.1660),
    (360, 9.0320),
    (380, 8.905),
    (400, 8.7850),
    (450, 8.5130),
    (500, 8.2790),
    (550, 8.083),
    (600, 7.9210),
    (700, 7.6690),
    (800, 7.4960),
    (900, 7.32000),
    (1000, 7.18933),
    (1200, 6.98066),
    (1500, 6.75319),
    (2000, 6.50015),
    (2500, 6.33424),
    (3000, 6.21913),
    (3500, 6.13510),
    (4000, 6.07040),
    (4500, 6.01822),
    (5000, 5.97432),
    (6000, 5.90181),
    (7000, 5.84156),
    (8000, 5.78889),
    (9000, 5.74211),
    (10000, 5.70050),
    (12000, 5.62944),
    (15000, 5.54300),
    (20000, 5.43785),
    (25000, 5.35842),
    (30000, 5.29298),
    (35000, 5.23538),
    (40000, 5.18263),
    (50000, 5.08615),
    (60000, 4.99762),
    (80000, 4.83617),
    (100000, 4.68988),
)


def _fraction_on_turns(distance):
    """Return the fraction (0.0-1.0) of *distance* that is run on the
    curved parts of a standard 400 m track.

    Races shorter than 110 m are treated as run entirely on a straight,
    so the fraction is 0.
    """
    track_length = 400
    if distance < 110:
        return 0
    laps = int(distance / track_length)
    meters = distance % track_length
    # Within one lap the turns occupy the 50-150 m and 250-350 m
    # segments, i.e. at most 200 m of the 400 m lap counts as turns.
    if meters <= 50:
        part_lap = 0
    elif meters <= 150:
        part_lap = meters - 50
    elif meters <= 250:
        part_lap = 100
    elif meters <= 350:
        part_lap = 100 + (meters - 250)
    elif meters <= 400:
        part_lap = 200
    # Each full lap contributes half its length (2 x 100 m turns).
    turn_distance = laps * 0.5 * track_length + part_lap
    return turn_distance / distance


def _interpolate(distance):
    """Return ``(velocity_interpolated, time_950)`` for *distance*.

    ``velocity_interpolated`` is the table velocity (m/s) obtained by
    linear interpolation over time between the two enclosing table rows;
    ``time_950`` is the corresponding 950-point reference time, adjusted
    by the empirical correction constants c1, c2 and c3 (the last one
    scaled by the fraction of the race run on turns).

    Raises DistanceOutOfBoundsError when *distance* lies outside the
    table range (40 m - 100000 m).
    """
    # Empirical correction constants for the 950-point reference time.
    c1 = 0.2
    c2 = 0.08
    c3 = 0.0065
    if distance < PORTUGESE_TABLE[0][0] or distance > PORTUGESE_TABLE[-1][0]:
        raise DistanceOutOfBoundsError()
    # The bounds check above guarantees the break fires before index + 1
    # can run past the end of the table.
    for index in range(len(PORTUGESE_TABLE)):
        if distance <= PORTUGESE_TABLE[index + 1][0]:
            lower_distance, lower_velocity = PORTUGESE_TABLE[index]
            upper_distance, upper_velocity = PORTUGESE_TABLE[index + 1]
            break
    lower_time = lower_distance / lower_velocity
    upper_time = upper_distance / upper_velocity
    time_interpolated = (
        lower_time +
        (upper_time - lower_time) *
        (distance - lower_distance) /
        (upper_distance - lower_distance)
    )
    velocity_interpolated = distance / time_interpolated
    time_950 = (
        time_interpolated + c1 + c2 * velocity_interpolated +
        c3 * _fraction_on_turns(distance) * pow(velocity_interpolated, 2)
    )
    return velocity_interpolated, time_950


def purdy(distance, time, digits=2):
    """
    Returns Purdy Points based on Portugese Tables.

    :param distance: race distance in meters (40 - 100000)
    :param time: race time in seconds
    :param digits: decimal places to round to; ``None`` returns the raw value
    """
    velocity_interpolated, time_950 = _interpolate(distance)
    k = C_K1 - C_K2 * velocity_interpolated
    a = C_A / k
    b = 1 - C_B / a
    points = a * (time_950 / time - b)
    return points if digits is None else round(points, digits)


def purdy_prediction(distance, time, distance_to_predict):
    """Predict the time (in whole seconds) for *distance_to_predict*
    that scores the same Purdy points as running *distance* in *time*.
    """
    points = purdy(distance, time, digits=None)
    # Invert the points formula at the target distance.
    velocity_interpolated, time_950 = _interpolate(distance_to_predict)
    k = C_K1 - C_K2 * velocity_interpolated
    a = C_A / k
    b = 1 - C_B / a
    time_to_predict = time_950 / (points / a + b)
    return int(round(time_to_predict))
/running_performance-0.2.1-py3-none-any.whl/running_performance/purdy.py
0.640074
0.439928
purdy.py
pypi
from datetime import datetime, timedelta

from .helpers import string_to_date
from .helpers import convert_distance


class Stats:
    """Compute statistics over a runner's race results.

    Optional keyword arguments ``from_date``, ``to_date`` (date strings)
    and ``race_type`` restrict which of the runner's races are included.
    """

    def __init__(self, runner, **kwargs):
        self.runner = runner
        self.from_date = kwargs.get('from_date')
        self.to_date = kwargs.get('to_date')
        self.race_type = kwargs.get('race_type')
        self.race_results = self.__filter_race()

    def km_count(self):
        """Total distance covered across the filtered races."""
        total = 0
        for race in self.race_results:
            total += race.distance
        return total

    def best_time_on_distance(self, distance):
        """Fastest finishing time among races run over exactly *distance*."""
        wanted = convert_distance(distance)
        candidates = [
            race for race in self.race_results if race.distance == wanted
        ]
        if not candidates:
            raise ValueError("Runner don't have race results")
        fastest = min(candidates, key=lambda race: race.result_of_the_race)
        return fastest.result_of_the_race

    def longest_run(self):
        """Greatest distance among the filtered races."""
        if not self.race_results:
            raise ValueError("Runner don't have race results")
        return max(race.distance for race in self.race_results)

    @property
    def from_date(self):
        return self.__from_date

    @from_date.setter
    def from_date(self, from_date):
        # Accept None (kept as-is) or a date string, which gets parsed.
        self.__from_date = string_to_date(from_date) if from_date else from_date

    @property
    def to_date(self):
        return self.__to_date

    @to_date.setter
    def to_date(self, to_date):
        # Accept None (kept as-is) or a date string, which gets parsed.
        self.__to_date = string_to_date(to_date) if to_date else to_date

    def __filter_race(self):
        """Return the runner's races inside the date window and race type."""
        # Default window: roughly the last 100 years up to today.
        start = self.from_date or \
            (datetime.now() - timedelta(days=100 * 365)).date()
        end = self.to_date or datetime.now().date()
        wanted_type = self.race_type

        def keep(race):
            if not start <= race.race_date <= end:
                return False
            # No type filter means every type passes; otherwise the race
            # must carry a (truthy) matching type.
            if wanted_type is None:
                return True
            return bool(race.race_type and race.race_type == wanted_type)

        return [race for race in self.runner.race_results if keep(race)]
/running_results_fetcher-0.2.2.tar.gz/running_results_fetcher-0.2.2/running_results_fetcher/stats.py
0.870322
0.255448
stats.py
pypi
from datetime import datetime
from datetime import timedelta

from .helpers import convert_distance


class RaceResult:
    """A single race result (name, distance, date, type and finish time).

    All attributes are set via keyword arguments; the property setters
    normalize raw string input (dates, times, birth years, distances).
    """

    def __init__(self, **kwargs):
        self.race_name = kwargs.get('race_name', '')
        self.distance = kwargs.get('distance')
        self.runner_birth = kwargs.get('runner_birth')
        self.race_date = kwargs.get('race_date')
        self.race_type = kwargs.get('race_type')
        self.result_of_the_race = kwargs.get('result_of_the_race')

    def __eq__(self, other):
        # Equality deliberately ignores the finish time and runner birth:
        # two entries for the same race are considered the same result.
        if not isinstance(other, RaceResult):
            # Robustness fix: comparing against a foreign type used to
            # raise AttributeError; defer to the other operand instead.
            return NotImplemented
        if not self.race_type == other.race_type:
            return False
        if not self.race_name == other.race_name:
            return False
        if not self.distance == other.distance:
            return False
        if not self.race_date == other.race_date:
            return False
        return True

    def __str__(self):
        return "{}{}{}".format(self.race_name,
                               self.distance,
                               self.result_of_the_race)

    def __repr__(self):
        return "{} {}".format(self.race_type, self.result_of_the_race)

    @property
    def race_name(self):
        return self.__race_name

    @race_name.setter
    def race_name(self, race_name):
        # Collapse runs of whitespace to single spaces.
        race_name = " ".join(race_name.split())
        self.__race_name = race_name

    @property
    def distance(self):
        return self.__distance

    @distance.setter
    def distance(self, distance):
        self.__distance = convert_distance(distance)

    @property
    def runner_birth(self):
        return self.__runner_birth

    @runner_birth.setter
    def runner_birth(self, runner_birth):
        """Store the birth year as an int; two-digit years are assumed 19xx."""
        if not runner_birth:
            self.__runner_birth = None
            return None
        if len(str(runner_birth)) == 2:
            runner_birth = "19" + str(runner_birth)
        try:
            self.__runner_birth = int(runner_birth)
        except ValueError:
            self.__runner_birth = None

    @property
    def race_date(self):
        return self.__race_date

    @race_date.setter
    def race_date(self, string_date):
        """Parse an ISO 'YYYY-MM-DD' string and store it as a date."""
        if not string_date:
            self.__race_date = None
            return None
        string_date = datetime.strptime(string_date, '%Y-%m-%d')
        self.__race_date = string_date.date()

    @property
    def result_of_the_race(self):
        return self.__result_of_the_race

    @result_of_the_race.setter
    def result_of_the_race(self, string_time):
        """Parse an 'H:M:S' string and store it as a timedelta.

        Malformed or missing input stores None.
        """
        if not string_time:
            # Bug fix: previously this returned without initializing the
            # backing attribute, so the getter raised AttributeError
            # whenever the result was missing.
            self.__result_of_the_race = None
            return None
        ti = string_time.split(':')
        try:
            hour = int(ti[0])
            minute = int(ti[1])
            second = int(ti[2])
        except (ValueError, IndexError):
            time_delta = None
        else:
            time_delta = timedelta(hours=hour, minutes=minute, seconds=second)
        self.__result_of_the_race = time_delta
/running_results_fetcher-0.2.2.tar.gz/running_results_fetcher-0.2.2/running_results_fetcher/race_result.py
0.716913
0.197599
race_result.py
pypi
from .race_result import RaceResult
from .stats import Stats


class Runner:
    """Represents a runner and the race results collected for them."""

    def __init__(self, name, birth):
        """
        Arguments:
            name {str} -- name and surname of the runner
            birth {str} -- the year of birth of the runner
        """
        self.name = name
        self.birth = birth
        self.race_results = []
        self.stats = None

    def add_races(self, races):
        """Build RaceResult objects from dicts and keep the valid ones.

        A race is kept only if it has a distance and a finish time,
        belongs to this runner (matching birth year) and is not already
        present.

        Arguments:
            races(list): list of dicts with keys race_name, distance,
                race_date, runner_birth, result_of_the_race, race_type
        """
        for race_data in races:
            candidate = RaceResult(**race_data)
            if self.__can_add_race(candidate):
                self.race_results.append(candidate)

    def filter_races(self, **kwargs):
        """Create a Stats object for the given filters and return its races.

        Arguments:
            kwargs: arguments forwarded to the Stats constructor
        """
        self.stats = Stats(self, **kwargs)
        return self.__stats.race_results

    @property
    def stats(self):
        # Fall back to an unfiltered Stats when none has been set yet.
        return self.__stats if self.__stats else Stats(self)

    @stats.setter
    def stats(self, stats):
        self.__stats = stats

    @property
    def name(self):
        return self.__name

    @name.setter
    def name(self, name):
        # Collapse runs of whitespace to single spaces.
        self.__name = " ".join(name.split())

    @property
    def birth(self):
        return self.__birth

    @birth.setter
    def birth(self, birth):
        # Two-digit years are assumed to be 19xx.
        birth_text = str(birth)
        if len(birth_text) == 2:
            birth_text = "19" + birth_text
        self.__birth = int(birth_text)

    def __can_add_race(self, race_result):
        """Reject incomplete results, other runners' results and duplicates."""
        if not race_result.distance:
            return False
        if not race_result.result_of_the_race:
            return False
        if race_result.runner_birth != self.birth:
            return False
        return race_result not in self.race_results
/running_results_fetcher-0.2.2.tar.gz/running_results_fetcher-0.2.2/running_results_fetcher/runner.py
0.860925
0.414366
runner.py
pypi
"""Command line pace/distance/time calculator for runners.

Given any two of pace, distance and time, computes the third.
"""
import re
import enum
import click
from typing import Tuple, Optional

# Conversion factors to meters.
DISTANCE_UNITS = {
    'feet': 0.3048,
    'foot': 0.3048,
    'yard': 0.9144,
    'yards': 0.9144,
    'm': 1,
    'meter': 1,
    'meters': 1,
    'k': 1000,
    'km': 1000,
    'kilometer': 1000,
    'mile': 1609.344,
    'miles': 1609.344,
    'marathon': 42195,
    'half': 42195 / 2,
    'half-marathon': 42195 / 2
}

SECOND = 1
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
WEEK = 7 * DAY

# Conversion factors to seconds.
TIME_UNITS = {
    's': SECOND,
    'sec': SECOND,
    'secs': SECOND,
    'second': SECOND,
    'seconds': SECOND,
    'm': MINUTE,
    'min': MINUTE,
    'mins': MINUTE,
    'minute': MINUTE,
    'minutes': MINUTE,
    'h': HOUR,
    'hour': HOUR,
    'hours': HOUR,
    'd': DAY,
    'day': DAY,
    'days': DAY,
    'w': WEEK,
    'week': WEEK,
    'weeks': WEEK
}

# Named reference paces in meters per second.
PACES = {
    'kipchoge': 42195 / (2 * HOUR + 1 * MINUTE + 39),
    'bolt': 100 / 9.58
}


class Mode(enum.Enum):
    """Which quantity to compute (or why the request is invalid)."""
    PACE = 0
    DISTANCE = 1
    TIME = 2
    NOT_ENOUGH = 3
    TOO_MUCH = 4


# Semantic aliases for plain floats.
Meters = float
Seconds = float
MetersPerSecond = float


@click.command()
@click.option('--pace', '-p',
              help="Running pace, for example '5:30', '4min/mile', "
                   "'3min/km', '8:00/mile' etc.")
@click.option('--distance', '-d',
              help="Running distance, for example '10k', '800m', "
                   "'marathon', '1mile' etc.")
@click.option('--time', '-t',
              help="Running time, for example '1:35:00', '2h', '100sec' etc")
@click.option('--unit', '-u',
              help="Default distance unit to use when omitted and for result "
                   "(default is kilometer).",
              default='km')
def running(time: str, distance: str, pace: str, unit: str):
    """Compute the missing one of pace, distance and time."""
    mode = identify_mode(time, distance, pace)
    if mode == Mode.PACE:
        seconds = parse_time(time)
        meters = parse_distance(distance, unit)
        speed: MetersPerSecond = meters / seconds
        output_line('Required pace:', format_pace(speed, unit), '/' + unit)
    elif mode == Mode.DISTANCE:
        speed = parse_pace(pace, unit)
        seconds = parse_time(time)
        meters: Meters = speed * seconds
        output_line('Travelled distance:', format_distance(meters, unit), unit)
    elif mode == Mode.TIME:
        speed = parse_pace(pace, unit)
        meters = parse_distance(distance, unit)
        seconds: Seconds = meters / speed
        output_line('Elapsed time:', format_seconds(seconds), '[H:]MM:SS')
    elif mode == Mode.NOT_ENOUGH:
        # Fixed message typo: "atleast" -> "at least".
        error('You need to give at least two of time, distance or pace.')
    elif mode == Mode.TOO_MUCH:
        error('You provided time, distance and pace. Try omitting one.')


def output_line(pre: str, bolded: str, post: str) -> None:
    """Echo a result line with the value in bold."""
    click.echo(f'{pre} ', nl=False)
    click.secho(bolded, bold=True, nl=False)
    click.echo(f' {post}')


def error(message: str) -> None:
    """Print an error message in red with a usage hint."""
    click.secho(message, fg='red')
    click.echo('See running --help for further instructions.')


def identify_mode(
        time: Optional[str],
        distance: Optional[str],
        pace: Optional[str]) -> Mode:
    """Decide which quantity can be computed from the supplied options."""
    def given(*metrics):
        return all(metric is not None for metric in metrics)

    if given(time, distance, pace):
        return Mode.TOO_MUCH
    elif given(time, pace):
        return Mode.DISTANCE
    elif given(distance, pace):
        return Mode.TIME
    elif given(distance, time):
        return Mode.PACE
    else:
        return Mode.NOT_ENOUGH


def parse_time(time: str) -> Seconds:
    """Parse '2:30:01', '2h', '100sec' or a bare unit name into seconds."""
    if ':' in time:
        # example '2:30:01' -> seconds, minutes, hours from the right
        parts = [float(t) for t in time.split(':')]
        return sum(n * secs for n, secs in zip(reversed(parts), (1, 60, 3600)))
    if time in TIME_UNITS:
        return TIME_UNITS[time]
    num, unit = extract_num_and_unit(time)
    # A bare number is interpreted as seconds.
    return TIME_UNITS[unit or 's'] * num


def parse_distance(distance: str, default_unit: str) -> Meters:
    """Parse '10k', '800m', 'marathon' etc. into meters."""
    if distance in DISTANCE_UNITS:
        return DISTANCE_UNITS[distance]
    num, unit = extract_num_and_unit(distance)
    return DISTANCE_UNITS[unit or default_unit] * num


def parse_pace(pace: str, default_unit: str) -> MetersPerSecond:
    """Parse a pace like '5:30', '4min/mile' or a named pace into m/s."""
    if pace in PACES:
        return PACES[pace]
    if '/' in pace:
        time_str, distance_str = pace.split('/')
        time = parse_time(time_str)
        distance = parse_distance(distance_str, default_unit)
    else:
        # A bare time is read as time per one default unit of distance.
        time = parse_time(pace)
        distance = DISTANCE_UNITS[default_unit]
    return distance / time


def extract_num_and_unit(num_and_unit) -> Tuple[float, Optional[str]]:
    """Split a string like '10km' into (10.0, 'km'); unit may be empty."""
    match = re.match(r"(\d*\.\d+|\d+)([\w-]*)\Z", num_and_unit)
    if not match:
        raise ValueError(f'Invalid unit {num_and_unit}')
    return float(match.group(1)), match.group(2)


def format_pace(speed: MetersPerSecond, unit: str) -> str:
    """Format a speed as time per one *unit* of distance."""
    seconds = DISTANCE_UNITS[unit] / speed
    return format_seconds(seconds)


def format_seconds(seconds: Seconds) -> str:
    """Format seconds as 'H:MM:SS' or 'MM:SS'."""
    sec = round(seconds)
    m, s = divmod(sec, 60)
    h, m = divmod(m, 60)
    if h:
        return f'{h}:{m:02}:{s:02}'
    else:
        return f'{m:02}:{s:02}'


def format_distance(distance: Meters, unit: str) -> str:
    """Format a distance in meters as a two-decimal value in *unit*."""
    n_units = distance / DISTANCE_UNITS[unit]
    return f'{n_units:.2f}'


if __name__ == '__main__':
    running()
/running-0.1.3.tar.gz/running-0.1.3/running.py
0.786664
0.356839
running.py
pypi
import sys import argparse import inspect import pydoc from typing import Dict, List, Tuple, Callable, Optional from pathlib import Path from types import ModuleType from collections.abc import ItemsView def filter_vars(imported_vars: ItemsView) -> Dict[str, Callable]: """Gets the name and object of the callable object from the imported_vars. Args: imported_vars (ItemsView): python module items iterator. Returns: Dict[str, Callable]: dictionary of name and callable object """ functions = {} for name, obj in imported_vars: if callable(obj) and not name.startswith('_'): if inspect.isclass(obj): methods = inspect.getmembers(obj(), predicate=inspect.ismethod) for name, method in methods: if not name.startswith('_'): functions[obj.__name__ + "." + name] = method else: functions[obj.__name__] = obj return functions def load_runfile(runfile: Path) -> ItemsView: """Load Python file. Args: runfile (Path): Python file path. Returns: ItemsView: python module items iterator. """ importer = __import__ directory, runfile = runfile.parent, Path(runfile.name) sys.path.insert(0, str(directory)) imported: ModuleType = importer(runfile.stem) del sys.path[0] imported_vars = vars(imported).items() return imported_vars def _escape_split(sep: str, argstr: str) -> List[str]: """Split function with escape characters. Args: sep (str): Split character. argstr (str): String to be divided. Returns: List[str]: List of splited strings. """ escaped_sep = r'\%s' % sep if escaped_sep not in argstr: return argstr.split(sep) before, _, after = argstr.partition(escaped_sep) startlist = before.split(sep) unfinished = startlist[-1] startlist = startlist[:-1] endlist = _escape_split(sep, after) unfinished += sep + endlist[0] return startlist + [unfinished] + endlist[1:] def parse_args(cmd: str) -> Tuple[str, List[str], Dict[str, str]]: """Argment parser. Args: cmd (str): argment. Returns: Tuple[str, List[str], Dict[str, str]]: original argument, positional argument, and key word argument. 
""" args = [] kwargs = {} if ':' in cmd: cmd, argstr = cmd.split(':', 1) for pair in _escape_split(',', argstr): result = _escape_split('=', pair) if len(result) > 1: k, v = result kwargs[k] = v else: args.append(result[0]) return cmd, args, kwargs def get_docstring(function: Callable, abbrv: bool = False) -> str: """Get docstring. Args: function (Callable): targe callable object abbrv (bool, optional): Defaults to False. Returns: str: docstring. """ doc = inspect.getdoc(function) if abbrv and doc is not None: doc = doc.splitlines()[0].strip() else: doc = "" return doc def get_function(functions: Dict[str, Callable], function_name: str) -> Optional[Callable]: """Get function. Args: functions (Dict[str, Callable]): original function dictionaly. function_name (str): search function name. Returns: Optional[Callable]: find callable object. """ try: return functions[function_name] except KeyError: print("No function named '{}' found!".format(function_name)) return None def print_functions(functions: Dict[str, Callable]) -> None: """Print functions list. Args: functions (Dict[str, Callable]): Print functions """ print("Available functions:") for fname, function in functions.items(): doc = get_docstring(function, abbrv=True) print(fname + "\t" + doc if doc is not None else "") def print_function(functions: Dict[str, Callable], function: str) -> None: """Print function. Args: functions (Dict[str, Callable]): Print functions. function (str): function name. """ func = get_function(functions, function) if func: print(pydoc.plain(pydoc.render_doc( func, "Displaying docstring for %s") )) def run_function(functions: Dict[str, Callable], cmd: str) -> None: """Run function. Args: functions (Dict[str, Callable]): Print functions. cmd (str): command. """ function, args, kwargs = parse_args(cmd) try: func = get_function(functions, function) if func: func(*args, **kwargs) except TypeError as e: print(e.args[0]) def main(argv: Optional[List[str]] = None) -> None: """Main function. 
Args: argv (List[str]): command line arguments. """ parser = argparse.ArgumentParser( prog="runp", description='Run functions in a file.') parser.add_argument( 'runfile', help='file containing the functions') parser.add_argument( 'function', nargs='?', help='function to run') parser.add_argument( '-l', '--list', action='store_true', help='list available functions in file' ) parser.add_argument( '-d', '--detail', help='print function docstring' ) if argv is None: argv = sys.argv[1:] args = parser.parse_args(argv) runfile = Path(args.runfile).resolve() if not runfile.is_file(): print("No such file '{}'".format(args.runfile)) sys.exit(1) imported_vars = load_runfile(runfile) functions = filter_vars(imported_vars) if args.list: print_functions(functions) sys.exit(0) if args.detail: print_function(functions, args.detail) sys.exit(0) if args.function is None: print("No function was selected!") sys.exit(1) run_function(functions, args.function) if __name__ == "__main__": main(sys.argv[1:])
/runp3-0.0.6-py3-none-any.whl/runp/runp.py
0.661486
0.328556
runp.py
pypi
[![runpandarun on pypi](https://img.shields.io/pypi/v/runpandarun)](https://pypi.org/project/runpandarun/) [![Python test and package](https://github.com/simonwoerpel/runpandarun/actions/workflows/python.yml/badge.svg)](https://github.com/simonwoerpel/runpandarun/actions/workflows/python.yml) [![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit)](https://github.com/pre-commit/pre-commit) [![Coverage Status](https://coveralls.io/repos/github/investigativedata/runpandarun/badge.svg?branch=develop)](https://coveralls.io/github/investigativedata/runpandarun?branch=develop) [![MIT License](https://img.shields.io/pypi/l/runpandarun)](./LICENSE) # Run Panda Run :panda_face: :panda_face: :panda_face: :panda_face: :panda_face: :panda_face: :panda_face: A simple interface written in python for reproducible i/o workflows around tabular data via [pandas](https://pandas.pydata.org/) `DataFrame` specified via `yaml` "playbooks". **NOTICE** As of july 2023, this package only handles pandas transform logic, no data warehousing anymore. 
See [archived version](https://github.com/simonwoerpel/runpandarun/tree/master) ## Quickstart [Install via pip](#installation) Specify your operations via `yaml` syntax: ```yaml read: uri: ./data.csv options: skiprows: 3 operations: - handler: DataFrame.rename options: columns: value: amount - handler: Series.map column: slug options: func: "lambda x: normality.slugify(x) if isinstance(x) else 'NO DATA'" ``` store this as a file `pandas.yml`, and apply a data source: cat data.csv | runpandarun pandas.yml > data_transformed.csv Or, use within your python scripts: ```python from runpandarun import Playbook play = Playbook.from_yaml("./pandas.yml") df = play.run() # get the transformed dataframe # change playbook parameters on run time: play.read.uri = "s3://my-bucket/data.csv" df = play.run() df.to_excel("./output.xlsx") # the play can be applied directly to a data frame, # this allows more granular control df = get_my_data_from_somewhere_else() df = play.run(df) ``` ## Installation Requires at least python3.10 Virtualenv use recommended. Additional dependencies (`pandas` et. al.) 
will be installed automatically: pip install runpandarun After this, you should be able to execute in your terminal: runpandarun --help ## Reference The playbook can be programmatically obtained in different ways: ```python from runpandarun import Playbook # via yaml file play = Playbook.from_yaml('./path/to/config.yml') # via yaml string play = Playbook.from_string(""" operations: - handler: DataFrame.sort_values options: by: my_sort_column """) # directly via the Playbook object (which is a pydantic object) play = Playbook(operations=[{ "handler": "DataFrane.sort_values", "options": {"by": "my_sort_column"} }]) ``` All options within the Playbook are optional, if you apply an empty play to a DataFrame, it will just remain untouched (but `runpandarun` won't break) The playbook has three sections: - read: instructions for reading in a source dataframe - operations: a list of functions with their options (kwargs) executed in the given order - write: instructions for saving a transformed dataframe to a target ### Read and write `pandas` can read and write from many local and remote sources and targets. 
More information about handlers and their options: [Pandas IO tools](https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html) For example, you could transform a source from `s3` to a `sftp` endpoint: runpandarun pandas.yml -i s3://my_bucket/data.csv -o sftp://user@host/data.csv you can overwrite the `uri` arguments in the command line with `-i / --in-uri` and `-o / --out-uri` ```yaml read: uri: s3://my-bucket/data.xls # input uri, anything that pandas can read handler: read_excel # default: guess by file extension, fallback: read_csv options: # options for the handler skiprows: 2 write: uri: ./data.xlsx # output uri, anything that pandas can write to handler: write_excel # default: guess by file extension, fallback: write_csv options: # options for the handler index: false ``` ### Operations The `operations` key of the yaml spec holds the transformations that should be applied to the data in order. An operation can be any function from [pd.DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/frame.html) or [pd.Series](https://pandas.pydata.org/pandas-docs/stable/reference/series.html). Refer to these documentations to see their possible options (as in `**kwargs`). For the handler, specify the module path without a `pd` or `pandas` prefix, just `DataFrame.<func>` or `Series.<func>`. When using a function that applies to a `Series`, tell :panda_face: which one to use via the `column` prop. ```yaml operations: - handler: DataFrame.rename options: columns: value: amount ``` This exactly represents this python call to the processed dataframe: ```python df.rename(columns={"value": "amount"}) ``` ### env vars For api keys or other secrets, you can put environment variables anywhere into the config. They will simply resolved via `os.path.expandvars` ```yaml read: options: storage_options: header: "api-key": ${MY_API_KEY} ``` ## Example A full playbook example that covers a few of the possible cases. 
See the yaml files in [./tests/fixtures/](./tests/fixtures/) for more. ```yaml read: uri: https://api.example.org/data?format=csv options: storage_options: header: "api-key": ${API_KEY} skipfooter: 1 operations: - handler: DataFrame.rename options: columns: value: amount - handler: Series.str.lower column: state - handler: DataFrame.assign options: city_id: "lambda x: x['state'] + '-' + x['city'].map(normality.slugify)" - handler: DataFrame.set_index options: keys: - city_id - handler: DataFrame.sort_values options: by: - state - city write: uri: ftp://user:${FTP_PASSWORD}@host/data.csv options: index: false ``` ## How to... ### Rename columns [`DataFrame.rename`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rename.html) ```yaml operations: - handler: DataFrame.rename options: columns: value: amount "First name": first_name ``` ### Apply modification to a column [`Series.map`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.map.html) ```yaml operations: - handler: Series.map column: my_column options: func: "lambda x: x.lower()" ``` ### Set an index [`DataFrame.set_index`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.set_index.html) ```yaml operations: - handler: DataFrame.set_index options: keys: - city_id ``` ### Sort values [`DataFrame.sort_values`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sort_values.html) ```yaml operations: - sort_values: by: - column1 - column2 ascending: false ``` ### De-duplicate [`DataFrame.drop_duplicates`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html) when using a subset of columns, use in conjunction with `sort_values` to make sure to keep the right records ```yaml operations: - drop_duplicates: subset: - column1 - column2 keep: last ``` ### Compute a new column based on existing data 
[`DataFrame.assign`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.assign.html)

```yaml
operations:
  - handler: DataFrame.assign
    options:
      city_id: "lambda x: x['state'] + '-' + x['city'].map(normality.slugify)"
```

### SQL

[Pandas SQL io](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_sql.html#pandas.read_sql)

```yaml
read:
  uri: postgresql://user:password@host/database
  options:
    sql: "SELECT * FROM my_table WHERE category = 'A'"
```

## safe eval

Ok wait, you are executing arbitrary python code in the yaml specs?

Not really, there is a strict allow list of possible modules that can be used. See [runpandarun.util.safe_eval](https://github.com/investigativedata/runpandarun/blob/develop/runpandarun/util.py)

This includes:

- any pandas or numpy modules
- [normality](https://github.com/pudo/normality/)
- [fingerprints](https://github.com/alephdata/fingerprints)

So, this would, of course, **NOT WORK** ([as tested here](https://github.com/investigativedata/runpandarun/blob/develop/tests/test_playbook.py))

```yaml
operations:
  - handler: DataFrame.apply
    func: "__import__('os').system('rm -rf /')"
```

## development

Package is managed via [Poetry](https://python-poetry.org/)

    git clone https://github.com/investigativedata/runpandarun

Install requirements:

    poetry install --with dev

Test:

    make test

## Funding

Since July 2023, this project is part of [investigraph](https://investigraph.dev) and development of this project is funded by [Media Tech Lab Bayern batch #3](https://github.com/media-tech-lab)

<a href="https://www.media-lab.de/en/programs/media-tech-lab">
    <img src="https://raw.githubusercontent.com/media-tech-lab/.github/main/assets/mtl-powered-by.png" width="240" title="Media Tech Lab powered by logo">
</a>
/runpandarun-0.2.5.tar.gz/runpandarun-0.2.5/README.md
0.688154
0.961606
README.md
pypi
.. image:: https://raw.githubusercontent.com/corriporai/runpandas/master/docs/source/_static/images/runpandas_banner.png RunPandas - Python Package for handing running data from GPS-enabled devices to worldwide race results. ======================================================================================================= .. image:: https://img.shields.io/pypi/v/runpandas.svg :target: https://pypi.python.org/pypi/runpandas/ .. image:: https://anaconda.org/marcelcaraciolo/runpandas/badges/version.svg :target: https://anaconda.org/marcelcaraciolo/runpandas .. image:: https://img.shields.io/github/issues/corriporai/runpandas.svg :target: https://github.com/corriporai/runpandas/issues .. image:: https://www.codefactor.io/repository/github/corriporai/runpandas/badge :target: https://www.codefactor.io/repository/github/corriporai/runpandas :alt: CodeFactor .. image:: https://github.com/corriporai/runpandas/workflows/Build/badge.svg?branch=master :target: https://github.com/corriporai/runpandas/actions/workflows/build.yml .. image:: https://coveralls.io/repos/github/corriporai/runpandas/badge.svg?branch=master :target: https://coveralls.io/github/corriporai/runpandas .. image:: https://codecov.io/gh/corriporai/runpandas/branch/master/graph/badge.svg :target: https://codecov.io/gh/corriporai/runpandas .. image:: https://readthedocs.org/projects/runpandas/badge/?version=latest :target: https://runpandas.readthedocs.io/en/latest/?badge=latest .. image:: https://img.shields.io/badge/code%20style-black-000000.svg :target: https://github.com/psf/black .. image:: https://static.pepy.tech/personalized-badge/runpandas?period=total&units=international_system&left_color=black&right_color=orange&left_text=Downloads :target: https://pepy.tech/project/runpandas .. image:: https://mybinder.org/badge_logo.svg :target: https://mybinder.org/v2/gh/corriporai/runpandas/HEAD .. 
image:: https://zenodo.org/badge/272209151.svg :target: https://zenodo.org/badge/latestdoi/272209151 ========= Introduction ------------ RunPandas is a project to add support for data collected by GPS-enabled tracking devices, heart rate monitors data to [pandas](http://pandas.pydata.org) objects. It is a Python package that provides infrastructure for importing tracking data from such devices, enabling statistical and visual analysis for running enthusiasts and lovers. Its goal is to fill the gap between the routine collection of data and their manual analyses in Pandas and Python. Since the release ``0.6.0`` it comes with the support of handling race event results, so we can analyze from race split times, finish times, demographics, etc. The goal is to support several many races results available to anyone interested in running race results analytics. Documentation ------------- `Stable documentation `__ is available on `github.io <https://corriporai.github.io/runpandas/>`__. A second copy of the stable documentation is hosted on `read the docs <https://runpandas.readthedocs.io/>`_ for more details. `Development documentation <https://corriporai.github.io/runpandas/devel/>`__ is available for the latest changes in master. ==> Check out `this Blog post <https://corriporai.github.io/pandasrunner/general/2020/08/01/welcome-to-runpandas.html>`_ for the reasoning and philosophy behind Runpandas, as well as a detailed tutorial with code examples. ==> Follow `this Runpandas live book <https://github.com/corriporai/runpandasbook>`_ in Jupyter notebook format based on `Jupyter Books <https://jupyterbook.org/intro.html>`_. Install -------- RunPandas depends on the following packages: - ``pandas`` - ``fitparse`` - ``stravalib`` - ``pydantic`` - ``pyaml`` - ``haversine`` - ``thefuzz``` Runpandas was tested to work on \*nix-like systems, including macOS. ----- Install latest release version via pip ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
code-block:: shell $ pip install runpandas Install latest release version via conda ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: shell $ conda install -c marcelcaraciolo runpandas Install latest development version ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: shell $ pip install git+https://github.com/corriporai/runpandas.git or .. code-block:: shell $ git clone https://github.com/corriporai/runpandas.git $ python setup.py install Examples -------- Install using ``pip`` and then import and use one of the tracking readers. This example loads a local file.tcx. From the data file, we obviously get time, altitude, distance, heart rate and geo position (lat/long). .. code:: ipython3 # !pip install runpandas import runpandas as rpd activity = rpd.read_file('./sample.tcx') .. code:: ipython3 activity.head(5) .. raw:: html <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>alt</th> <th>dist</th> <th>hr</th> <th>lon</th> <th>lat</th> </tr> <tr> <th>time</th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>00:00:00</th> <td>178.942627</td> <td>0.000000</td> <td>62.0</td> <td>-79.093187</td> <td>35.951880</td> </tr> <tr> <th>00:00:01</th> <td>178.942627</td> <td>0.000000</td> <td>62.0</td> <td>-79.093184</td> <td>35.951880</td> </tr> <tr> <th>00:00:06</th> <td>178.942627</td> <td>1.106947</td> <td>62.0</td> <td>-79.093172</td> <td>35.951868</td> </tr> <tr> <th>00:00:12</th> <td>177.500610</td> <td>13.003035</td> <td>62.0</td> <td>-79.093228</td> <td>35.951774</td> </tr> <tr> <th>00:00:16</th> <td>177.500610</td> <td>22.405027</td> <td>60.0</td> <td>-79.093141</td> <td>35.951732</td> </tr> </tbody> </table> </div> The data frames that are returned by runpandas when loading files is similar for different 
file types. The dataframe in the above example is a subclass of the ``pandas.DataFrame`` and provides some additional features. Certain columns also return specific ``pandas.Series`` subclasses, which provides useful methods: .. code:: ipython3 print (type(activity)) print(type(activity.alt)) .. parsed-literal:: <class 'runpandas.types.frame.Activity'> <class 'runpandas.types.columns.Altitude'> For instance, if you want to get the base unit for the altitude ``alt`` data or the distance ``dist`` data: .. code:: ipython3 print(activity.alt.base_unit) print(activity.alt.sum()) .. parsed-literal:: m 65883.68151855901 .. code:: ipython3 print(activity.dist.base_unit) print(activity.dist[-1]) .. parsed-literal:: m 4686.31103516 The ``Activity`` dataframe also contains special properties that presents some statistics from the workout such as elapsed time, mean heartrate, the moving time and the distance of workout in meters. .. code:: ipython3 #total time elapsed for the activity print(activity.ellapsed_time) #distance of workout in meters print(activity.distance) #mean heartrate print(activity.mean_heart_rate()) .. parsed-literal:: 0 days 00:33:11 4686.31103516 156.65274151436032 Occasionally, some observations such as speed, distance and others must be calculated based on available data in the given activity. In runpandas there are special accessors (``runpandas.acessors``) that computes some of these metrics. We will compute the ``speed`` and the ``distance per position`` observations using the latitude and longitude for each record and calculate the haversine distance in meters and the speed in meters per second. .. code:: ipython3 #compute the distance using haversine formula between two consecutive latitude, longitudes observations. activity['distpos'] = activity.compute.distance() activity['distpos'].head() .. parsed-literal:: time 00:00:00 NaN 00:00:01 0.333146 00:00:06 1.678792 00:00:12 11.639901 00:00:16 9.183847 Name: distpos, dtype: float64 .. 
code:: ipython3 #compute the speed based on the distance and elapsed time between two consecutive observations. activity['speed'] = activity.compute.speed(from_distances=True) activity['speed'].head() .. parsed-literal:: time 00:00:00 NaN 00:00:01 0.333146 00:00:06 0.335758 00:00:12 1.939984 00:00:16 2.295962 Name: speed, dtype: float64 Popular running metrics are also available through the runpandas accessors such as gradient, pace, vertical speed, etc. .. code:: ipython3 activity['vam'] = activity.compute.vertical_speed() activity['vam'].head() .. parsed-literal:: time 00:00:00 NaN 00:00:01 0.000000 00:00:06 0.000000 00:00:12 -0.240336 00:00:16 0.000000 Name: vam, dtype: float64 Sporadically, there will be a large time difference between consecutive observations in the same workout. This can happen when the device is paused by the athlete or there are proprietary algorithms controlling the operating sampling rate of the device, which can auto-pause when the device detects no significant change in position. In runpandas there is an algorithm that will attempt to calculate the moving time based on the GPS locations, distances, and speed of the activity. To compute the moving time, there is a special accessor that detects the periods of inactivity and returns the ``moving`` series containing all the observations considered to be stopped. .. code:: ipython3 activity_only_moving = activity.only_moving() print(activity_only_moving['moving'].head()) .. parsed-literal:: time 00:00:00 False 00:00:01 False 00:00:06 False 00:00:12 True 00:00:16 True Name: moving, dtype: bool Now we can compute the moving time, i.e. how long the user was active. .. code:: ipython3 activity_only_moving.moving_time .. parsed-literal:: Timedelta('0 days 00:33:05') Runpandas also provides a method ``summary`` for summarising the activity through common statistics. Such a session summary includes estimates of several metrics computed above with a single call. ..
code:: ipython3 activity_only_moving.summary() .. parsed-literal:: Session Running: 26-12-2012 21:29:53 Total distance (meters) 4686.31 Total ellapsed time 0 days 00:33:11 Total moving time 0 days 00:33:05 Average speed (km/h) 8.47656 Average moving speed (km/h) 8.49853 Average pace (per 1 km) 0 days 00:07:04 Average pace moving (per 1 km) 0 days 00:07:03 Average cadence NaN Average moving cadence NaN Average heart rate 156.653 Average moving heart rate 157.4 Average temperature NaN dtype: object Now, let’s play with the data. Let’s show distance vs time as an example of what and how we can create visualizations. In this example, we will use the built-in, matplotlib-based plot function. .. code:: ipython3 activity[['dist']].plot() .. parsed-literal:: Matplotlib is building the font cache; this may take a moment. .. parsed-literal:: <AxesSubplot:xlabel='time'> .. image:: examples/overview_files/overview_10_2.svg And here is altitude versus time. .. code:: ipython3 activity[['alt']].plot() .. parsed-literal:: <AxesSubplot:xlabel='time'> .. image:: examples/overview_files/overview_12_1.svg Next, let’s show the altitude vs distance profile. Here is a scatterplot that shows altitude vs distance as recorded. .. code:: ipython3 activity.plot.scatter(x='dist', y='alt', c='DarkBlue') .. parsed-literal:: <AxesSubplot:xlabel='dist', ylabel='alt'> .. image:: examples/overview_files/overview_14_1.svg Finally, let’s watch a glimpse of the map route by plotting a 2d map using longitude vs latitude. .. code:: ipython3 activity.plot(x='lon', y='lat') .. parsed-literal:: <AxesSubplot:xlabel='lon'> .. image:: examples/overview_files/overview_16_1.svg The ``runpandas`` package also comes with extra batteries, such as our ``runpandas.datasets`` package, which includes a range of example data for testing purposes. There is a dedicated `repository <https://github.com/corriporai/runpandas-data>`__ with all the data available. 
An index of the data is kept `here <https://github.com/corriporai/runpandas-data/blob/master/activities/index.yml>`__. You can use the example data available: .. code:: ipython3 example_fit = rpd.activity_examples(path='Garmin_Fenix_6S_Pro-Running.fit') print(example_fit.summary) print('Included metrics:', example_fit.included_data) .. parsed-literal:: Synced from watch Garmin Fenix 6S Included metrics: [<MetricsEnum.latitude: 'latitude'>, <MetricsEnum.longitude: 'longitude'>, <MetricsEnum.elevation: 'elevation'>, <MetricsEnum.heartrate: 'heartrate'>, <MetricsEnum.cadence: 'cadence'>, <MetricsEnum.distance: 'distance'>, <MetricsEnum.temperature: 'temperature'>] .. code:: ipython3 rpd.read_file(example_fit.path).head() .. raw:: html <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>enhanced_speed</th> <th>enhanced_altitude</th> <th>unknown_87</th> <th>fractional_cadence</th> <th>lap</th> <th>session</th> <th>unknown_108</th> <th>dist</th> <th>cad</th> <th>hr</th> <th>lon</th> <th>lat</th> <th>temp</th> </tr> <tr> <th>time</th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>00:00:00</th> <td>0.000</td> <td>254.0</td> <td>0</td> <td>0.0</td> <td>0</td> <td>0</td> <td>NaN</td> <td>0.00</td> <td>0</td> <td>101</td> <td>13.843376</td> <td>51.066280</td> <td>8</td> </tr> <tr> <th>00:00:01</th> <td>0.000</td> <td>254.0</td> <td>0</td> <td>0.0</td> <td>0</td> <td>0</td> <td>NaN</td> <td>0.00</td> <td>0</td> <td>101</td> <td>13.843374</td> <td>51.066274</td> <td>8</td> </tr> <tr> <th>00:00:10</th> <td>1.698</td> <td>254.0</td> <td>0</td> <td>0.0</td> <td>0</td> <td>1</td> <td>2362.0</td> <td>0.00</td> <td>83</td> <td>97</td> 
<td>13.843176</td> <td>51.066249</td> <td>8</td> </tr> <tr> <th>00:00:12</th> <td>2.267</td> <td>254.0</td> <td>0</td> <td>0.0</td> <td>0</td> <td>1</td> <td>2362.0</td> <td>3.95</td> <td>84</td> <td>99</td> <td>13.843118</td> <td>51.066250</td> <td>8</td> </tr> <tr> <th>00:00:21</th> <td>2.127</td> <td>254.6</td> <td>0</td> <td>0.5</td> <td>0</td> <td>1</td> <td>2552.0</td> <td>16.67</td> <td>87</td> <td>100</td> <td>13.842940</td> <td>51.066231</td> <td>8</td> </tr> </tbody> </table> </div> In case you only want to see all the activities in a specific file type, you can filter the ``runpandas.activity_examples``, which returns a filter iterable that you can iterate over: .. code:: ipython3 fit_examples = rpd.activity_examples(file_type=rpd.FileTypeEnum.FIT) for example in fit_examples: #Download and play with the filtered examples print(example.path) .. parsed-literal:: https://raw.githubusercontent.com/corriporai/runpandas-data/master/activities/Garmin_Fenix_6S_Pro-Running.fit https://raw.githubusercontent.com/corriporai/runpandas-data/master/activities/Garmin_Fenix2_running_with_hrm.fit https://raw.githubusercontent.com/corriporai/runpandas-data/master/activities/Garmin_Forerunner_910XT-Running.fit Exploring sessions ================== The package ``runpandas`` provides utilities to import a group of activities data, and after careful processing, organises them into a MultiIndex Dataframe. The ``pandas.MultiIndex`` allows you to have multiple columns acting as a row identifier and multiple rows acting as a header identifier. In our scenario we will have as first identifier (index) the timestamp of the workout when it started, and as second identifier the timedelta of the consecutive observations of the workout. ..
figure:: examples/MultiIndexDataframe.png :alt: Illustration of the MultiIndex Dataframe The MultiIndex Runpandas Activity Dataframe The MultiIndex dataframe result from the function ``runpandas.read_dir_aggregate``, which takes as input the directory of tracking data files, and constructs using the read*() functions to build ``runpandas.Activity`` objects. Them, the result daframes are first sorted by the time stamps and are all combined into a single ``runpandas.Activity`` indexed by the two-level ``pandas.MultiIndex``. Let’s illustrate these examples by loading a bunch of 68 running activities of a female runner over the years of 2020 until 2021. .. code:: ipython3 import warnings warnings.filterwarnings('ignore') .. code:: ipython3 import runpandas session = runpandas.read_dir_aggregate(dirname='session/') .. code:: ipython3 session .. raw:: html <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th></th> <th>alt</th> <th>hr</th> <th>lon</th> <th>lat</th> </tr> <tr> <th>start</th> <th>time</th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th rowspan="5" valign="top">2020-08-30 09:08:51.012</th> <th>00:00:00</th> <td>NaN</td> <td>NaN</td> <td>-34.893609</td> <td>-8.045055</td> </tr> <tr> <th>00:00:01.091000</th> <td>NaN</td> <td>NaN</td> <td>-34.893624</td> <td>-8.045054</td> </tr> <tr> <th>00:00:02.091000</th> <td>NaN</td> <td>NaN</td> <td>-34.893641</td> <td>-8.045061</td> </tr> <tr> <th>00:00:03.098000</th> <td>NaN</td> <td>NaN</td> <td>-34.893655</td> <td>-8.045063</td> </tr> <tr> <th>00:00:04.098000</th> <td>NaN</td> <td>NaN</td> <td>-34.893655</td> <td>-8.045065</td> </tr> <tr> <th>...</th> <th>...</th> <td>...</td> <td>...</td> <td>...</td> <td>...</td> </tr> <tr> <th rowspan="5" valign="top">2021-07-04 
11:23:19.418</th> <th>00:52:39.582000</th> <td>0.050001</td> <td>189.0</td> <td>-34.894534</td> <td>-8.046602</td> </tr> <tr> <th>00:52:43.582000</th> <td>NaN</td> <td>NaN</td> <td>-34.894465</td> <td>-8.046533</td> </tr> <tr> <th>00:52:44.582000</th> <td>NaN</td> <td>NaN</td> <td>-34.894443</td> <td>-8.046515</td> </tr> <tr> <th>00:52:45.582000</th> <td>NaN</td> <td>NaN</td> <td>-34.894429</td> <td>-8.046494</td> </tr> <tr> <th>00:52:49.582000</th> <td>NaN</td> <td>190.0</td> <td>-34.894395</td> <td>-8.046398</td> </tr> </tbody> </table> <p>48794 rows × 4 columns</p> </div> Now let’s see how many activities there are available for analysis. For this question, we also have an acessor ``runpandas.types.acessors.session._SessionAcessor`` that holds several methods for computing the basic running metrics across all the activities from this kind of frame and some summary statistics. .. code:: ipython3 #count the number of activities in the session print ('Total Activities:', session.session.count()) .. parsed-literal:: Total Activities: 68 We might compute the main running metrics (speed, pace, moving, etc) using the session acessors methods as like the ones available in the ``runpandas.types.metrics.MetricsAcessor`` . By the way, those methods are called inside each metric method, but applying in each of activities separatedely. .. code:: ipython3 #In this example we compute the distance and the distance per position across all workouts session = session.session.distance() session .. 
raw:: html <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th></th> <th>alt</th> <th>hr</th> <th>lon</th> <th>lat</th> <th>distpos</th> <th>dist</th> </tr> <tr> <th>start</th> <th>time</th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th rowspan="5" valign="top">2020-08-30 09:08:51.012</th> <th>00:00:00</th> <td>NaN</td> <td>NaN</td> <td>-34.893609</td> <td>-8.045055</td> <td>NaN</td> <td>NaN</td> </tr> <tr> <th>00:00:01.091000</th> <td>NaN</td> <td>NaN</td> <td>-34.893624</td> <td>-8.045054</td> <td>1.690587</td> <td>1.690587</td> </tr> <tr> <th>00:00:02.091000</th> <td>NaN</td> <td>NaN</td> <td>-34.893641</td> <td>-8.045061</td> <td>2.095596</td> <td>3.786183</td> </tr> <tr> <th>00:00:03.098000</th> <td>NaN</td> <td>NaN</td> <td>-34.893655</td> <td>-8.045063</td> <td>1.594298</td> <td>5.380481</td> </tr> <tr> <th>00:00:04.098000</th> <td>NaN</td> <td>NaN</td> <td>-34.893655</td> <td>-8.045065</td> <td>0.163334</td> <td>5.543815</td> </tr> <tr> <th>...</th> <th>...</th> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> </tr> <tr> <th rowspan="5" valign="top">2021-07-04 11:23:19.418</th> <th>00:52:39.582000</th> <td>0.050001</td> <td>189.0</td> <td>-34.894534</td> <td>-8.046602</td> <td>12.015437</td> <td>8220.018885</td> </tr> <tr> <th>00:52:43.582000</th> <td>NaN</td> <td>NaN</td> <td>-34.894465</td> <td>-8.046533</td> <td>10.749779</td> <td>8230.768664</td> </tr> <tr> <th>00:52:44.582000</th> <td>NaN</td> <td>NaN</td> <td>-34.894443</td> <td>-8.046515</td> <td>3.163638</td> <td>8233.932302</td> </tr> <tr> <th>00:52:45.582000</th> <td>NaN</td> <td>NaN</td> <td>-34.894429</td> <td>-8.046494</td> <td>2.851535</td> <td>8236.783837</td> </tr> <tr> <th>00:52:49.582000</th> 
<td>NaN</td> <td>190.0</td> <td>-34.894395</td> <td>-8.046398</td> <td>11.300740</td> <td>8248.084577</td> </tr> </tbody> </table> <p>48794 rows × 6 columns</p> </div> .. code:: ipython3 #compute the speed for each activity session = session.session.speed(from_distances=True) #compute the pace for each activity session = session.session.pace() #compute the inactivity periods for each activity session = session.session.only_moving() After all the computation is done, let’s go to the next step: exploring the data and getting some descriptive statistics. After the loading and metrics computation for all the activities, now let’s look further into the data and get the basic summaries about the session: time spent, total distance, mean speed and other insightful statistics in each running activity. For this task, we may accomplish it by calling the method ``runpandas.types.session._SessionAcessor.summarize``. It will return a basic Dataframe including all the aggregated statistics per activity from the session frame. .. code:: ipython3 summary = session.session.summarize() summary ..
raw:: html <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>moving_time</th> <th>mean_speed</th> <th>max_speed</th> <th>mean_pace</th> <th>max_pace</th> <th>mean_moving_speed</th> <th>mean_moving_pace</th> <th>mean_cadence</th> <th>max_cadence</th> <th>mean_moving_cadence</th> <th>mean_heart_rate</th> <th>max_heart_rate</th> <th>mean_moving_heart_rate</th> <th>mean_temperature</th> <th>min_temperature</th> <th>max_temperature</th> <th>total_distance</th> <th>ellapsed_time</th> </tr> <tr> <th>start</th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>2020-07-03 09:50:53.162</th> <td>00:25:29.838000</td> <td>2.642051</td> <td>4.879655</td> <td>00:06:18</td> <td>00:03:24</td> <td>2.665008</td> <td>00:06:15</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>178.819923</td> <td>188.0</td> <td>178.872587</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>4089.467333</td> <td>00:25:47.838000</td> </tr> <tr> <th>2020-07-05 09:33:20.999</th> <td>00:05:04.999000</td> <td>2.227637</td> <td>6.998021</td> <td>00:07:28</td> <td>00:02:22</td> <td>3.072098</td> <td>00:05:25</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>168.345455</td> <td>176.0</td> <td>168.900000</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>980.162640</td> <td>00:07:20.001000</td> </tr> <tr> <th>2020-07-05 09:41:59.999</th> <td>00:18:19</td> <td>1.918949</td> <td>6.563570</td> <td>00:08:41</td> <td>00:02:32</td> <td>2.729788</td> <td>00:06:06</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>173.894180</td> <td>185.0</td> <td>174.577143</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>3139.401118</td> <td>00:27:16</td> </tr> <tr> 
<th>2020-07-13 09:13:58.718</th> <td>00:40:21.281000</td> <td>2.509703</td> <td>8.520387</td> <td>00:06:38</td> <td>00:01:57</td> <td>2.573151</td> <td>00:06:28</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>170.808176</td> <td>185.0</td> <td>170.795527</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>6282.491059</td> <td>00:41:43.281000</td> </tr> <tr> <th>2020-07-17 09:33:02.308</th> <td>00:32:07.691000</td> <td>2.643278</td> <td>8.365431</td> <td>00:06:18</td> <td>00:01:59</td> <td>2.643278</td> <td>00:06:18</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>176.436242</td> <td>186.0</td> <td>176.436242</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>5095.423045</td> <td>00:32:07.691000</td> </tr> <tr> <th>...</th> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> </tr> <tr> <th>2021-06-13 09:22:30.985</th> <td>01:32:33.018000</td> <td>2.612872</td> <td>23.583956</td> <td>00:06:22</td> <td>00:00:42</td> <td>2.810855</td> <td>00:05:55</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>169.340812</td> <td>183.0</td> <td>169.655879</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>15706.017295</td> <td>01:40:11.016000</td> </tr> <tr> <th>2021-06-20 09:16:55.163</th> <td>00:59:44.512000</td> <td>2.492640</td> <td>6.065895</td> <td>00:06:41</td> <td>00:02:44</td> <td>2.749453</td> <td>00:06:03</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>170.539809</td> <td>190.0</td> <td>171.231392</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>9965.168311</td> <td>01:06:37.837000</td> </tr> <tr> <th>2021-06-23 09:37:44.000</th> <td>00:26:49.001000</td> <td>2.501796</td> <td>5.641343</td> <td>00:06:39</td> <td>00:02:57</td> <td>2.568947</td> <td>00:06:29</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>156.864865</td> <td>171.0</td> <td>156.957031</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> 
<td>4165.492241</td> <td>00:27:45.001000</td> </tr> <tr> <th>2021-06-27 09:50:08.664</th> <td>00:31:42.336000</td> <td>2.646493</td> <td>32.734124</td> <td>00:06:17</td> <td>00:00:30</td> <td>2.661853</td> <td>00:06:15</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>166.642857</td> <td>176.0</td> <td>166.721116</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>5074.217061</td> <td>00:31:57.336000</td> </tr> <tr> <th>2021-07-04 11:23:19.418</th> <td>00:47:47.583000</td> <td>2.602263</td> <td>4.212320</td> <td>00:06:24</td> <td>00:03:57</td> <td>2.856801</td> <td>00:05:50</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>177.821862</td> <td>192.0</td> <td>177.956967</td> <td>NaN</td> <td>NaN</td> <td>NaN</td> <td>8248.084577</td> <td>00:52:49.582000</td> </tr> </tbody> </table> <p>68 rows × 18 columns</p> </div> .. code:: ipython3 print('Session Interval:', (summary.index.to_series().max() - summary.index.to_series().min()).days, 'days') print('Total Workouts:', len(summary), 'runnings') print('Tota KM Distance:', summary['total_distance'].sum() / 1000) print('Average Pace (all runs):', summary.mean_pace.mean()) print('Average Moving Pace (all runs):', summary.mean_moving_pace.mean()) print('Average KM Distance (all runs):', round(summary.total_distance.mean()/ 1000,2)) .. parsed-literal:: Session Interval: 366 days Total Workouts: 68 runnings Tota KM Distance: 491.77377537338896 Average Pace (all runs): 0 days 00:07:18.411764 Average Moving Pace (all runs): 0 days 00:06:02.147058 Average KM Distance (all runs): 7.23 At this point, I have the summary data to start some powerful visualization and analysis. At the charts below we illustrate her pace and distance evolution over time. .. 
code:: ipython3 import matplotlib.pyplot as plt import datetime #let's convert the pace to float number in minutes summary['mean_moving_pace_float'] = summary['mean_moving_pace'] / datetime.timedelta(minutes=1) summary['pace_moving_all_mean'] = summary.mean_moving_pace.mean() summary['pace_moving_all_mean_float'] = summary['pace_moving_all_mean'] / datetime.timedelta(minutes=1) plt.subplots(figsize=(8, 5)) plt.plot(summary.index, summary.mean_moving_pace_float, color='silver') plt.plot(summary.pace_moving_all_mean_float, color='purple', linestyle='dashed', label='average') plt.title("Pace Evolution") plt.xlabel("Runnings") plt.ylabel("Pace") plt.legend() .. parsed-literal:: <matplotlib.legend.Legend at 0x7f82d8d83cd0> .. image:: examples/overview_files/overview_56_1.svg .. code:: ipython3 plt.subplots(figsize=(8, 5)) summary['distance_all_mean'] = round(summary.total_distance.mean()/1000,2) plt.plot(summary.index, summary.total_distance / 1000, color='silver') plt.plot(summary.distance_all_mean, color='purple', linestyle='dashed', label='average') plt.title("Distance Evolution") plt.xlabel("Runs") plt.ylabel("distance") plt.legend() plt.show() .. image:: examples/overview_files/overview_57_0.svg Accessing historical data from running race results =================================================== One of the great features in Runpandas is the capability of accessing race’s result datasets accross several races around the world, from majors to local ones (if it’s available at our data repository). In this example we will analyze the 2022 Berlin Marathon using runpandas methods specially tailored for handling race results data. First, let’s load the Berlin Marathon data by using the runpandas method ``runpandas.get_events``. This function provides a way of accessing the race data and visualize the results from several marathons available at our datasets repository. Given the year and the marathon identifier you can filter any marathon datasets that you want analyze. 
The result will be a list of ``runpandas.EventData`` instances with race result and its metadata. Let’s look for Berlin Marathon results. .. code:: ipython3 import pandas as pd import runpandas as rpd import warnings warnings.filterwarnings('ignore') .. code:: ipython3 results = rpd.get_events('Berlin') results .. parsed-literal:: [<Event: name=Berlin Marathon Results from 2022., country=DE, edition=2022>] The result comes with the Berlin Marathon Result from 2022. Let’s take a look inside the race event, which comes with a handful method to describe its attributes and a special method to load the race result data into a ``runpandas.datasets.schema.RaceData`` instance. .. code:: ipython3 berlin_result = results[0] print('Event type', berlin_result.run_type) print('Country', berlin_result.country) print('Year', berlin_result.edition) print('Name', berlin_result.summary) .. parsed-literal:: Event type RunTypeEnum.MARATHON Country DE Year 2022 Name Berlin Marathon Results from 2022. Now that we confirmed that we requested the corresponding marathon dataset. We will load it into a DataFrame so we can further explore it. .. code:: ipython3 #loading the race data into a RaceData Dataframe race_result = berlin_result.load() race_result .. 
raw:: html <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>position</th> <th>position_gender</th> <th>country</th> <th>sex</th> <th>division</th> <th>bib</th> <th>firstname</th> <th>lastname</th> <th>club</th> <th>starttime</th> <th>...</th> <th>10k</th> <th>15k</th> <th>20k</th> <th>25k</th> <th>30k</th> <th>35k</th> <th>40k</th> <th>grosstime</th> <th>nettime</th> <th>category</th> </tr> </thead> <tbody> <tr> <th>0</th> <td>1</td> <td>1</td> <td>KEN</td> <td>M</td> <td>1</td> <td>1</td> <td>Eliud</td> <td>Kipchoge</td> <td>–</td> <td>09:15:00</td> <td>...</td> <td>0 days 00:28:23</td> <td>0 days 00:42:33</td> <td>0 days 00:56:45</td> <td>0 days 01:11:08</td> <td>0 days 01:25:40</td> <td>0 days 01:40:10</td> <td>0 days 01:54:53</td> <td>0 days 02:01:09</td> <td>0 days 02:01:09</td> <td>M35</td> </tr> <tr> <th>1</th> <td>2</td> <td>2</td> <td>KEN</td> <td>M</td> <td>1</td> <td>5</td> <td>Mark</td> <td>Korir</td> <td>–</td> <td>09:15:00</td> <td>...</td> <td>0 days 00:28:56</td> <td>0 days 00:43:35</td> <td>0 days 00:58:14</td> <td>0 days 01:13:07</td> <td>0 days 01:28:06</td> <td>0 days 01:43:25</td> <td>0 days 01:59:05</td> <td>0 days 02:05:58</td> <td>0 days 02:05:58</td> <td>M30</td> </tr> <tr> <th>2</th> <td>3</td> <td>3</td> <td>ETH</td> <td>M</td> <td>1</td> <td>8</td> <td>Tadu</td> <td>Abate</td> <td>–</td> <td>09:15:00</td> <td>...</td> <td>0 days 00:29:46</td> <td>0 days 00:44:40</td> <td>0 days 00:59:40</td> <td>0 days 01:14:44</td> <td>0 days 01:30:01</td> <td>0 days 01:44:55</td> <td>0 days 02:00:03</td> <td>0 days 02:06:28</td> <td>0 days 02:06:28</td> <td>MH</td> </tr> <tr> <th>3</th> <td>4</td> <td>4</td> <td>ETH</td> <td>M</td> <td>2</td> <td>26</td> <td>Andamlak</td> <td>Belihu</td> <td>–</td> <td>09:15:00</td> <td>...</td> 
<td>0 days 00:28:23</td> <td>0 days 00:42:33</td> <td>0 days 00:56:45</td> <td>0 days 01:11:09</td> <td>0 days 01:26:11</td> <td>0 days 01:42:14</td> <td>0 days 01:59:14</td> <td>0 days 02:06:40</td> <td>0 days 02:06:40</td> <td>MH</td> </tr> <tr> <th>4</th> <td>5</td> <td>5</td> <td>KEN</td> <td>M</td> <td>3</td> <td>25</td> <td>Abel</td> <td>Kipchumba</td> <td>–</td> <td>09:15:00</td> <td>...</td> <td>0 days 00:28:55</td> <td>0 days 00:43:35</td> <td>0 days 00:58:14</td> <td>0 days 01:13:07</td> <td>0 days 01:28:03</td> <td>0 days 01:43:08</td> <td>0 days 01:59:14</td> <td>0 days 02:06:49</td> <td>0 days 02:06:49</td> <td>MH</td> </tr> <tr> <th>...</th> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> <td>...</td> </tr> <tr> <th>35566</th> <td>DNF</td> <td>–</td> <td>USA</td> <td>M</td> <td>–</td> <td>65079</td> <td>michael</td> <td>perkowski</td> <td>–</td> <td>–</td> <td>...</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>M65</td> </tr> <tr> <th>35567</th> <td>DNF</td> <td>–</td> <td>USA</td> <td>M</td> <td>–</td> <td>62027</td> <td>Karl</td> <td>Mann</td> <td>–</td> <td>–</td> <td>...</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>M55</td> </tr> <tr> <th>35568</th> <td>DNF</td> <td>–</td> <td>THA</td> <td>F</td> <td>–</td> <td>27196</td> <td>oraluck</td> <td>pichaiwongse</td> <td>STATE to BERLIN 2022</td> <td>–</td> <td>...</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>W55</td> </tr> <tr> <th>35569</th> <td>DNF</td> <td>–</td> <td>SUI</td> <td>M</td> <td>–</td> <td>56544</td> <td>Gerardo</td> <td>GARCIA CALZADA</td> 
<td>–</td> <td>–</td> <td>...</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>M50</td> </tr> <tr> <th>35570</th> <td>DNF</td> <td>–</td> <td>AUT</td> <td>M</td> <td>–</td> <td>63348</td> <td>Harald</td> <td>Mori</td> <td>Albatros</td> <td>–</td> <td>...</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>NaT</td> <td>M60</td> </tr> </tbody> </table> <p>35571 rows × 23 columns</p> </div> Now you can get some insights about the Berlin Marathon 2022, by using its tailored methods for getting basic and quick insights. For example, the number of finishers, number of participants and the winner info. .. code:: ipython3 print('Total participants', race_result.total_participants) print('Total finishers', race_result.total_finishers) print('Total Non-Finishers', race_result.total_nonfinishers) .. parsed-literal:: Total participants 35571 Total finishers 34844 Total Non-Finishers 727 .. code:: ipython3 race_result.winner .. parsed-literal:: position 1 position_gender 1 country KEN sex M division 1 bib 1 firstname Eliud lastname Kipchoge club – starttime 09:15:00 start_raw_time 09:15:00 half 0 days 00:59:51 5k 0 days 00:14:14 10k 0 days 00:28:23 15k 0 days 00:42:33 20k 0 days 00:56:45 25k 0 days 01:11:08 30k 0 days 01:25:40 35k 0 days 01:40:10 40k 0 days 01:54:53 grosstime 0 days 02:01:09 nettime 0 days 02:01:09 category M35 Name: 0, dtype: object Eliud Kipchoge of Kenya won the 2022 Berlin Marathon in 2:01:09. Kipchoge’s victory was his fourth in Berlin and 17th overall in a career of 19 marathon starts. And who was the women’s race winner? .. code:: ipython3 race_result[(race_result['position_gender'] == 1) & (race_result['sex'] == 'F')].T .. 
raw:: html <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>32</th> </tr> </thead> <tbody> <tr> <th>position</th> <td>33</td> </tr> <tr> <th>position_gender</th> <td>1</td> </tr> <tr> <th>country</th> <td>ETH</td> </tr> <tr> <th>sex</th> <td>F</td> </tr> <tr> <th>division</th> <td>1</td> </tr> <tr> <th>bib</th> <td>F24</td> </tr> <tr> <th>firstname</th> <td>Tigist</td> </tr> <tr> <th>lastname</th> <td>Assefa</td> </tr> <tr> <th>club</th> <td>–</td> </tr> <tr> <th>starttime</th> <td>09:15:00</td> </tr> <tr> <th>start_raw_time</th> <td>09:15:00</td> </tr> <tr> <th>half</th> <td>0 days 01:08:13</td> </tr> <tr> <th>5k</th> <td>0 days 00:16:22</td> </tr> <tr> <th>10k</th> <td>0 days 00:32:36</td> </tr> <tr> <th>15k</th> <td>0 days 00:48:44</td> </tr> <tr> <th>20k</th> <td>0 days 01:04:43</td> </tr> <tr> <th>25k</th> <td>0 days 01:20:48</td> </tr> <tr> <th>30k</th> <td>0 days 01:36:41</td> </tr> <tr> <th>35k</th> <td>0 days 01:52:27</td> </tr> <tr> <th>40k</th> <td>0 days 02:08:42</td> </tr> <tr> <th>grosstime</th> <td>0 days 02:15:37</td> </tr> <tr> <th>nettime</th> <td>0 days 02:15:37</td> </tr> <tr> <th>category</th> <td>WH</td> </tr> </tbody> </table> </div> Tigist Assefa of Ethiopia won the women’s race in a stunning time of 2:15:37 to set a new course record in Berlin. Runpandas also provides a race’s summary method for showing the compilation of some general insights such as finishers, partipants (by gender and overall). .. code:: ipython3 race_result.summary() .. 
parsed-literal:: Event name berlin marathon Event type 42k Event country DE Event date 25-09-2022 Number of participants 35571 Number of finishers 34844 Number of non-finishers 727 Number of male finishers 23314 Number of female finishers 11523 Winner Nettime 0 days 02:01:09 dtype: objec Runpandas for some race results come with the splits for the partial distances of the race. We can fetch for any runner the splits using the method ``runpandas.acessors.splits.pick_athlete``. So, if we need to have direct access to all splits from a specific runner, we will use the ``splits`` acesssor. .. code:: ipython3 race_result.splits.pick_athlete(identifier='1') .. raw:: html <div> <style scoped> .dataframe tbody tr th:only-of-type { vertical-align: middle; } .dataframe tbody tr th { vertical-align: top; } .dataframe thead th { text-align: right; } </style> <table border="1" class="dataframe"> <thead> <tr style="text-align: right;"> <th></th> <th>time</th> <th>distance_meters</th> <th>distance_miles</th> </tr> <tr> <th>split</th> <th></th> <th></th> <th></th> </tr> </thead> <tbody> <tr> <th>0k</th> <td>0 days 00:00:00</td> <td>0</td> <td>0.0000</td> </tr> <tr> <th>5k</th> <td>0 days 00:14:14</td> <td>5000</td> <td>3.1069</td> </tr> <tr> <th>10k</th> <td>0 days 00:28:23</td> <td>10000</td> <td>6.2137</td> </tr> <tr> <th>15k</th> <td>0 days 00:42:33</td> <td>15000</td> <td>9.3206</td> </tr> <tr> <th>20k</th> <td>0 days 00:56:45</td> <td>20000</td> <td>12.4274</td> </tr> <tr> <th>half</th> <td>0 days 00:59:51</td> <td>21097</td> <td>13.1091</td> </tr> <tr> <th>25k</th> <td>0 days 01:11:08</td> <td>25000</td> <td>15.5343</td> </tr> <tr> <th>30k</th> <td>0 days 01:25:40</td> <td>30000</td> <td>18.6411</td> </tr> <tr> <th>35k</th> <td>0 days 01:40:10</td> <td>35000</td> <td>21.7480</td> </tr> <tr> <th>40k</th> <td>0 days 01:54:53</td> <td>40000</td> <td>24.8548</td> </tr> <tr> <th>nettime</th> <td>0 days 02:01:09</td> <td>42195</td> <td>26.2187</td> </tr> </tbody> </table> </div> 
With plotting libraries such as ``matplotlib`` you can analyze the splits data through a impressive visualization! .. code:: ipython3 eliud_kipchoge_splits = race_result.splits.pick_athlete(identifier='1') .. code:: ipython3 def timeTicks(x, pos): seconds = x / 10**9 d = datetime.timedelta(seconds=seconds) return str(d) fig, ax2 = plt.subplots() #plot the splits time #format the y-axis to show the labels as timedelta. formatter = matplotlib.ticker.FuncFormatter(timeTicks) #plot the paces per segment line2, = ax2.plot(eliud_kipchoge_splits_filtered.index, eliud_kipchoge_splits_filtered['pace'], linestyle='dashed', color='cyan', lw=5, alpha=0.8) #plot the overall mean pace line3, = ax2.plot(eliud_kipchoge_splits_filtered.index, eliud_kipchoge_splits_filtered['mean_pace'], color='#1b9e77', linestyle='dashed', lw=5, alpha=0.8) #annotate the pace line with time splits yvalues = line2.get_ydata() for index, y in zip(eliud_kipchoge_splits_filtered.index, yvalues): formated_time = datetime.timedelta(seconds=eliud_kipchoge_splits_filtered.loc[index,'split_time'].total_seconds()) ax2.text(index, y, formated_time, weight="bold", size=12, ) ax2.yaxis.set_major_formatter(formatter) ax2.grid(False) ax2.legend( (line2, line3), ('Splits Time', 'Splits Pace', 'Mean Pace'), loc='lower right', frameon=False ) ax2.set_title("Eliud Kipchoge splits time and pace in Berlin Marathon 2022") ax2.set_xlabel("Splits in kms") ax2.set_ylabel("Pace min/km") plt.show() .. image:: examples/overview_files/5-marathon_analysis_80_0.png Get in touch ------------ - Report bugs, suggest features or view the source code [on GitHub](https://github.com/corriporai/runpandas). I'm very interested in your experience with runpandas. Please drop me an note with any feedback you have. Contributions welcome! \- **Marcel Caraciolo** License ------- Runpandas is licensed under the **MIT License**. A copy of which is included in LICENSE.
/runpandas-0.6.0.tar.gz/runpandas-0.6.0/README.rst
0.857291
0.809878
README.rst
pypi
<div id="top"></div> # Run Jupyter notebooks quietly from command-line [![PyPI](https://img.shields.io/pypi/v/runpynb?color=brightgreen&label=PyPI)](https://pypi.org/project/runpynb/) ![GitHub release (latest by date)](https://img.shields.io/github/v/release/lsys/runpynb?label=Latest%20release) <br> [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/runpynb?label=Python%203.6%2B)](https://pypi.org/project/runpynb/) <br> [![DOI](https://zenodo.org/badge/520408889.svg)](https://zenodo.org/badge/latestdoi/520408889) `runPyNB` is a quick and dirty utility to run (and time) Jupyter notebooks from command-line and makefiles. <!------------------- Quickstart -------------------> ## Quickstart Install from PyPI ```bash pip install runpynb ``` General usage: `runpynb <notebook(s)> [options]` (".ipynb" not required) * `runpynb`: Run all notebooks in directory. <pre> $ runpynb</pre> ![](https://raw.githubusercontent.com/lsys/runpynb/main/assets/_docs/runall.gif) <p align="right">(<a href="#top">back to top</a>)</p> <!------------------------ Usage ----------------------> ## Usage * `runpynb <notebook(s)> -q`: Run quietly (`-q`). <pre> $ runpynb hello.ipynb -q</pre> ![](https://raw.githubusercontent.com/lsys/runpynb/main/assets/_docs/be-quiet.gif) * `runpynb <notebook(s)> -qs`: Run quietly (`-q`) as a sequence of workflow (`-s`). Errors (eg in `error.ipynb`) will break the workflow. <pre> $ runpynb error.ipynb hello.ipynb -qs</pre> ![](https://raw.githubusercontent.com/lsys/runpynb/main/assets/_docs/as-sequence.gif) * `runpynb <notebook(s)> -o`: Save output as separate notebook (`-o`), instead of overwriting existing notebook(s). 
<pre> $ runpynb hello.ipynb -o</pre> ![](https://raw.githubusercontent.com/lsys/runpynb/main/assets/_docs/output-as-separate-notebook.gif) <p align="right">(<a href="#top">back to top</a>)</p> <!---------------------- Options ----------------------> ## Options ```bash usage: runpynb [-h] [-t TIMEOUT] [-s] [-o] [-v VERSION] [-q] [notebooks ...] Run (and time) Jupyter notebooks silently in command-line. positional arguments: notebooks List of Jupyter notebooks (*.ipynb) to be run (default=all notebooks in path). optional arguments: -h, --help show this help message and exit -t TIMEOUT, --timeout TIMEOUT Seconds until a cell in the notebook timesout, which raises a Timeouterror exception (default is 3000=5 mins). -s, --sequence Sequence implicit in notebook lists. If error occurs somewhere, stop entire pipeline. -o, --output Save output as a separate notebook with "-out"-suffix (e.g. *-out.ipynb) instead of overwriting existing file. -v VERSION, --version VERSION Version of notebook to return (Default=No conversion). Notebook will be converted if necessary. -q, --quiet Be quiet and don't print messages (including run time). Caution: Does not suppress error messages. 
``` <p align="right">(<a href="#top">back to top</a>)</p> <!----------------- Project status -----------------> ## Status [![Documentation Status](https://readthedocs.org/projects/runpynb/badge/?version=latest)](https://runpynb.readthedocs.io/en/latest/?badge=latest) <br> [![Build Status](https://app.travis-ci.com/LSYS/runPyNB.svg?branch=main)](https://app.travis-ci.com/LSYS/runPyNB) <br> [![Tests](https://github.com/LSYS/runPyNB/actions/workflows/tests.yml/badge.svg?branch=main)](https://github.com/LSYS/runPyNB/actions/workflows/tests.yml) [![codecov](https://codecov.io/gh/LSYS/runPyNB/branch/main/graph/badge.svg?token=ZtC2IJ07Fa)](https://codecov.io/gh/LSYS/runPyNB) <br> [![CI](https://github.com/LSYS/runPyNB/actions/workflows/build.yml/badge.svg?branch=main)](https://github.com/LSYS/runPyNB/actions/workflows/build.yml) <br> [![CLI](https://github.com/LSYS/runPyNB/actions/workflows/cli.yml/badge.svg?branch=main)](https://github.com/LSYS/runPyNB/actions/workflows/cli.yml) <br> [![Doclinks](https://github.com/LSYS/runPyNB/actions/workflows/doclinks.yml/badge.svg?branch=main)](https://github.com/LSYS/runPyNB/actions/workflows/doclinks.yml) <p align="right">(<a href="#top">back to top</a>)</p> <br> <!---------------------- About ---------------------> ## More on this package This is a lightweight package that wraps around the official Jupyter [`nbformat`](https://nbformat.readthedocs.io/en/latest/) and [`nbconvert`](https://nbconvert.readthedocs.io/en/latest/) modules. My workflow involves using [`Jupyter notebooks`](https://jupyter.org/) to clean, and analyze data. I use this utility to run notebooks silently from the command-line and [`Makefiles`](#usage-with-makefiles) (without converting from `.ipynb` files to `.py` files). Related packages are [`guoquan/runnb`](https://github.com/guoquan/runnb) and [`vinayak-mehta/nbcommands`](https://github.com/vinayak-mehta/nbcommands) with a planned enhancement `nbtime` to run Jupyter notebooks from command-line. 
<p align="right">(<a href="#top">back to top</a>)</p> <!---------------------- Build ---------------------> ## Usage with Makefiles A minimal workflow where `get-data.ipynb` takes 5000 seconds to prepare `data.csv`. And where `analyze.ipynb` uses `data.csv` to produce `output.png`. ```makefile .DEFAULT_GOAL := output.png data.csv: get-data.ipynb runpynb $^ -t 5000 output.png: analyze.ipynb data.csv runpynb $< ``` <p align="right">(<a href="#top">back to top</a>)</p> <!----------------- Known issues ----------------> ## Known Issues * [Build fails](https://github.com/LSYS/runPyNB/runs/7627883361?check_suite_focus=true) with Python 3.6 in Windows OS. * Notebooks with long execution time will require the `timeout` option (eg `runpynb notebook.ipynb -t 10000`). <p align="right">(<a href="#top">back to top</a>)</p> <!-------------------- License -------------------> ## License This package is licensed under the [MIT License](https://github.com/LSYS/runPyNB/blob/main/LICENSE).
/runpynb-0.2.0.tar.gz/runpynb-0.2.0/README.md
0.578686
0.866472
README.md
pypi
![Build Status](https://github.com/cms-DQM/runregistry_api_client/actions/workflows/test_package.yaml/badge.svg) # Run Registry Client Python client to retrieve and query data from [CMS Run Registry](https://cmsrunregistry.web.cern.ch). To switch to [Dev CMS Run Registry](https://dev-cmsrunregistry.web.cern.ch) do: ```python import runregistry runregistry.setup("development") ``` Possible values are `"production"`, `"development"` (will use the development deployment as target), `"qa"` (will use the new SSO proxy on the production deployment) or `"local"` (if you have a local instance of Run Registry's backend running). ## Python version and Virtual env Python version>=3.6 is required for this package. A virtual environment is also required, if you are in lxplus you should run the following commands: ```bash virtualenv -p `which python3` venv source venv/bin/activate ``` ## Installation ```bash pip install runregistry ``` ## Authentication Prerequisites > **Warning** > Grid certificates have been deprecated by CERN. As of version `1.0.0`, the `runregistry` > client only works with a client ID and a secret. You will need to create an SSO registration for your application which is going to be using the runregistry API client. Instructions on how to do it can be found on the [`cernrequests`](https://github.com/CMSTrackerDPG/cernrequests) GitHub page. Once you have a client ID and a secret, you will need to store them in a file named `.env`. A [sample file](.env_sample) is provided so that you can edit it and rename it to `.env`. Alternatively, you can run `export SSO_CLIENT_ID=...` and `export SSO_CLIENT_SECRET=...` on the same terminal that you will be running your python script in. 
## Usage ### Get a single run (get_run): ```python import runregistry run = runregistry.get_run(run_number=328762) ``` ### Query several runs (get_runs): ```python import runregistry runs = runregistry.get_runs(filter={ 'run_number':{ 'or': [328762, 323555, 323444] } }) ``` Apply a custom filter (run_numbers between 309000 and 310000 which had at least one GOOD dt lumisection) ```python import runregistry runs = runregistry.get_runs( filter={ 'run_number': { 'and':[ {'>': 309000}, {'<': 310000} ] }, 'dt-dt': 'GOOD' } ) ``` Do note that we use `dt-dt` ('dt' twice) this is due to the fact that there are multiple workspaces, the first 'dt' states we are in dt workspace, the second 'dt' states we want column 'dt'. So the syntax for status flags is `{workspace}-{column}`. If we wanted runs with the strip column from tracker workspace to have at least 1 lumisection GOOD, the query would look like this: ```python import runregistry runs = runregistry.get_runs( filter={ 'run_number': { 'and':[ {'>': 309000}, {'<': 310000} ] }, 'tracker-strip': 'GOOD' } ) ``` Depending on the attribute you can use different operators: #### Operators | Attribute | Supported operators | | --------- | :-----------------------------: | | number | '=', '>', '<', '>=', '<=', '<>' | | String | =, like, notlike | | Boolean | = (true, false) | | date | '=', '>', '<', '>=', '<=', '<>' | When using `like` or `notlike` operator, you must surround your query with percentage signs, see example below. When filtering for triplet attributes (anything that is GOOD/BAD/STANDBY...) you must not use any String values, the only value allowed is strict equality '=' and is set by default. The values allowed are GOOD, BAD, STANDBY, NOTSET, EXCLUDED and EMPTY. 
You can combine the filters as well: ```python import runregistry runs = runregistry.get_runs( filter={ 'run_number': { 'and':[ {'>': 309000}, {'<': 310000} ] }, 'hlt_key': { 'like': '%commissioning2018%' } 'significant': { '=': True } } ) ``` If by observing the Network Requests in RR web application, you want to use the same filters observed by the network request. Just passs `ignore_filter_transformation=True` to any query. Example (run_numbers between 309000 and 310000 which had at least one GOOD dt lumisection): ```python import runregistry runs = runregistry.get_runs( filter={ 'run_number': { 'and':[ {'>': 309000}, {'<': 310000} ] }, # Remember! this will only work if you pass ignore_filter_transformation=True (please read above what this means), otherwise use the other examples 'oms_attributes.hlt_key': { 'like': '%commissioning2018%' }, 'triplet_summary.dt-dt.GOOD': { '>': 0 } }, ignore_filter_transformation=True ) ``` Also, if by observing the Network Requests in RR web application, you want to obtain the data as it is seen in the network requests. 
Just `compress_attributes=False`, for example: ```python import runregistry runs = runregistry.get_runs( filter={ 'run_number': { 'and':[ {'>': 309000}, {'<': 310000} ] }, 'dt': 'GOOD' }, compress_attributes=False ) ``` querying by comments and cause is not yet possible ### Get dataset ```python import runregistry dataset = runregistry.get_dataset( run_number=327604, dataset_name="/PromptReco/HICosmics18A/DQM" ) ``` ### Get datasets ```python import runregistry datasets = runregistry.get_datasets( filter={ 'run_number': { 'and':[ {'>': 309000}, {'<': 310000} ] } } ) ``` ### Get Lumisections #### Get the array of lumisections You can query the lumisections of a run (or dataset), you will need the run number and the dataset name (when querying for a run, the dataset name must be 'online') ```python import runregistry # lumisections = runregistry.get_lumisections(run_number, dataset_name) lumisections = runregistry.get_lumisections(327743, "/PromptReco/HICosmics18A/DQM") ``` The response will be an array of lumisections which will contain `{workspace}-{column}: {"status":"Either GOOD/BAD/STANDBY...", "comment": "a comment made for the range", "cause":"a common repeated cause"}` To get OMS data: use the OMS API. You should only use Run Registry for data that RR is responsible for. 
However if you still want to access OMS lumisections, you can do so like this: Previous Run Registry allowed you to change OMS (in that time WBM) attributes per dataset, if you need certain dataset lumisections you can provide the name of the RR dataset in the second argument: ```python import runregistry # oms_lumisections = runregistry.get_oms_lumisections(run_number, dataset_name) oms_lumisections = get_oms_lumisections(327743, 'online') # If you want to get particular dataset that is not online for OMS lumisections: dataset_oms_lumisections = get_oms_lumisections(327743, '/PromptReco/HICosmics18A/DQM') ``` #### Get lumisection ranges Usually there will be runs/datasets which contain an enormous amount of lumisections (some even more than 5000), therefore it can be heavy on the API to query for these type of lumisections. A query to retrieve ranges is also possible, you can do it like this: ```python import runregistry # lumisections = runregistry.get_lumisection_ranges(run_number, dataset_name) lumisections = runregistry.get_lumisection_ranges(327743, "/PromptReco/HICosmics18A/DQM") ``` You will receive an array of ranges, that apart from stating the triplets (comment, status and cause) for each column, the array will consist of two more attributes called **start** (lumisection where range starts) and **end** (lumisection where range ends). ### Handling the response When filtering runs, the attributes from the response get divided into those belonging to OMS and those belonging to RR (to see which belong to which, see the tables below, or go through a response). Those that belong to OMS are inside "oms_attributes". Those that belong to RR are inside "rr_attributes". 
### Attributes available to query According to the type of attribute (number, string, boolean), see the Operator table above to see which types of operators can be applied to querying Oms Attributes: | Attribute | Type | Belongs to | | ------------------------------------------------------------------------------------------ | :-----: | :--------: | | run_number | number | OMS | | energy | number | OMS | | l1_key | string | OMS | | b_field | number | OMS | | hlt_key | string | OMS | | l1_menu | string | OMS | | l1_rate | number | OMS | | duration | number | OMS | | end_lumi | number | OMS | | end_time | date | OMS | | sequence | string | OMS | | init_lumi | number | OMS | | clock_type | string | OMS | | start_time | date | OMS | | fill_number | number | OMS | | l1_hlt_mode | string | OMS | | last_update | date | OMS | | ls_duration | number | OMS | | stable_beam | boolean | OMS | | trigger_mode | string | OMS | | cmssw_version | string | OMS | | recorded_lumi | number | OMS | | delivered_lumi | number | OMS | | tier0_transfer | boolean | OMS | | l1_key_stripped | string | OMS | | fill_type_party1 | string | OMS | | fill_type_party2 | string | OMS | | hlt_physics_rate | number | OMS | | hlt_physics_size | number | OMS | | fill_type_runtime | string | OMS | | hlt_physics_counter | number | OMS | | l1_triggers_counter | number | OMS | | l1_hlt_mode_stripped | string | OMS | | hlt_physics_throughput | number | OMS | | initial_prescale_index | number | OMS | | beams_present_and_stable | boolean | OMS | | es_included | boolean | OMS | | hf_included | boolean | OMS | | daq_included | boolean | OMS | | dcs_included | boolean | OMS | | dqm_included | boolean | OMS | | gem_included | boolean | OMS | | trg_included | boolean | OMS | | hcal_included | boolean | OMS | | tcds_included | boolean | OMS | | pixel_included | boolean | OMS | | tracker_included | boolean | OMS | | \*\_included (be sure to add it to the validation runregistry/attributes if it's not here) | boolean | 
OMS | RR Run Attributes: | Attribute | Type | Belongs to | | ----------- | :-----: | :--------: | | class | string | RR | | state | string | RR | | significant | boolean | RR | | stop_reason | string | RR | RR Dataset Attributes: | Attribute | Type | Belongs to | | ------------- | :----: | :--------: | | dataset_name | string | RR | | dt_state | string | RR | | csc_state | string | RR | | hlt_state | string | RR | | l1t_state | string | RR | | rpc_state | string | RR | | tau_state | string | RR | | btag_state | string | RR | | ecal_state | string | RR | | hcal_state | string | RR | | lumi_state | string | RR | | muon_state | string | RR | | ctpps_state | string | RR | | castor_state | string | RR | | egamma_state | string | RR | | global_state | string | RR | | jetmet_state | string | RR | | tracker_state | string | RR | The dt_state, csc_state and so on, are the workspace OFFLINE states of the datasets, they can be either OPEN, SIGNOFF or COMPLETED. For Offline and Online status flags, filtering is also available. The Attribute is composed by `{workspace}-{column}`. So for example if we want to query for GOOD tracker-strip datasets of runs between 309000 and 310000, we would do it like this: ```python import runregistry datasets = runregistry.get_datasets(filter={ 'tracker-strip':'GOOD' 'run_number': {'and': [{'>': 309000}, {'<': 310000}]}, }) ``` ## Generating JSONs In order to generate JSONs (like the golden json) you must send the configuration of the attributes you wish the generated json to satisfy (in json-logic) The json logic below generates a json file for the dataset name: "/PromptReco/Collisions2018A/DQM" although you can use placeholders just as in the json portal as: /PromptReco/Collisions2018(A|B)/DQM or /PromptReco/Collisions2018\_/DQM the underscore '\_' is a wildcard. 
```python import runregistry json_logic = { "and": [ { ">=": [{ "var": "run.oms.energy" }, 6000] }, { "<=": [{ "var": "run.oms.energy" }, 7000] }, { ">=": [{ "var": "run.oms.b_field" }, 3.7] }, { "in": [ "25ns", { "var": "run.oms.injection_scheme" }] }, { "==": [{ "in": [ "WMass", { "var": "run.oms.hlt_key" }] }, False] }, { "==": [{ "var": "lumisection.rr.dt-dt" }, "GOOD"] }, { "==": [{ "var": "lumisection.rr.csc-csc" }, "GOOD"] }, { "==": [{ "var": "lumisection.rr.l1t-l1tmu" }, "GOOD"] }, { "==": [{ "var": "lumisection.rr.l1t-l1tcalo" }, "GOOD"] }, { "==": [{ "var": "lumisection.rr.hlt-hlt" }, "GOOD"] }, { "==": [{ "var": "lumisection.oms.bpix_ready" }, True] } ] } generated_json = runregistry.create_json(json_logic=json_logic, dataset_name_filter="/PromptReco/Collisions2018A/DQM") ``` ### Advanced You can also manipulate runs via API: 1. Mark run significant: ```python runregistry.make_significant_runs(run=362761) ``` 2. Reset RR attributes and reload data from OMS: ```python runregistry.reset_RR_attributes_and_refresh_runs(run=362761) ``` 3. Move runs from one state to another: ```python runregistry.move_runs("OPEN", "SIGNOFF", run=362761) ``` ## Troubleshooting ### Support If you have any questions, or the client is not working properly feel free to drop our team an email at [cms-dqm-coreTeam@cern.ch](mailto:cms-dqm-coreTeam@cern.ch). ### [Package developers] Updating the package on PyPI ```bash python3 -m pip install --upgrade pip build twine python3 -m build python3 -m twine upload --skip-existing --repository pypi dist/* ``` Instructions from [here](https://packaging.python.org/en/latest/tutorials/packaging-projects/). ## Testing ### Locally > **TODO** > Remove the qa environment after migration. You will be needing a file named `.env` with the following variables ```bash SSO_CLIENT_ID=<change> SSO_CLIENT_SECRET=<change> ENVIRONMENT=qa ``` While most of the tests work on the development deployment, some fail and need the production one. 
This is the reason we are setting `ENVIRONMENT=qa`. ```bash python3 -m venv venv source venv/bin/activate pip install -r requirements.txt pip install -r testing-requirements.txt pip install -e . pytest tests -s ``` ### GitHub actions Automated GitHub actions run on each push to the repository. The workflow is defined [here](./.github/workflows/test_package.yaml) The same env variables are needed as in [local testing](#locally), so those are added [here](https://github.com/cms-DQM/runregistry_api_client/settings/secrets/actions). ## FAQ ### Does this work with Python 2.7? No. ### Should I be using `runregistry_api_client` for getting OMS data? No*. Our recommendation is to query Run Registry only for data that RR is responsible for. <small>*It's not that you can't, it's just that this puts extra burden on the application, making it slow for everyone.</small> ### Is the token stored somewhere and reused? No, almost every function call gets a new token. This is not ideal, and it may be improved in the future.
/runregistry-1.0.0.tar.gz/runregistry-1.0.0/README.md
0.447702
0.906446
README.md
pypi
[![Contributors][contributors-shield]][contributors-url] [![Forks][forks-shield]][forks-url] [![Stargazers][stars-shield]][stars-url] [![Issues][issues-shield]][issues-url] [![MIT License][license-shield]][license-url] [![LinkedIn][linkedin-shield]][linkedin-url] <!-- PROJECT LOGO --> <br /> <div> <p> <a href="https://github.com/kpwhri/runrex"> <img src="images/logo.png" alt="Logo"> </a> </p> <h3 align="center">Runrex</h3> <p> Library to aid in organizing, running, and debugging regular expressions against large bodies of text. </p> </div> <!-- TABLE OF CONTENTS --> ## Table of Contents * [About the Project](#about-the-project) * [Getting Started](#getting-started) * [Prerequisites](#prerequisites) * [Installation](#installation) * [Usage](#usage) * [Roadmap](#roadmap) * [Contributing](#contributing) * [License](#license) * [Contact](#contact) * [Acknowledgements](#acknowledgements) ## About the Project The goal of this library is to simplify the deployment of regular expression on large bodies of text, in a variety of input formats. <!-- GETTING STARTED --> ## Getting Started To get a local copy up and running follow these simple steps. ### Prerequisites * Python 3.8+ * runrex package: https://github.com/kpwhri/runrex ### Installation 1. Clone the repo ```sh git clone https://github.com/kpwhri/runrex.git ``` 2. Install requirements (`requirements-dev` is for test packages) ```sh pip install -r requirements.txt -r requirements-dev.txt ``` 3. If you wish to read text from SAS or SQL, you will need to install additional requirements. These additional requirements files may be of use: - ODBC-connection: `requirements-db.txt` - Postgres: `requirements-psql.txt` - SAS: `requirements-sas.txt` 4. Run tests. 
```sh set/export PYTHONPATH=src pytest tests ``` ## Usage ### Example Implementations * [Social Isolation](https://github.com/kpwhri/social-isolation-runrex) * [Acute Pancreatitis](https://github.com/kpwhri/apanc-runrex) * [Anaphylaxis](https://github.com/kpwhri/anaphylaxis-runrex) * [PCOS](https://github.com/kpwhri/pcos-runrex) ### Build Customized Algorithm * Create 4 files: * `patterns.py`: defines regular expressions of interest * See `examples/example_patterns.py` for some examples * `test_patterns.py`: tests for those regular expressions * Why? Make sure the patterns do what you think they do * `algorithm.py`: defines algorithm (how to use regular expressions); returns a Result * See `examples/example_algorithm.py` for guidance * `config.(py|json|yaml)`: various configurations defined in `schema.py` * See example in `examples/example_config.py` for basic config ## Input Data Accepts a variety of input formats, but will need to at least specify a `document_id` and `document_text`. The names are configurable. ### Sentence Splitting By default, the input document text is expected to have each sentence on a separate line. If a sentence splitting scheme is desired, it will need to be supplied to the application. 
### Schema/Examples For more details, see the [example config](https://github.com/kpwhri/runrex/blob/master/examples/example_config.py) or consult the [schema](https://github.com/kpwhri/runrex/blob/master/src/runrex/schema.py) ## Output Format * Recommended output format is `jsonl` - The data can be extracted using python: ```python import json with open('output.jsonl') as fh: for line in fh: data = json.loads(line) # data is dict ``` * Output variables are configurable and can include: - **id**: unique id for line - **name**: document name - **algorithm**: name of algorithm with finding - **value** - **category**: name of category (usually the pattern; multiple categories contribute to an algorithm) - **date** - **extras** - **matches**: pattern matches - **text**: captured text - **start**: start index/offset of match - **end**: end index/offset of match * Scripts to accomplish useful tasks with the output are included in the `scripts` directory. ## Versions Uses [SEMVER](https://semver.org/). See https://github.com/kpwhri/runrex/releases. <!-- ROADMAP --> ## Roadmap See the [open issues](https://github.com/kpwhri/runrex/issues) for a list of proposed features (and known issues). <!-- CONTRIBUTING --> ## Contributing Any contributions you make are **greatly appreciated**. 1. Fork the Project 2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`) 3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`) 4. Push to the Branch (`git push origin feature/AmazingFeature`) 5. Open a Pull Request <!-- LICENSE --> ## License Distributed under the MIT License. See `LICENSE` or https://kpwhri.mit-license.org for more information. <!-- CONTACT --> ## Contact Please use the [issue tracker](https://github.com/kpwhri/runrex/issues). 
<!-- ACKNOWLEDGEMENTS --> ## Acknowledgements <!-- MARKDOWN LINKS & IMAGES --> <!-- https://www.markdownguide.org/basic-syntax/#reference-style-links --> [contributors-shield]: https://img.shields.io/github/contributors/kpwhri/runrex.svg?style=flat-square [contributors-url]: https://github.com/kpwhri/runrex/graphs/contributors [forks-shield]: https://img.shields.io/github/forks/kpwhri/runrex.svg?style=flat-square [forks-url]: https://github.com/kpwhri/runrex/network/members [stars-shield]: https://img.shields.io/github/stars/kpwhri/runrex.svg?style=flat-square [stars-url]: https://github.com/kpwhri/runrex/stargazers [issues-shield]: https://img.shields.io/github/issues/kpwhri/runrex.svg?style=flat-square [issues-url]: https://github.com/kpwhri/runrex/issues [license-shield]: https://img.shields.io/github/license/kpwhri/runrex.svg?style=flat-square [license-url]: https://kpwhri.mit-license.org/ [linkedin-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=flat-square&logo=linkedin&colorB=555 [linkedin-url]: https://www.linkedin.com/company/kaiserpermanentewashingtonresearch <!-- [product-screenshot]: images/screenshot.png -->
/runrex-0.4.2.tar.gz/runrex-0.4.2/README.md
0.93408
0.743866
README.md
pypi
RunStats: Computing Statistics and Regression in One Pass ========================================================= `RunStats`_ is an Apache2 licensed Python module for online statistics and online regression. Statistics and regression summaries are computed in a single pass. Previous values are not recorded in summaries. Long running systems often generate numbers summarizing performance. It could be the latency of a response or the time between requests. It's often useful to use these numbers in summary statistics like the arithmetic mean, minimum, standard deviation, etc. When many values are generated, computing these summaries can be computationally intensive. It may even be infeasible to keep every recorded value. In such cases computing online statistics and online regression is necessary. In other cases, you may only have one opportunity to observe all the recorded values. Python's generators work exactly this way. Traditional methods for calculating the variance and other higher moments requires multiple passes over the data. With generators, this is not possible and so computing statistics in a single pass is necessary. The Python `RunStats`_ module was designed for these cases by providing a pair of classes for computing online summary statistics and online linear regression in a single pass. Summary objects work on sequences which may be larger than memory or disk space permit. They may also be efficiently combined together to create aggregate summaries. Features -------- - Pure-Python - Fully Documented - 100% Test Coverage - Numerically Stable - Optional Cython-optimized Extension (20-40 times faster) - Statistics summary computes mean, variance, standard deviation, skewness, kurtosis, minimum and maximum. - Regression summary computes slope, intercept and correlation. - Developed on Python 3.7 - Tested on CPython 2.7, 3.4, 3.5, 3.6, 3.7 and PyPy, PyPy3 - Tested using Travis CI .. 
image:: https://api.travis-ci.org/grantjenks/python-runstats.svg?branch=master :target: http://www.grantjenks.com/docs/runstats/ .. image:: https://ci.appveyor.com/api/projects/status/github/grantjenks/python-runstats?branch=master&svg=true :target: http://www.grantjenks.com/docs/runstats/ Quickstart ---------- Installing `RunStats`_ is simple with `pip <http://www.pip-installer.org/>`_:: $ pip install runstats If you want the Cython-optimized version then first install `Cython <http://cython.org/>`_:: $ pip install cython $ pip install runstats You can access documentation in the interpreter with Python's built-in help function: .. code-block:: python >>> from runstats import Statistics, Regression >>> help(Statistics) >>> help(Regression) Tutorial -------- The Python `RunStats`_ module provides two types for computing running Statistics and Regression. The Regression object leverages Statistics internally for its calculations. Each can be initialized without arguments: .. code-block:: python >>> from runstats import Statistics, Regression >>> stats = Statistics() >>> regr = Regression() Statistics objects support four methods for modification. Use `push` to add values to the summary, `clear` to reset the summary, sum to combine Statistics summaries and multiply to weight summary Statistics by a scalar. .. code-block:: python >>> for num in range(10): ... stats.push(num) >>> stats.mean() 4.5 >>> stats.maximum() 9 >>> stats += stats >>> stats.mean() 4.5 >>> stats.variance() 8.68421052631579 >>> len(stats) 20 >>> stats *= 2 >>> len(stats) 40 >>> stats.clear() >>> len(stats) 0 >>> stats.minimum() is None True Use the Python built-in `len` for the number of pushed values. Unfortunately the Python `min` and `max` built-ins may not be used for the minimum and maximum as sequences are instead expected. There are instead `minimum` and `maximum` methods which are provided for that purpose: .. 
code-block:: python >>> import random >>> random.seed(0) >>> for __ in range(1000): ... stats.push(random.random()) >>> len(stats) 1000 >>> min(stats) Traceback (most recent call last): ... TypeError: iteration over non-sequence >>> stats.minimum() 0.00024069652516689466 >>> stats.maximum() 0.9996851255769114 Statistics summaries provide five measures of a series: mean, variance, standard deviation, skewness and kurtosis: .. code-block:: python >>> stats = Statistics([1, 2, 5, 12, 5, 2, 1]) >>> stats.mean() 4.0 >>> stats.variance() 15.33333333333333 >>> stats.stddev() 3.915780041490243 >>> stats.skewness() 1.33122127314735 >>> stats.kurtosis() 0.5496219281663506 All internal calculations use Python's `float` type. Like Statistics, the Regression type supports some methods for modification: `push`, `clear` and sum: .. code-block:: python >>> regr.clear() >>> len(regr) 0 >>> for num in range(10): ... regr.push(num, num + 5) >>> len(regr) 10 >>> regr.slope() 1.0 >>> more = Regression((num, num + 5) for num in range(10, 20)) >>> total = regr + more >>> len(total) 20 >>> total.slope() 1.0 >>> total.intercept() 5.0 >>> total.correlation() 1.0 Regression summaries provide three measures of a series of pairs: slope, intercept and correlation. Note that, as a regression, the points need not exactly lie on a line: .. code-block:: python >>> regr = Regression([(1.2, 1.9), (3, 5.1), (4.9, 8.1), (7, 11)]) >>> regr.slope() 1.5668320150154176 >>> regr.intercept() 0.21850113956294415 >>> regr.correlation() 0.9983810791694997 Both constructors accept an optional iterable that is consumed and pushed into the summary. Note that you may pass a generator as an iterable and the generator will be entirely consumed. All internal calculations are based entirely on the C++ code by John Cook as posted in a couple of articles: * `Computing Skewness and Kurtosis in One Pass`_ * `Computing Linear Regression in One Pass`_ .. 
_`Computing Skewness and Kurtosis in One Pass`: http://www.johndcook.com/blog/skewness_kurtosis/ .. _`Computing Linear Regression in One Pass`: http://www.johndcook.com/blog/running_regression/ The pure-Python and Cython-optimized versions of `RunStats`_ are each directly available if preferred. .. code-block:: python >>> from runstats.core import Statistics, Regression # pure-Python >>> from runstats.fast import Statistics, Regression # Cython-optimized When importing from `runstats` the `fast` version is preferred and the `core` version is used as fallback. Micro-benchmarking Statistics and Regression by calling `push` repeatedly shows the Cython-optimized extension as 20-40 times faster than the pure-Python extension. .. _`RunStats`: http://www.grantjenks.com/docs/runstats/ Reference and Indices --------------------- * `RunStats Documentation`_ * `RunStats API Reference`_ * `RunStats at PyPI`_ * `RunStats at GitHub`_ * `RunStats Issue Tracker`_ .. _`RunStats Documentation`: http://www.grantjenks.com/docs/runstats/ .. _`RunStats API Reference`: http://www.grantjenks.com/docs/runstats/api.html .. _`RunStats at PyPI`: https://pypi.python.org/pypi/runstats/ .. _`RunStats at GitHub`: https://github.com/grantjenks/python-runstats/ .. _`RunStats Issue Tracker`: https://github.com/grantjenks/python-runstats/issues/ License ------- Copyright 2013-2019 Grant Jenks Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
/runstats-1.8.0.tar.gz/runstats-1.8.0/README.rst
0.944511
0.917598
README.rst
pypi
# runtasks A simple task runner for Python that is useful for build scripts. It is designed to allow Python functions to be called from the command line with no frameworks to get in the way. The "run" utility searches up the directory tree for a file named "tasks.py". Any functions decorated with `@task` are callable from the command line. For example, given a tasks.py file like this: from runtasks import task @task def hello(who='World'): "Prints a friendly greeting" print('Hello, {}!'.format(who)) the hello function can be called in any of these ways: ``` text $ run hello Hello, World! $ run hello who=Bob Hello, Bob! $ run hello Bob Hello, Bob! $ run hello -w Bob Hello, Bob! ``` Use the `--list` option to print the list of tasks, their options, and documentation strings: ``` text $ run --list Available tasks: hello who='World' Prints a friendly greeting ``` Use `--help` to get help on any of the tasks: ``` text $ run hello --help hello who='World' Prints a friendly greeting ``` ### Features * Commands are simple Python functions. * Flexible command line parsing. * Supports text arguments, Boolean flags, integers, and counters. ## Command Line Parsing Command line input is extremely flexible and yet is designed to not require any configuration - just the function signature. Multiple tasks can be passed on the command line and they are executed in the order specified: ``` text $ run createdb testdb smoketests ``` Arguments can be passed to tasks in a number of formats: * Name and Value: * `run hello --who=Bob` * `run hello --who Bob` * `run hello who=Bob` * Value: * `run hello Bob` * Flags: * `run hello --debug` * `run hello --no-debug` * `run hello -d` * Counters: * `run hello -vvv` ### Value Parameters Parameters can be provided values. If a parameter does not have a default value, a value must be provided. 
@task def sometask(arg1, arg2='defvalue'): print('arg1={} arg2={}'.format(arg1, arg2)) Values can be provided in a few ways: ``` text $ run sometask --arg1=value1 --arg2=value2 arg1=value1 arg2=value2 $ run sometask --arg2=value2 --arg1=value1 arg1=value1 arg2=value2 $ run sometask --arg1 value1 arg1=value1 arg2=defvalue $ run sometask arg1=value1 arg1=value1 arg2=defvalue $ run sometask value1 arg1=value1 arg2=defvalue $ run sometask value1 value2 arg1=value1 arg2=value2 $ runtask sometask Task sometask argument arg1 was not provided a value ``` Notice that arguments can be provided in any order when providing the name of the argument. To provide values without names (e.g. `runtask sometask value1 value2`), the arguments must be in order. These are only accepted before arguments with names, so the following is not valid: ``` text # NOT VALID $ run sometask --arg1=value1 value2 ``` Since arg1 was provided with a name, "value2" will be assumed to be the next task name to run. ### Flag Parameters When a default parameter is set to `True` or `False`, the argument generally does not accept a value on the command line. Instead, the argument name itself is accepted to mean the flag should be set to `True` and the argument name preceded by "no-" is accepted to mean the flag should be set to `False`. @task def sometask(flag1=False, flag2=True): print(flag1, flag2) $ run sometask False True $ run sometask --flag1 True True $ run sometask --no-flag2 False False $ run sometask --no-flag1 False true Note that flag parameter names require dashes. The following does not work because it will try to pass the value "flag1" to the first parameter, but values are not accepted for flags: ``` text $ run sometask flag1 Task sometask flag1 parameter is a flag. 
Invalid value 'flag1' ``` It is usually not necessary, but the values 0 and 1 can be passed to Boolean parameters: ``` text $ run sometask --flag1 1 True True $ run sometask --flag2 0 False False $ run sometask 1 True True ``` ### Integers When a default parameter is an integer, the value passed on the command line will be converted to an integer. ``` text @task def inttask(n=123): print(n, type(n)) $ run inttask 33 33 <class 'int'> ``` ### Counters If an integer parameter is used but a value is not provided, the parameter is treated as a *counter* and is incremented. This makes it easy to implement things like verbosity counters: ``` text @task def counter(verbosity=0): print(verbosity) $ run counter --verbosity 1 $ run counter -vvv 3 ``` ### Short Names Arguments can also be provided using a single dash and the first letter of the parameter name, as long as the first letter is unique. @task def sometask(arg1=None, flag=False): print('arg1={} flag={}'.format(arg1, flag)) $ run sometask -a value1 arg1=value1 flag=False $ run sometask -a=value1 arg1=value1 flag=False $ run sometask -f arg1=None flag=True $ run sometask -fa=value1 arg1=value1 flag=True $ run sometask -fa value1 arg1=value1 flag=True You can override the character assigned to a parameter using the task decorator, which is particularly handy when the first letters are not unique. @task(flags={'x': 'flag'}) def sometask(flag=False, forward=False): print('flag={} forward={}'.format(flag, forward)) $ run sometask flag=False forward=False $ run sometask -x flag=True forward=False $ run sometask -f flag=False forward=True $ run sometask -fx flag=True forward=True Notice that "f" is assigned to the "forward" parameter since it is the only remaining parameter that starts with "f" now that "flag" is assigned to "x". ## Rationale The distutils package is great for packaging, but in the past I'd also used it for defining a myriad of per-project utility scripts (setup a test database, etc.). 
I'd used distutils because it was built-in, but, honestly, it's design is terrible, the command line parsing always requires some option name with dashes, and it does bizarre things without telling you. For example, if you define a user option named "user", it will be silently ignored when running in a virtual environment! (That was the last straw!) All I really wanted was a single script I could invoke with command line parsing that "does what I want". I looked at many other packages but the only one close to this simplicity was Invoke. Unfortunately the configuration for Invoke was way to complicated for me to figure out, particularly how to update the configuration in one task for later tasks to use. I also don't mind if some combinations are ambiguous. Command line convenience is most important here.
/runtasks-3.6.0.tar.gz/runtasks-3.6.0/README.md
0.780244
0.880951
README.md
pypi
from xml.sax.saxutils import XMLFilterBase class text_normalize_filter(XMLFilterBase): """ SAX filter to ensure that contiguous white space nodes are delivered merged into a single node """ def __init__(self, upstream, downstream): XMLFilterBase.__init__(self, upstream) self._downstream = downstream self._accumulator = [] return def _complete_text_node(self): if self._accumulator: self._downstream.characters(''.join(self._accumulator)) self._accumulator = [] return def startElement(self, name, attrs): self._complete_text_node() self._downstream.startElement(name, attrs) return def startElementNS(self, name, qname, attrs): self._complete_text_node() self._downstream.startElementNS(name, qname, attrs) return def endElement(self, name): self._complete_text_node() self._downstream.endElement(name) return def endElementNS(self, name, qname): self._complete_text_node() self._downstream.endElementNS(name, qname) return def processingInstruction(self, target, body): self._complete_text_node() self._downstream.processingInstruction(target, body) return def comment(self, body): self._complete_text_node() self._downstream.comment(body) return def characters(self, text): self._accumulator.append(text) return def ignorableWhitespace(self, ws): self._accumulator.append(text) return if __name__ == "__main__": import sys from xml import sax from xml.sax.saxutils import XMLGenerator parser = sax.make_parser() #XMLGenerator is a special SAX handler that merely writes #SAX events back into an XML document downstream_handler = XMLGenerator() #upstream, the parser, downstream, the next handler in the chain filter_handler = text_normalize_filter(parser, downstream_handler) #The SAX filter base is designed so that the filter takes #on much of the interface of the parser itself, including the #"parse" method filter_handler.parse(sys.argv[1])
/runtilities-2.2.0.tar.gz/runtilities-2.2.0/running/textnormalize.py
0.669421
0.153296
textnormalize.py
pypi
from argparse import ArgumentParser from csv import DictWriter from datetime import timedelta # pypi from loutilities.xmldict import ConvertXmlToDict class ParameterError(Exception): pass def dist2miles(distel): dist = float(distel['_text']) distunits = distel['unit'] if distunits == 'mi': pass elif distunits == 'km': dist /= 1.609344 elif distunits == 'm': dist /= 1609.344 else: raise ParameterError(f'unexpected distance unit {distunits}') return dist def convertsecs(dur): return str(timedelta(seconds = float(dur))) def main(): parser = ArgumentParser() parser.add_argument('-X', '--xmlfile', help='input xml file, from RunningAHEAD export', required=True) parser.add_argument('-C', '--csvfile', help='output csv file', required=True) args = parser.parse_args() fieldnames = 'date,time,type,subtype,dist,duration,equipment,route,temp,notes'.split(',') workouts = ConvertXmlToDict(args.xmlfile) with open(args.csvfile, 'w', newline='') as oscsvfile: csvfile = DictWriter(oscsvfile, fieldnames=fieldnames) csvfile.writeheader() for wo in workouts['RunningAHEADLog']['EventCollection']['Event']: if wo['typeName'] not in ['Run', 'Bike', 'Walk']: continue datetime = wo['time'] datetimesplit = datetime.split('T') date = datetimesplit[0] time = datetimesplit[1] if len(datetimesplit) == 2 else '' time = time[:-1] if time and time[-1] == 'Z' else time row = { 'date': date, 'time': time, 'type': wo['typeName'], 'subtype': wo['subtypeName'] if 'subtypeName' in wo else '', 'dist': dist2miles(wo['Distance']) if 'Distance' in wo else '', 'duration': convertsecs(wo['Duration']['seconds']) if 'Duration' in wo else '', 'equipment': wo['Equipment']['_text'] if 'Equipment' in wo else '', 'route': wo['Route']['_text'] if 'Route' in wo else '', 'temp': wo['EnvironmentalConditions']['Temperature']['_text'] if 'EnvironmentalConditions' in wo and 'Temperature' in wo['EnvironmentalConditions'] else '', 'notes': wo['Notes'] if 'Notes' in wo else '', } csvfile.writerow(row) if __name__ == "__main__": 
main()
/runtilities-2.2.0.tar.gz/runtilities-2.2.0/running/parseralogxml.py
0.533884
0.174551
parseralogxml.py
pypi
![license](https://img.shields.io/pypi/l/runtime-config-py?style=for-the-badge) ![python version](https://img.shields.io/pypi/pyversions/runtime-config-py?style=for-the-badge) [![version](https://img.shields.io/pypi/v/runtime-config-py?style=for-the-badge)](https://pypi.org/project/runtime-config-py/) [![coverage](https://img.shields.io/codecov/c/github/runtime-config/runtime-config-py/master?style=for-the-badge)](https://app.codecov.io/gh/runtime-config/runtime-config-py) [![tests status](https://img.shields.io/github/workflow/status/runtime-config/runtime-config-py/Tests/master?style=for-the-badge)](https://github.com/runtime-config/runtime-config-py/actions?query=branch%3Amaster) [![](https://img.shields.io/pypi/dm/runtime-config-py?style=for-the-badge)](https://pypi.org/project/runtime-config-py/) runtime-config-py ================= This library allows you to update project settings at runtime. In its basic use case, it is just a client for the [server](https://github.com/runtime-config/runtime-config), but if necessary, you can implement your adapter for the desired source and get settings from them. runtime-config-py supports Python 3.8+. Examples of using: - Create feature flags to control which features are enabled for users. Feature flags are especially useful when the service is based on a microservice architecture and the addition of a new feature affects multiple services. - Quick response to problems in project infrastructure. For example, if one of consumers sends too many requests to another service, and you need to reduce its performance. 
Table of contents: - [Installation](#installation) - [Usage](#usage) - [Backend](#backend) - [Development](#development) - [Tests](#tests) - [Style code](#style-code) # Installation You can install the library like this: - from pypi ``` pip install "runtime-config-py[aiohttp]" ``` or ``` poetry add runtime-config-py -E aiohttp ``` - from git: ``` pip install git+https://github.com/runtime-config/runtime-config-py.git#egg="runtime-config-py[aiohttp]" ``` Source dependencies have been moved to extras to give you more control over which libraries are installed. If you have a project dependency on a certain version of aiohttp you can install the library without specifying extras. ``` pip install runtime-config-py ``` # Usage Examples of using the library can be found [here](./example). Let's see a simple example of using this library together with aiohttp application. ```python from aiohttp import web from runtime_config import RuntimeConfig from runtime_config.sources import ConfigServerSrc async def hello(request): name = request.app['config'].name return web.Response(text=f'Hello world {name}!') async def init(application): source = ConfigServerSrc(host='http://127.0.0.1:8080', service_name='hello_world') config = await RuntimeConfig.create(init_settings={'name': 'Alex'}, source=source) application['config'] = config async def shutdown(application): await application['config'].close() app = web.Application() app.on_startup.append(init) app.on_shutdown.append(shutdown) app.add_routes([web.get('/', hello)]) web.run_app(app, port=5000) ``` Before running this code, you need to run [server](https://github.com/runtime-config/runtime-config) from which this library can take new values for your variables. If you don't do this, nothing bad will not happen. You simply cannot change the value of the name variable at runtime :) **Automatic source initialization** You can simplify library initialization by automatically creating a source instance. 
Simply define the following environment variables and the source instance will be created automatically: - RUNTIME_CONFIG_HOST - RUNTIME_CONFIG_SERVICE_NAME **Ways to access settings** This library supports several ways to access variables. All of them are shown below: ```python print(config.name) print(config['name']) print(config.get('name', default='Dima')) ``` # Backend Currently, only 1 [backend](https://github.com/runtime-config/runtime-config) is supported. Later, support for other backends, such as redis, will probably be added to the library, but this is not in the nearest plans. If you need support for another settings storage source right now, you can write your own source. Implementing this is very simple. You need to create a class that will be able to retrieve data from the desired source and will inherit from `runtime_config.sources.base.BaseSource`. After that, an instance of the class you created must be passed to the `RuntimeConfig.create` method. ```python your_source = YourSource(...) config = await RuntimeConfig.create(..., source=your_source) ``` # Development ## Install deps ``` poetry install --all-extras ``` ## Tests Check the work of the library on several versions of Python at once using the command below: ``` make test-multi-versions ``` The simple test run is available through the command below: ``` make test ``` ## Style code For automatic code formatting and code verification, you need to use the command below: ``` make lint ```
/runtime_config_py-0.0.8.tar.gz/runtime_config_py-0.0.8/README.md
0.790732
0.769319
README.md
pypi
import collections import threading from hookery import Registry _thread_local = threading.local() _thread_local.stack = collections.defaultdict(list) class Context(dict): """ Dictionary of current state. Do not work with this directly, instead use RuntimeContextWrapper. Includes a link to the wrapper which created this Context, so this context is able to pop itself from the stack. """ def __init__(self, wrapper: 'RuntimeContextWrapper', context_vars: dict): super().__init__(context_vars) self.wrapper = wrapper def __enter__(self): self._push_context() return self.wrapper def __exit__(self, exc_type, exc_val, exc_tb): self._pop_context() def _push_context(self): self.wrapper._stack.append(self) self.wrapper.context_entered(context_vars=self) def _pop_context(self): assert self.wrapper.current is self self.wrapper._stack.pop() self.wrapper.context_exited(context_vars=self) class RuntimeContextWrapper: """ The main interface to work with runtime contexts. Create one instance of this per project and then do everything through it. """ _internals_ = ( '_stack', '_hookery', 'context_entered', 'context_exited', ) def __init__(self): # Stack is wrapper-instance specific, so there can be multiple unrelated stacks per thread. self._stack = _thread_local.stack[self] self._hookery = Registry() self.context_entered = self._hookery.register_event('context_entered') self.context_exited = self._hookery.register_event('context_exited') # It simplifies life a lot if there is always one context present for each wrapper. self.push_context() def __getattr__(self, name): """ Attribute access is strict -- names not available in the stack will raise an AttributeError. Use the .get() method for non-strict access. 
""" if self.is_context_var(name): return self.get(name) raise AttributeError(name) def __setattr__(self, name, value): if name in self._internals_: object.__setattr__(self, name, value) else: self.set(name, value) def __delattr__(self, name): if name in self._internals_: raise AttributeError('{!r} should not be deleted'.format(name)) elif self.is_context_var(name): return self.reset(name) else: return object.__delattr__(self, name) def get(self, name, default=None): for ctx in reversed(self._stack): if name in ctx: return ctx[name] return default def set(self, name, value): self.current[name] = value def reset(self, name): """ Resets the value of a var in the current context. """ if name in self.current: del self.current[name] def reset_context(self): """ Clears current context state """ self.current.clear() def push_context(self, context_vars_dict=None, **context_vars): self.new_context(context_vars_dict=context_vars_dict, **context_vars)._push_context() def pop_context(self): self.current._pop_context() @property def current(self): if not self._stack: raise RuntimeError('Trying to get current context while outside of runtime context') return self._stack[-1] def is_context_var(self, name): """ Returns True if `name` is declared anywhere in the context stack. """ return any(name in ctx for ctx in reversed(self._stack)) def __call__(self, context_vars_dict=None, **context_vars): return self.new_context(context_vars_dict=context_vars_dict, **context_vars) def new_context(self, context_vars_dict=None, **context_vars): context_vars = context_vars_dict or context_vars return Context(self, context_vars)
/runtime-context-3.0.0.tar.gz/runtime-context-3.0.0/runtime_context/runtime_context.py
0.73431
0.16132
runtime_context.py
pypi
from __future__ import annotations __all__ = [ "KeyPath", "KeyPathSupporting", ] # region[Keywords] from typing import TYPE_CHECKING, Final, Generic, Protocol, TypeVar, cast, final # endregion[Keywords] # region[Types] if TYPE_CHECKING: from typing import Any, Sequence # endregion[Types] import threading from dataclasses import dataclass, field from typing import NamedTuple _Value_t = TypeVar("_Value_t") _Value_co = TypeVar("_Value_co", covariant=True) class _ThreadLocalProtocol(Protocol): recorder: _KeyPathRecorder """ The active key-path recorder for this thread. May not exist. """ _thread_local = cast("_ThreadLocalProtocol", threading.local()) @final class _Terminals(NamedTuple): start: Any end: Any @final @dataclass class _KeyPathRecorder: terminals: _Terminals | None = None key_list: list[str] = field(default_factory=list) busy: bool = False # ! A metaclass is made for class `KeyPath`, and `KeyPath.of` is # ! provided as a property on class `KeyPath`, so that whenever # ! `KeyPath.of` gets accessed, we can do something before it actually # ! gets called. @final class _KeyPathMeta(type): @property def of(self, /) -> _KeyPathOfFunction: """ Returns the key-path for accessing a certain value from a target object with a key sequence such as `a.b.c`. The target object and all intermediate objects, except for the final value, are expected to subclass `KeyPathSupporting`. Parameters ---------- `value` A value that is accessed with chained keys such as `a.b.c`. Returns ------- A key-path that indicates the target object and the key sequence to access the given value. Raises ------ `RuntimeError` Typically occurs when the target or an intermediate object isn't subclassing `KeyPathSupporting`. Check the error message for more details. Example ------- >>> class A(KeyPathSupporting): ... def __init__(self) -> None: ... self.b = B() ... def __repr__(self) -> str: ... return "a" >>> class B(KeyPathSupporting): ... def __init__(self) -> None: ... 
self.c = C() >>> class C: ... pass >>> a = A() >>> KeyPath.of(a.b.c) KeyPath(target=a, keys=('b', 'c')) """ try: _ = _thread_local.recorder except AttributeError: pass else: raise RuntimeError( " ".join( [ "An unfinished key-path recorder has been found.", "Check if `KeyPath.of` is always called immediatelly.", ] ) ) recorder = _KeyPathRecorder() _thread_local.recorder = recorder func = _KeyPathOfFunction() return func # ! We implement the result of `KeyPath.of` as a stand-alone class, so # ! that when an exception occurred during the key-path access, there # ! would still be a chance to perform some finalization. class _KeyPathOfFunction: """ Returns the key-path for accessing a certain value from a target object with a key sequence such as `a.b.c`. The target object and all intermediate objects, except for the final value, are expected to subclass `KeyPathSupporting`. Parameters ---------- `value` A value that is accessed with chained keys such as `a.b.c`. Returns ------- A key-path that indicates the target object and the key sequence to access the given value. Raises ------ `RuntimeError` Typically occurs when the target or an intermediate object isn't subclassing `KeyPathSupporting`. Check the error message for more details. Example ------- >>> class A(KeyPathSupporting): ... def __init__(self) -> None: ... self.b = B() ... def __repr__(self) -> str: ... return "a" >>> class B(KeyPathSupporting): ... def __init__(self) -> None: ... self.c = C() >>> class C: ... 
pass >>> a = A() >>> KeyPath.of(a.b.c) KeyPath(target=a, keys=('b', 'c')) """ __invoked: bool = False def __call__(self, value: _Value_t, /) -> KeyPath[_Value_t]: self.__invoked = True try: recorder = _thread_local.recorder except AttributeError: raise RuntimeError( " ".join( [ "`KeyPath.of` must be accessed and then called immediatedly", "and should NOT be called more than once.", ] ) ) del _thread_local.recorder assert not recorder.busy terminals = recorder.terminals key_list = recorder.key_list if terminals is None: assert len(key_list) == 0 raise RuntimeError("No key has been recorded.") else: assert len(key_list) > 0 if terminals.end is not value: raise RuntimeError( " ".join( [ "Key-path is broken. Check if there is something that does", "NOT support key-paths in the member chain.", ] ) ) key_path = KeyPath(terminals.start, key_list) return key_path def __del__(self, /) -> None: # ! If an exception had occured during the key-path access, or # ! this function were just discarded without being finally # ! called, we would do some cleaning here. 
if not self.__invoked: del _thread_local.recorder @final class KeyPath(Generic[_Value_co], metaclass=_KeyPathMeta): __target: Final[object] __keys: Final[Sequence[str]] def __init__(self, /, target: object, keys: str | Sequence[str]) -> None: self.__target = target if isinstance(keys, str): keys = tuple(keys.split(".")) else: keys = tuple(keys) self.__keys = keys @property def target(self, /) -> object: return self.__target @property def keys(self, /) -> Sequence[str]: return self.__keys def __hash__(self, /) -> int: return hash((self.target, self.keys)) def __eq__(self, other: object, /) -> bool: return ( isinstance(other, KeyPath) and self.target is other.target and self.keys == other.keys ) def __repr__(self, /) -> str: return f"{KeyPath.__name__}(target={self.target!r}, keys={self.keys!r})" def __call__(self, /) -> _Value_co: value = self.__target for key in self.__keys: value = getattr(value, key) return cast("_Value_co", value) class KeyPathSupporting: # ! This method is purposedly not named as `__getattribute__`. See below for reason. def __getattribute_0__(self, key: str, /) -> Any: try: recorder = _thread_local.recorder except AttributeError: # There is no recorder, which means that `KeyPath.of` is not # being called. So we don't need to record this key. return super().__getattribute__(key) if recorder.busy: # The recorder is busy, which means that another member is # being accessed, typically because the computation of that # member is dependent on this one. So we don't need to # record this key. return super().__getattribute__(key) recorder.busy = True terminals = recorder.terminals if terminals is not None and terminals.end is not self: raise RuntimeError( " ".join( [ "Key-path is broken. 
Check if there is something that does NOT", "support key-paths in the member chain.", ] ) ) value = super().__getattribute__(key) if terminals is None: terminals = _Terminals(self, value) else: terminals = terminals._replace(end=value) recorder.terminals = terminals recorder.key_list.append(key) recorder.busy = False return value # ! `__getattribute__(...)` is declared against `TYPE_CHECKING`, so that unknown # ! attributes on conforming classes won't be regarded as known by type-checkers. if not TYPE_CHECKING: __getattribute__ = __getattribute_0__ del __getattribute_0__
/runtime_keypath-0.1.2-py3-none-any.whl/runtime_keypath/_core.py
0.932423
0.333829
_core.py
pypi
`runtime-syspath` is a package to ease programmatically adding src root paths to `sys.path`. This is targeted at python test code that needs to discover a project's solution source to test. > :exclamation: It is generally **frowned upon** to alter the `sys.path` > programmatically as it confuses development, especially refactoring. > Python IDEs can statically determine if a dependent package's import > statement is left wanting whether a PyPi installation in needed or > source cannot be discovered through standard Python paths. A static > analysis tool's *missing import* detection will end up registering > false-negatives if the import is discovered via dynamic (programmatic) > additions to `sys.path` at runtime. *The following description assumes the use of `pytest` unit testing support and a project file structuring that includes project root directories named `src` (project solution) and `tests` (project tests of project source under `src`. Both `src` and `tests` are not intended to have package initializers (`__init__.py`). Packages therein will typically have package initializers allowing for test modules to have that same name (in separate packages). However, as a general rule, test modules are not intended to import other test modules. Therefore, there should be no need for `__init__.py`-enabled, relative importation between test cases or sub-package test cases. 
`pytest`'s [default test discovery](https://docs.pytest.org/en/latest/goodpractices.html#test-discovery) and intended design use negates the need for :* ``` ├─ src │ └─ __init__.py | └─ foo.py ├─ tests │ └─ test_foo.py │ └─ foo_and_goo │ └─ __init__.py │ └─ test_foo.py │ └─ test_goo.py └─ setup.py ``` *That structure is based upon [this guidance](https://blog.ionelmc.ro/2014/05/25/python-packaging/#the-structure).* When testing solution source in a project, the test cases _could_ statically access the solution source by importing with the `src` package prefix: ``` import src.packagename.foo ``` Not only does that not feel right at all, that solution implies that tests are run **only** from the project root, not within the `tests` directory itself. If the test is run within the `tests` directory, the `src` package won't be found at runtime. So, using: ``` import packagename.foo ``` ... the `src` directory would need to be programmatically added to the `sys.path`. This will allow for tests to be run form any working directory under the `tests` sub-tree. `runtime_syspath.add_srcdirs_to_syspath()` will discover all `src` directories under `<project root>/src`. The reason that there may be more is if your project may be leveraging `git subprojects` under `<project root>/src` that have their own `src` directories. Those need to be added to `sys.path` also. To leverage `runtime-syspath` to add the `src` directory everytime a test is run, import `runtime-syspath` and run `add_srcdirs_to_syspath()` in `tests/conftest.py`. (If `tests` contain more `conftest.py` under its directory tree, the call still only need appear in the root `test/conftest.py`!): ``` from runtime_syspath import add_srcdirs_to_syspath add_srcdirs_to_syspath() ``` `add_srcdirs_to_syspath()` will recursively discover **all** `src` subdirectories under the <project root>. For projects that use `git submodules`, their `src` directories need to be added to `src.path` for import access. 
`git subprojects` could be added to `src` or `tests` directory trees: ``` ├─ src │ └─ __init__.py | └─ projectpackage │ └─ __init__.py | └─ foo.py | └─ subproject | └─ src │ └─ __init__.py | └─ bar.py | └─ tests ├─ tests │ └─ test_foo.py | └─ test_subproject | └─ src │ └─ __init__.py | └─ unfoobarrator.py | └─ tests └─ setup.py ``` > :exclamation: Due to the code maintenance and grok'ing mayhem caused > by indiscriminate runtime additions to `sys.path`, your goal should be > to limit that anti-pattern to this discovery-of-source aspect for > import discovery. > :bulb: Since programmatically adding to a `sys.path` impairs an IDE's > ability to do static import discovery and leveraging IDE refactoring > features between the solution source and the test code, an IDE user > would need to manually mark all `src` directories as such. > PyCharm example: > > ![image](docs/images/IDE_SetSrc.png) #### SysPathSleuth; runtime reporting of programmatic `sys.path` access On a project riddled with programmatically appending source paths to `sys.path`, a tool to discover which modules are mucking with `sys.path` and when could prove useful. This discovery can assist with manually eradicating `sys.path` access in favor of updating imports with fully-qualified (anchored at but, not including `src`), absolute module/package names. static tools would then be able to discover the modules/packages imported. > Relative paths: There is a place for relative paths when importing > intra-package modules. But, when importing inter-package modules, > leveraging fully-qualified, absolute module/package names is a wiser > play. SysPathSleuth is a monkey-patch of `sys.path` to report on `sys.path` access that comes with an installer to install/uninstall SysPathSleuth into either the user or system site's _customize_ modules (`~/pathto/user_site/usercustomize.py` or `/pathto/python/site-packages/sitecustomize.py`). SysPathSleuth can be installed/uninstalled using one of following option: 1. 
`python -m syspath_sleuth \[--install _or_ --uninstall]` 2. `syspath_sleuth_injector \[--install _or_ --uninstall]` 3. at the start within a running program At the start of a running program prior: ``` import atexit import syspath_sleuth from runtime-syspath import syspath_slueth syspath_sleuth.inject_sleuth() def uninstall_syspath_sleuth(): syspath_sleuth.uninstall_sleuth() atexit.register(uninstall_syspath_sleuth) if __name__ == "__main__": go_main_go() ``` It is possible to provide your own SysPathSleuth for more interesting data gathering using the CLI: ``` syspath_sleuth_injector --install --custom my_custom_syspath_sleuth.py ``` That file must have a class named `SysPathSleuth` and wrap the `sys.path` or the `syspath_sleuth_injector` will reject it. _See `src/runtime_syspath/syspath_sleuth/syspath_sleuth.py` for out-of-box implementation._ Think along the lines of providing telemetry as long-running programs wheedle there ways over their execution paths using logger `Handler` that sending data to a service. See example uses in the [`examples` subdirectory of this project](https://github.com/gkedge/runtime-syspath/tree/master/examples) .
/runtime-syspath-0.2.14.tar.gz/runtime-syspath-0.2.14/README.md
0.841696
0.877896
README.md
pypi
import os import re import sys from itertools import chain from pathlib import Path, PurePath from string import Template from types import ModuleType from typing import Dict, List, Optional, Pattern, Set, Tuple, Union from .syspath_path_utils import get_project_root_dir from .syspath_sleuth import get_customize_path _STD_SYSPATH_FILTER: Union[None, Pattern] = None PATH_TO_PROJECT_PLACEHOLDER = "path_to_project" def init_std_syspath_filter(std_syspath_filter: Pattern) -> None: """ Provide a globally bound standard filter Pattern applied to all subsequent sys.path filtering operations. Note: a init_std_syspath_filter() is called from __init__.py to default to a globally applied pattern. :param std_syspath_filter: pattern to apply to all filter operations. Can be None. :return: None """ # pylint: disable=global-statement global _STD_SYSPATH_FILTER # pylint: enable=global-statement _STD_SYSPATH_FILTER = std_syspath_filter def filtered_sorted_syspath( path_filter: Pattern = None, no_filtering: bool = False, sort: bool = False, unique: bool = False, ) -> List[str]: """ Filter and sort the sys.path for only paths of interest. 
:param path_filter: a pattern that the caller can provide in addition to the std_syspath_filter :param no_filtering: allow user to not filter at all :param sort: allow caller to sort the filtered (if filtering) sys.path :param unique: allow caller to only return unique members of sys.path :return: sys.path with filtering and sorting applied """ paths: List[str] = sys.path if not no_filtering: path: str if _STD_SYSPATH_FILTER: paths = [path for path in paths if not re.search(_STD_SYSPATH_FILTER, path)] if path_filter: paths = [path for path in paths if not re.search(path_filter, path)] if sort: paths = sorted(paths, reverse=True) if unique: unique_paths: List[str] = [] for path in paths: if path not in unique_paths: unique_paths.append(path) paths = unique_paths return paths def print_syspath( path_filter: Pattern = None, no_filtering: bool = False, sort: bool = True, unique: bool = False ) -> None: """ Filter and sort the sys.path for only paths of interest. :param path_filter: a pattern that the caller can provide in addition to the std_syspath_filter :param no_filtering: caller user to not filter at all :param sort: allow caller to sort the filtered (if filtering) sys.path :param unique: allow caller to only return unique members of sys.path :return: None """ paths: List[str] = filtered_sorted_syspath(path_filter, no_filtering, sort, unique) print(f"\nsys.path({len(paths)} paths):") path: str for path in paths: print(f"\t{path}") def persist_syspath( user_provided_project_dir: Path = None, force_pth_dir_creation: bool = False, path_filter: Pattern = None, ) -> None: """ Persist a set of ordered [000-999]*.pth.template files that represent each project-related entry in the sys.path. The files are persisted into the /pathto/projectroot/pths directory. If caller did not supply the /pathto/projectroot via 'user_provided_project_dir', attempt to determine that. 
:param user_provided_project_dir: root of project using persist_syspath() :param force_pth_dir_creation: for directory creation :param path_filter: a pattern that the user can provide in addition to the std_syspath_filter :return: None """ project_dir: PurePath = ( user_provided_project_dir if user_provided_project_dir else get_project_root_dir() ) template_dir: Path = Path(project_dir / "pths") if not template_dir.exists(): create = force_pth_dir_creation or input( f"Create {template_dir}? [y,n] " ).strip().lower().startswith("y") if create: template_dir.mkdir(mode=0o766) sys_paths: List[str] = filtered_sorted_syspath(path_filter, unique=True) for index, sys_path_str in enumerate(sys_paths): if not sys_path_str.startswith(os.fspath(project_dir)): continue pth_path = Path(sys_path_str) relative_pth: PurePath = pth_path.relative_to(project_dir) # A project's root directory is rarely added to sys.path by a targeted application. It is # more likely that it was added by python itself when executing a module, e.g.: # python -m pytest ... or even when the Pycharm debugger is used. Skipping persisting # project_dir in sys.path if relative_pth == project_dir: continue template_path = Path( template_dir, f"{index:03d}_{project_dir.stem}_" f"{os.fspath(relative_pth).replace(os.sep, '_')}.pth.template", ) if template_path.exists(): continue with template_path.open("x") as template_path_f: # Write template that can be converted to wherever a project's clones are # rooted using inject_project_pths_to_site() relative_pth = ( os.sep + os.fspath(relative_pth) if relative_pth != Path("root") else Path("") ) template_path_f.write(f"${{{PATH_TO_PROJECT_PLACEHOLDER}}}{relative_pth}\n") dedup_pth_templates(template_dir) def inject_project_pths_to_site(user_provided_project_dir: PurePath = None) -> None: """ Iterate through all templates in /pathto/projectroot/pths converting the templates to the paths rooted to the current /pathto/projectroot. 
If caller did not supply the /pathto/projectroot via 'user_provided_project_dir', attempt to determine that. :param user_provided_project_dir: root of project using inject_project_pths_to_site() """ project_dir: Path = ( Path(user_provided_project_dir) if user_provided_project_dir else Path(get_project_root_dir()) ) clear_site_pths(project_dir.stem) template_dir: Path = Path(project_dir / "pths") if not template_dir.exists(): print(f"No pth templates found within {os.fspath(template_dir)}") return site_path = get_customize_path()[0].parent pth_templates = get_pth_templates(template_dir) for template_path in pth_templates: site_pth_path: Path = site_path / template_path.stem with site_pth_path.open("w") as site_pth_path_f: site_pth_path_f.write(pth_templates[template_path]) def clear_site_pths(project_name: str) -> None: site_path = get_customize_path()[0].parent project_site_pth: Path for project_site_pth in site_path.glob(f"[0-9][0-9][0-9]_{project_name}_*.pth"): project_site_pth.unlink() def dedup_pth_templates(template_dir) -> None: """ get_pth_templates dedup's for us; just making it obvious that the return value has no value. :param template_dir: """ get_pth_templates(template_dir) def get_pth_templates(template_dir: Path) -> Dict[Path, str]: """ For each template in template_dir, fill in template with 'project_name' and clean up any templates that would represent the same path being added to sys.path. 
:param template_dir: :return: dictionary mapping template Path to the filled-in string """ substitution_map: Dict[str, str] = {PATH_TO_PROJECT_PLACEHOLDER: os.fspath(template_dir.parent)} pth_templates: Dict[Path, str] = {} filled_in_path_to_file_map: Dict[str, str] = {} templates_paths: List[Path] = list(template_dir.glob("*.pth.template")) templates_paths.sort() template_path: Path for template_path in templates_paths: with template_path.open() as template_f: template_str = template_f.read().strip() filled_in_path = Template(template_str).substitute(substitution_map) if filled_in_path in filled_in_path_to_file_map: # There are duplicate files (ordered differently) that contain the same path to be # added to sys.path. The first one wins, all other template files having the same # paths for addition to sys.path are deleted. print( f"{template_path.name}'s {filled_in_path} already represented with " f"{filled_in_path_to_file_map[filled_in_path]}.\n\tDeleting {template_path.name}" ) template_path.unlink() continue filled_in_path_to_file_map[filled_in_path] = template_path.name pth_templates[template_path] = filled_in_path return pth_templates def add_srcdirs_to_syspath(user_provided_project_dir: PurePath = None) -> None: """ Add all src directories under current working directory to sys.path. If caller did not supply the /pathto/projectroot via 'user_provided_project_dir', attempt to determine that, walk up ancestry to find a directory containing a 'src' directory. Waking up allows for being within in the 'tests' directory when initiating tests against modules under 'root/src'. Searching for 'src' directories is NOT limited to finding the '<project root>/src' (and 'src' directories under that '<project root>/src' directory)! All those will be found and added, but also any other 'src' directory found under the <project root>/tests tree. This is desired since git subprojects may be under 'tests' and their 'src' directories need to be included. 
:param user_provided_project_dir: root of project using inject_project_pths_to_site() :return: None """ project_dir: Path = ( Path(user_provided_project_dir) if user_provided_project_dir else Path(get_project_root_dir()) ) prior_sys_path = sys.path.copy() src: Path for src in chain.from_iterable([project_dir.glob("src"), project_dir.glob("tests/**/src")]): tested_src_str = str(src) if src.is_dir() and tested_src_str not in sys.path: sys.path.append(tested_src_str) diff_path_strs: Set[str] = set(prior_sys_path).symmetric_difference(set(sys.path)) if len(diff_path_strs) > 0: diff_path_strs = {Path(diff_path_str).as_posix() for diff_path_str in diff_path_strs} print(f"Added to sys.path: {sorted(diff_path_strs)}") def get_package_and_max_relative_import_dots( module_name: str, ) -> Tuple[Optional[str], Optional[str]]: """ Derive the fully-qualified package related to already-imported module named by 'module_name'. In addition, return the number of relative dots that can be used in that module before either of the following occur: ValueError: attempted relative import beyond top-level package ImportError: attempted relative import beyond top-level package :param module_name: module name of already-imported module :return: fully-qualified package and max relative dots. """ target_module: ModuleType = sys.modules[module_name] dots: str = "" if not target_module.__package__ else "." dot_count: int = target_module.__package__.count(".") if target_module.__package__ else 0 dots: str = dots + "".join("." for i in range(0, dot_count)) return target_module.__package__, dots
/runtime-syspath-0.2.14.tar.gz/runtime-syspath-0.2.14/src/runtime_syspath/syspath_utils.py
0.618896
0.228329
syspath_utils.py
pypi
from abc import ABCMeta, abstractmethod from collections.abc import Mapping as MappingCol, Collection from contextlib import suppress from functools import lru_cache, wraps from inspect import isclass, isfunction, ismethod, signature, unwrap from typing import Any, Callable, Iterable, Mapping, Tuple, Union, get_type_hints from typing_inspect import ( get_bound, is_callable_type, is_classvar, is_forward_ref, is_generic_type, is_literal_type, is_new_type, is_tuple_type, is_typevar, is_union_type, get_args, get_constraints, get_origin, ) from .typing_inspect_extensions import get_origin_or_self, is_valid_type, is_type, is_typed_dict from .utils import evaluate_forward_reference, get_func_type_hints, type_repr __all__ = ["check_type", "check_types", "TypeChecker", "USE_CACHING"] USE_CACHING = True if USE_CACHING: is_valid_type = lru_cache(maxsize=4096)(is_valid_type) get_type_hints = lru_cache(maxsize=4096)(get_type_hints) evaluate_forward_reference = lru_cache(maxsize=512)(evaluate_forward_reference) cache_decorator = lru_cache(maxsize=4096) else: def cache_decorator(f): return f def check_type(instance, type_or_hint, *, is_argument: bool = True) -> None: type_checker = TypeChecker.get(type_or_hint, is_argument=is_argument) return type_checker.check_type(instance) def check_types(class_or_func): """ Use this decorator to check the type(s) of a class or a function: - in case of a class, any instance of the class will be type-checked - in case of a function, arguments and return values will """ if isclass(class_or_func): attribute_hints = get_type_hints(class_or_func) attribute_checkers = {name: TypeChecker.get(hint) for name, hint in attribute_hints.items()} @wraps(class_or_func) def wrapped(*args, **kwargs): instance = class_or_func(*args, **kwargs) for attr_name, checker in attribute_checkers.items(): val = getattr(instance, attr_name) try: checker.check_type(val) except TypeError as e: raise TypeError( f"Attribute: '{attr_name}' of instance: '{instance}' with value: 
'{val}' has wrong type." ) from e return instance elif isfunction(class_or_func) or ismethod(class_or_func): func = unwrap(class_or_func, stop=lambda f: hasattr(f, "__code__")) func_signature = signature(func) func_type_hints = dict(get_func_type_hints(func)) # CONSIDER: this does not take well in account TypeVars: you could end up with a return value with a different # type as argument, even though they have the same TypeVar. return_checker = TypeChecker.get(func_type_hints.pop("return")) argument_checkers = {name: TypeChecker.get(hint, is_argument=True) for name, hint in func_type_hints.items()} @wraps(class_or_func) def wrapped(*args, **kwargs): bound_sig = func_signature.bind(*args, **kwargs) for name, val in bound_sig.arguments.items(): try: argument_checkers[name].check_type(val) except TypeError as e: raise TypeError( f"Argument: '{name}' of : '{class_or_func}' with value: '{val}' has wrong type." ) from e return_value = class_or_func(*args, **kwargs) try: return_checker.check_type(return_value) except TypeError as e: raise TypeError( f"Return value of : '{class_or_func}' with value: '{return_value}' has wrong type." 
) from e else: return return_value else: raise NotImplementedError(f"'check_types' is not implemented for: {class_or_func}") return wrapped class TypeChecker(metaclass=ABCMeta): def __init__(self, type_): self._type = type_ self._type_repr = type_repr(type_) def __repr__(self) -> str: return f"{type(self).__qualname__}({self._type_repr})" @classmethod @cache_decorator def get(cls, type_or_hint, *, is_argument: bool = False) -> "TypeChecker": # This ensures the validity of the type passed (see typing documentation for info) type_or_hint = is_valid_type(type_or_hint, "Invalid type.", is_argument) if type_or_hint is Any: return AnyTypeChecker() if is_type(type_or_hint): return TypeTypeChecker.make(type_or_hint, is_argument) if is_literal_type(type_or_hint): return LiteralTypeChecker.make(type_or_hint, is_argument) if is_generic_type(type_or_hint): origin = get_origin_or_self(type_or_hint) if issubclass(origin, MappingCol): return MappingTypeChecker.make(type_or_hint, is_argument) if issubclass(origin, Collection): return CollectionTypeChecker.make(type_or_hint, is_argument) # CONSIDER: how to cater for exhaustible generators? 
if issubclass(origin, Iterable): raise NotImplementedError("No type-checker is setup for iterables that exhaust.") return GenericTypeChecker.make(type_or_hint, is_argument) if is_tuple_type(type_or_hint): return TupleTypeChecker.make(type_or_hint, is_argument) if is_callable_type(type_or_hint): return CallableTypeChecker.make(type_or_hint, is_argument) if isclass(type_or_hint): if is_typed_dict(type_or_hint): return TypedDictChecker.make(type_or_hint, is_argument) return ConcreteTypeChecker.make(type_or_hint, is_argument) if is_union_type(type_or_hint): return UnionTypeChecker.make(type_or_hint, is_argument) if is_typevar(type_or_hint): bound_type = get_bound(type_or_hint) if bound_type: return cls.get(bound_type) constraints = get_constraints(type_or_hint) if constraints: union_type_checkers = tuple(cls.get(type_) for type_ in constraints) return UnionTypeChecker(Union.__getitem__(constraints), union_type_checkers) else: return AnyTypeChecker() if is_new_type(type_or_hint): super_type = getattr(type_or_hint, "__supertype__", None) if super_type is None: raise TypeError(f"No supertype for NewType: {type_or_hint}. 
This is not allowed.") return cls.get(super_type) if is_forward_ref(type_or_hint): return ForwardTypeChecker.make(type_or_hint, is_argument=is_argument) if is_classvar(type_or_hint): var_type = get_args(type_or_hint, evaluate=True)[0] return cls.get(var_type) raise NotImplementedError(f"No {TypeChecker.__qualname__} is available for type or hint: '{type_or_hint}'") @classmethod @abstractmethod def make(cls, type_or_hint, is_argument: bool) -> "TypeChecker": raise NotImplementedError() @property def type(self): return self._type @abstractmethod def check_subclass(self, type_) -> None: raise NotImplementedError(f"{type(self).__qualname__} does not implement 'check_subclass'.") @abstractmethod def check_type(self, instance) -> None: raise NotImplementedError(f"{type(self).__qualname__} does not implement 'check_type'.") class AnyTypeChecker(TypeChecker): def __init__(self): super().__init__(Any) @classmethod def make(cls, type_or_hint, is_argument: bool) -> "AnyTypeChecker": return cls() def check_subclass(self, type_) -> None: pass def check_type(self, instance) -> None: pass class CallableTypeChecker(TypeChecker): def __init__(self, callable_type): super().__init__(callable_type) @classmethod def make(cls, type_or_hint, is_argument: bool) -> "CallableTypeChecker": return cls(type_or_hint) def check_subclass(self, type_) -> None: super().check_subclass(type_) def check_type(self, instance) -> None: # CONSIDER: beef this check up so it's possible to check the arguments and return types if not callable(instance): raise TypeError(f"Callable type: {self._type_repr} expects a callable. 
'{instance}' isn't.") class CollectionTypeChecker(TypeChecker): def __init__(self, collection_type, collection_checker: TypeChecker, item_checker: TypeChecker): super().__init__(collection_type) self._collection_checker = collection_checker self._item_checker = item_checker @classmethod def make(cls, type_or_hint, is_argument: bool) -> "CollectionTypeChecker": origin = get_origin(type_or_hint) origin_type_checker = ConcreteTypeChecker(origin) item_type = (get_args(type_or_hint, evaluate=True) or (Any,))[0] return cls(type_or_hint, origin_type_checker, cls.get(item_type)) def check_subclass(self, type_) -> None: self._collection_checker.check_subclass(type_) def check_type(self, instance) -> None: self._collection_checker.check_type(instance) for item in instance: try: self._item_checker.check_type(item) except TypeError as e: raise TypeError(f"Item: '{item}' of collection: '{instance}' has wrong type.") from e class ConcreteTypeChecker(TypeChecker): def __init__(self, concrete_type): super().__init__(concrete_type) @classmethod def make(cls, type_or_hint, is_argument: bool) -> "ConcreteTypeChecker": return cls(type_or_hint) def check_subclass(self, type_) -> None: if not issubclass(type_, self._type): raise TypeError(f"Type: '{type_repr(type_)}' is not consistent with expected type: '{self._type_repr}'.") def check_type(self, instance) -> None: if not isinstance(instance, self._type): raise TypeError( f"Type: '{type_repr(type(instance))}' is not consistent with expected type: '{self._type_repr}'." 
) class ForwardTypeChecker(TypeChecker): def __init__(self, forward_ref, is_argument: bool): super().__init__(forward_ref) self._is_argument = is_argument self._forward_type_checker = None @classmethod def make(cls, type_or_hint, is_argument: bool) -> "ForwardTypeChecker": return cls(type_or_hint, is_argument) def check_subclass(self, type_) -> None: return self._get_forward_type_checker(type_).check_subclass(type_) def check_type(self, instance) -> None: return self._get_forward_type_checker(instance).check_type(instance) def _get_forward_type_checker(self, instance_or_type) -> TypeChecker: if not self._forward_type_checker: if self._type.__forward_evaluated__: forward_type = self._type.__forward_value__ else: try: forward_type = evaluate_forward_reference(self._type, getattr(instance_or_type, "__module__", None)) except NameError: raise TypeError(f"I could not evaluate forward type: '{self._type_repr}' using: {instance_or_type}") self._forward_type_checker = self.get(forward_type, is_argument=self._is_argument) return self._forward_type_checker class LiteralTypeChecker(TypeChecker): def __init__(self, literal_type, literal_values): super().__init__(literal_type) self._literal_values = literal_values @classmethod def make(cls, type_or_hint, is_argument: bool) -> "LiteralTypeChecker": literals_values = get_args(type_or_hint, evaluate=True) return cls(type_or_hint, literals_values) def check_subclass(self, type_) -> None: return super().check_subclass(type_) def check_type(self, instance) -> None: if instance not in self._literal_values: raise TypeError(f"Value: {instance} is not in the list of literals: {self._literal_values}.") class MappingTypeChecker(TypeChecker): def __init__( self, mapping_type, mapping_checker: TypeChecker, key_checker: TypeChecker, value_checker: TypeChecker ): super().__init__(mapping_type) self._mapping_checker = mapping_checker self._key_checker = key_checker self._value_checker = value_checker @classmethod def make(cls, type_or_hint, 
is_argument: bool) -> "MappingTypeChecker": origin = get_origin(type_or_hint) origin_type_checker = ConcreteTypeChecker(origin) key_type, value_type = get_args(type_or_hint, evaluate=True) or (Any, Any) return cls(type_or_hint, origin_type_checker, cls.get(key_type), cls.get(value_type),) def check_subclass(self, type_) -> None: self._mapping_checker.check_subclass(type_) def check_type(self, instance) -> None: self._mapping_checker.check_type(instance) for key, val in instance.items(): try: self._key_checker.check_type(key) except TypeError as e: raise TypeError(f"Key: '{key}' of mapping: '{instance}' has wrong type.") from e try: self._value_checker.check_type(val) except TypeError as e: raise TypeError(f"Value: '{val}' of mapping: '{instance}' has wrong type.") from e class GenericTypeChecker(TypeChecker): def __init__(self, origin_type, origin_checker: TypeChecker): super().__init__(origin_type) self._origin_checker = origin_checker @classmethod def make(cls, type_or_hint, is_argument: bool) -> "GenericTypeChecker": origin = get_origin_or_self(type_or_hint) origin_type_checker = ConcreteTypeChecker.make(origin, is_argument) # CONSIDER: how do we ensure that the bound type or constraints are respected by the type? 
return cls(type_or_hint, origin_type_checker) def check_subclass(self, type_) -> None: return self._check(self._origin_checker.check_subclass, type_) def check_type(self, instance) -> None: return self._check(self._origin_checker.check_type, instance) def _check(self, check_function: Callable[[Any], None], instance_or_type): try: return check_function(instance_or_type) except TypeError: raise except Exception as e: raise NotImplementedError( f"I could not check: '{instance_or_type}' with generic type hint: '{self._type_repr}'" ) from e class TupleTypeChecker(TypeChecker): def __init__(self, tuple_type, tuple_checker: TypeChecker, item_checkers: Tuple[TypeChecker, ...]): super().__init__(tuple_type) self._tuple_checker = tuple_checker self._item_checkers = item_checkers @classmethod def make(cls, type_or_hint, is_argument: bool) -> Union[CollectionTypeChecker, "TupleTypeChecker"]: tuple_type_checker = ConcreteTypeChecker(tuple) args = get_args(type_or_hint, evaluate=True) len_args = len(args) if len_args == 2 and args[1] is Ellipsis: return CollectionTypeChecker(type_or_hint, tuple_type_checker, cls.get(args[0])) checkers = tuple(cls.get(item_type) for item_type in args) return cls(type_or_hint, tuple_type_checker, checkers) def check_subclass(self, type_) -> None: return self._tuple_checker.check_subclass(type_) def check_type(self, instance) -> None: self._tuple_checker.check_type(instance) len_instance, len_args = len(instance), len(self._item_checkers) if not len_args: if len_instance > 1: raise TypeError(f"'Tuple' expects a tuple of len: 1. " f"Tuple: '{instance}' has len: {len_instance}.") return if len(instance) != len(self._item_checkers): raise TypeError( f"'{self._type_repr}' expects a tuple of len: {len_args}. " f"Tuple: '{instance}' has len: {len_instance}." 
) for i, (checker, item) in enumerate(zip(self._item_checkers, instance)): try: checker.check_type(item) except TypeError as e: raise TypeError(f"Item: {i} of tuple: '{instance}' with value: '{item}' has wrong type.") from e class TypeTypeChecker(TypeChecker): def __init__(self, type_type, type_checker: TypeChecker): super().__init__(type_type) self._type_checker = type_checker @classmethod def make(cls, type_or_hint, is_argument: bool) -> "TypeTypeChecker": var_type = get_args(type_or_hint, evaluate=True)[0] return cls(type_or_hint, cls.get(var_type)) def check_subclass(self, type_) -> None: return super().check_subclass(type_) def check_type(self, instance) -> None: return self._type_checker.check_subclass(instance) class TypedDictChecker(TypeChecker): def __init__(self, typed_dict_type, dict_checker: TypeChecker, attribute_checkers: Mapping[str, TypeChecker]): super().__init__(typed_dict_type) self._dict_checker = dict_checker self._attribute_checkers = attribute_checkers @classmethod def make(cls, type_or_hint, is_argument: bool) -> "TypedDictChecker": attribute_hints = get_type_hints(type_or_hint) attribute_checkers = {name: cls.get(hint) for name, hint in attribute_hints.items()} dict_checker = ConcreteTypeChecker(dict) return cls(type_or_hint, dict_checker, attribute_checkers) def check_subclass(self, type_) -> None: return self._dict_checker.check_subclass(type_) def check_type(self, instance) -> None: instance_keys, typed_dict_keys = instance.keys(), self._attribute_checkers.keys() unknown_keys = instance_keys - typed_dict_keys if unknown_keys: raise TypeError( f"Keys: '{list(unknown_keys)}' of dict: {instance} are not part of typed dict: '{self._type_repr}'." ) if getattr(self._type, "__total__", True): missing_keys = typed_dict_keys - instance_keys if missing_keys: raise TypeError( f"Keys: '{list(missing_keys)}' of typed dict: '{self._type_repr}' are not set in '{instance}'." 
) for name, checker in self._attribute_checkers.items(): val = instance[name] try: checker.check_type(val) except TypeError as e: raise TypeError(f"Key: '{name}' of TypedDict: '{instance}' with value: '{val}' has wrong type.") from e class UnionTypeChecker(TypeChecker): def __init__(self, union_type, type_checkers: Tuple[TypeChecker, ...]): super().__init__(union_type) self._type_checkers = type_checkers @classmethod def make(cls, type_or_hint, is_argument: bool) -> "UnionTypeChecker": union_types = get_args(type_or_hint, evaluate=True) union_type_checkers = tuple(cls.get(type_) for type_ in union_types) return cls(type_or_hint, union_type_checkers) def check_subclass(self, type_) -> None: return self._iterate_checks((ckr.check_subclass for ckr in self._type_checkers), type_, False) def check_type(self, instance) -> None: return self._iterate_checks((ckr.check_type for ckr in self._type_checkers), instance, True) def _iterate_checks( self, check_functions: Iterable[Callable[[Any], None]], instance_or_type, instance_check: bool ) -> None: for check_function in check_functions: with suppress(TypeError): return check_function(instance_or_type) raise TypeError( f"{'Instance of' if instance_check else 'Type'}: " f"'{instance_or_type}' does not belong to: {self._type_repr}." )
/runtime_type_checker-0.5.0-py3-none-any.whl/runtime_type_checker/_checkers.py
0.81928
0.171512
_checkers.py
pypi
import sys
from inspect import signature
from types import FunctionType
from typing import Any, Callable, get_type_hints, Mapping, Sequence

try:
    from typing import _eval_type
except ImportError as e:
    raise NotImplementedError("runtime-type-checker is incompatible with the version of python used.") from e

__all__ = ["evaluate_forward_reference", "get_func_type_hints", "type_repr"]


def evaluate_forward_reference(forward_reference, module_name=None):
    """Evaluate a forward reference against the namespace of *module_name*.

    Falls back to an empty namespace when the module is unknown or has no
    ``__dict__``.
    """
    namespace = None
    if module_name:
        try:
            namespace = sys.modules[module_name].__dict__
        except (KeyError, AttributeError):
            namespace = None
    return _eval_type(forward_reference, namespace, None)


def get_func_type_hints(func: Callable[..., Any]) -> Mapping[str, Any]:
    """Map each parameter name (plus ``"return"``) to its type annotation.

    Unannotated parameters default to ``Any``. ``**kwargs`` is reported as
    ``Mapping[str, <hint>]`` and ``*args`` as ``Sequence[<hint>]``.
    """
    hints = get_type_hints(func)
    func_signature = signature(func)
    resolved: dict = {}
    for name, param in func_signature.parameters.items():
        raw_hint = hints.get(name, param.annotation)
        annotation = Any if raw_hint is param.empty else raw_hint
        if param.kind is param.VAR_KEYWORD:
            resolved[name] = Mapping[str, annotation]
        elif param.kind is param.VAR_POSITIONAL:
            resolved[name] = Sequence[annotation]
        else:
            resolved[name] = annotation
    raw_return = hints.get("return", func_signature.return_annotation)
    resolved["return"] = Any if raw_return is func_signature.empty else raw_return
    return resolved


def type_repr(type_or_hint) -> str:
    """Return a readable representation of a type.

    This function was taken verbatim from the typing module.
    """
    if isinstance(type_or_hint, type):
        module = type_or_hint.__module__
        qualname = type_or_hint.__qualname__
        return qualname if module == "builtins" else f"{module}.{qualname}"
    if type_or_hint is ...:
        return "..."
    if isinstance(type_or_hint, FunctionType):
        return type_or_hint.__name__
    return repr(type_or_hint)
/runtime_type_checker-0.5.0-py3-none-any.whl/runtime_type_checker/utils.py
0.54577
0.18591
utils.py
pypi
from inspect import _empty, signature
from functools import wraps
from typing import Callable, Literal, Iterable, Optional

from runtime_typing.typed_function import TypedFunction
from runtime_typing.utils import optional_arguments_to_decorator


@optional_arguments_to_decorator
def typed(
    obj: "Callable",
    mode: Literal["raise", "warn", "return"] = "raise",
    defer: bool = False,
    exclude: Optional[Iterable[str]] = None,
    include: Optional[Iterable[str]] = None,
) -> "Callable":
    """Decorator validating call arguments against type annotations.

    May decorate a function or a class. When a class is decorated, all its
    plain methods (including inherited ones and staticmethods) are typed;
    classmethods and nested classes are not. To type a classmethod, apply
    ``@typed`` below ``@classmethod`` explicitly.

    Parameters
    ----------
    obj
        The object to be typed (either a function or a class).
    mode
        How to handle typing violations. Default: ``'raise'``.

        + ``'raise'``: raise a ``runtime_typing.RuntimeTypingError``.
        + ``'warn'``: emit a ``runtime_typing.RuntimeTypingWarning``.
        + ``'return'``: neither raise nor warn; the decorated function
          returns a 2-tuple of its original result and a (possibly empty)
          list of ``runtime_typing.TypingViolation``.
    defer
        Whether to defer the handling of violations. Default: ``False``.
        By default each violation is handled as soon as it occurs; with
        ``defer=True`` all violations are gathered first and handled
        together.
    include
        Iterable of argument names (may contain ``"return"``) to take into
        account for type-checking. If falsey, all annotated arguments are
        checked (except those listed in ``exclude``).
    exclude
        Iterable of argument names (may contain ``"return"``) to ignore
        during type-checking. ``exclude`` prevails over ``include``.

    Example
    -------
    .. code-block:: python

        @typed
        def identity_of_int(x: int) -> int:
            return x

    >>> identity_of_int("not an int")
    RuntimeTypingError: TypingViolation in function `identity_of_int`:
    Expected type of argument `x` to be `<class 'int'>` (got `<class 'str'>`).

    Example
    -------
    Check only the return value:

    .. code-block:: python

        @typed(include=("return",))
        def check_return_only(x: int) -> str:
            return str(x)

    >>> check_return_only("not an int")  # x is not type-checked
    "not an int"
    """

    @wraps(obj)
    def validated(*args, **kwargs):
        func_parameters = signature(obj).parameters
        # Map positional arguments onto their parameter names, then let
        # explicitly passed keyword arguments take precedence.
        given_args = dict(zip(func_parameters.keys(), args))
        given_args.update(kwargs)
        # Fill in declared defaults for anything the caller omitted, so the
        # validator sees the full effective call.
        default_args = {
            name: parameter.default
            for name, parameter in func_parameters.items()
            if parameter.default is not _empty
        }
        call_kwargs = {**default_args, **given_args}
        typed_func = TypedFunction(
            func=obj,
            kwargs=call_kwargs,
            mode=mode,
            defer=defer,
            exclude=exclude,
            include=include,
        )
        return typed_func()

    return validated
/runtime_typing-1.0.0.tar.gz/runtime_typing-1.0.0/runtime_typing/typed.py
0.928124
0.541773
typed.py
pypi
from collections.abc import Callable, Iterable
from typing import (
    get_args,
    get_type_hints,
    Any,
    Callable as TypingCallable,
    Dict,
    _GenericAlias,
    Iterable as TypingIterable,
    List,
    Literal,
    Optional,
    Tuple,
    TypeVar,
    TypedDict,
    Union,
)
from warnings import warn

from runtime_typing.violations import (
    RuntimeTypingViolation,
    ComplexRuntimeTypingViolation,
    RuntimeTypingViolationBase,
    HandleViolationMode,
    RuntimeTypingError,
    RuntimeTypingWarning,
)
from runtime_typing.utils import (
    contains,
    get_root,
    valid_args_from_literal,
    Parameter,
)


class TypedFunction:
    """One validated call of ``func`` with ``kwargs``.

    Checks every type-annotated argument (and, if annotated, the return
    value) against its hint, collecting ``RuntimeTypingViolation`` objects
    along the way. How and when violations surface is governed by ``mode``
    and ``defer``.
    """

    def __init__(
        self,
        func: TypingCallable,
        kwargs: dict,
        mode: "HandleViolationMode",
        defer: bool,
        exclude: Optional[TypingIterable[str]] = None,
        include: Optional[TypingIterable[str]] = None,
        type_var_registry: Optional[dict] = None,
    ) -> None:
        self.func = func
        self.kwargs = kwargs
        self.mode = mode
        self.defer = defer
        # Shared registry so a TypeVar bound while validating one argument
        # constrains the same TypeVar in sibling arguments.
        self.type_var_registry = type_var_registry or {}
        self.exclude = set(exclude) if exclude else set()
        self.include = set(include) if include else set()
        self.violations = []
        self.return_value = None

    @property
    def annotated_arguments(self) -> Dict[str, _GenericAlias]:
        """All type hints of ``func``, keyed by argument name (plus "return")."""
        return get_type_hints(self.func)

    @property
    def typed_arguments(self) -> Dict[str, _GenericAlias]:
        """Hints actually checked: ``include`` (or everything) minus ``exclude``."""
        include = (
            self.annotated_arguments.keys() if not self.include else self.include
        ) - self.exclude
        return {
            name: hint
            for name, hint in self.annotated_arguments.items()
            if name in include
        }

    def __call__(
        self,
    ) -> Union[Any, Tuple[Any, List[RuntimeTypingViolationBase]]]:
        """Validate arguments, invoke ``func``, validate the result."""
        for arg_name, condition in self.typed_arguments.items():
            if arg_name == "return":
                continue
            try:
                val = self.kwargs[arg_name]
            except KeyError:
                raise TypeError(
                    f"`{self.func.__name__}()` missing required positional "
                    f"argument `{arg_name}`."
                ) from None
            self.validate_entity(
                parameter=Parameter(value=val, name=arg_name),
                condition=condition,
            )
        result = self.func(**self.kwargs)
        if "return" in self.typed_arguments:
            self.validate_entity(
                parameter=Parameter(value=result, name="return"),
                condition=get_type_hints(self.func)["return"],
            )
        self.result = result
        # Deferred violations were only collected so far; surface them now
        # (raises/warns depending on `mode`; no-op for mode == "return").
        # Previously nothing ever handled deferred violations.
        if self.defer:
            self.handle_violations()
        if self.mode == "return":
            return self.result, self.violations
        return self.result

    def handle_violations(self) -> List[RuntimeTypingViolationBase]:
        """Raise or warn with all collected violations; always return them."""
        if self.violations:
            message = "\n + " + "\n + ".join(
                violation.message for violation in self.violations
            )
            if self.mode == "raise":
                raise RuntimeTypingError(message)
            if self.mode == "warn":
                warn(message, RuntimeTypingWarning)
        return self.violations

    def validate_entity(
        self,
        parameter: "Parameter",
        condition: _GenericAlias,
    ) -> "RuntimeTypingViolationBase":
        """Check whether entity of `name` and `val` violates condition,
        recursively walking through nested condition."""
        root = get_root(condition)
        try:
            # Keep the `try` around the lookup only: previously a KeyError
            # raised *inside* a validator also fell through to the
            # primitive check, masking the real error.
            validation_method = {
                Any: self.__validate_any,
                Union: self.__validate_union,
                Literal: self.__validate_literal,
                Callable: self.__validate_callable,
                Iterable: self.__validate_iterable,
                TypedDict: self.__validate_typed_dict,
                TypeVar: self.__validate_type_var,
                type: self.__validate_type,
                dict: self.__validate_dict,
                list: self.__validate_list,
                set: self.__validate_set,
                frozenset: self.__validate_frozenset,
                tuple: self.__validate_tuple,
            }[root]
        except KeyError:
            # Unknown root (plain classes, unrecognised hints): treat the
            # condition itself as a concrete type.
            self.__validate_primitive(
                parameter=parameter,
                expected_type=condition,
                constraints=tuple(),
            )
        else:
            validation_method(parameter=parameter, condition=condition)

    def __add_violation(
        self, expected: Any, got: Any, category: str, parameter_name: str
    ) -> None:
        # The violation handles itself on construction unless deferred.
        self.violations.append(
            RuntimeTypingViolation(
                obj=self.func,
                expected=expected,
                got=got,
                category=category,
                parameter_name=parameter_name,
                mode=self.mode,
                defer=self.defer,
            )
        )

    def __validate_any(
        self, parameter: "Parameter", condition: _GenericAlias
    ) -> None:
        # `Any` matches everything.
        pass

    def __validate_primitive(
        self,
        parameter: "Parameter",
        expected_type: _GenericAlias,
        constraints: Tuple[type, ...],
    ):
        """Plain isinstance() check, plus optional TypeVar constraints."""
        if constraints and not any(
            isinstance(parameter.value, constraint) for constraint in constraints
        ):
            self.__add_violation(
                expected=constraints,
                got=type(parameter.value),
                category="type of argument",
                parameter_name=parameter.name,
            )
        if not isinstance(parameter.value, expected_type):
            self.__add_violation(
                expected=expected_type,
                got=type(parameter.value),
                category="type of argument",
                parameter_name=parameter.name,
            )

    def __validate_type_var(
        self,
        parameter: "Parameter",
        condition: _GenericAlias,
    ) -> None:
        """First use binds the TypeVar to the value's type; later uses must match."""
        if condition in self.type_var_registry:
            expected_type = self.type_var_registry[condition]
        else:
            expected_type = type(parameter.value)
            self.type_var_registry[condition] = expected_type
        self.__validate_primitive(
            parameter=parameter,
            expected_type=expected_type,
            constraints=condition.__constraints__,
        )

    def __validate_typed_dict(
        self,
        parameter: "Parameter",
        condition: _GenericAlias,
    ) -> None:
        """Check dict-ness, then each declared key recursively."""
        if not isinstance(parameter.value, dict):
            self.__add_violation(
                expected=dict,
                got=type(parameter.value),
                parameter_name=parameter.name,
                category="type of argument",
            )
            return  # keys of a non-dict cannot be inspected
        for expected_key, expected_type in get_type_hints(condition).items():
            if expected_key not in parameter.value:
                self.__add_violation(
                    expected=expected_key,
                    got=None,
                    parameter_name=parameter.name,
                    category="key in TypedDict",
                )
                continue
            self.validate_entity(
                Parameter(
                    parameter.value[expected_key],
                    f"{parameter.name}.{expected_key}",
                ),
                expected_type,
            )

    def __validate_sequence(
        self,
        parameter: "Parameter",
        condition: _GenericAlias,
        sequence_type: type,
    ) -> None:
        """Validate sequence types (with potential inner_condition)."""
        if not isinstance(parameter.value, sequence_type):
            self.__add_violation(
                expected=sequence_type,
                got=type(parameter.value),
                parameter_name=parameter.name,
                category="type of argument",
            )
            return  # a non-sequence may not even be iterable
        inner_condition = get_args(condition)
        if inner_condition:  # bare `list`/`List` has no element condition
            for element_val in parameter.value:
                self.validate_entity(
                    parameter=Parameter(element_val, parameter.name),
                    condition=inner_condition[0],
                )

    def __validate_list(
        self, parameter: "Parameter", condition: _GenericAlias
    ) -> None:
        return self.__validate_sequence(parameter, condition, list)

    def __validate_set(
        self, parameter: "Parameter", condition: _GenericAlias
    ) -> None:
        return self.__validate_sequence(parameter, condition, set)

    def __validate_frozenset(
        self, parameter: "Parameter", condition: _GenericAlias
    ) -> None:
        return self.__validate_sequence(parameter, condition, frozenset)

    def __validate_iterable(
        self,
        parameter: "Parameter",
        condition: _GenericAlias,
    ) -> None:
        return self.__validate_sequence(
            parameter=parameter, condition=condition, sequence_type=Iterable
        )

    def __validate_tuple(
        self,
        parameter: "Parameter",
        condition: _GenericAlias,
    ) -> None:
        """Validate fixed-shape Tuple[...] or homogeneous Tuple[X, ...]."""
        if not isinstance(parameter.value, tuple):
            # Was `typ(parameter.value)` — a NameError at runtime.
            self.__add_violation(
                expected=tuple,
                got=type(parameter.value),
                category="type of argument",
                parameter_name=parameter.name,
            )
            return
        inner_condition = get_args(condition)
        if not inner_condition:
            # Bare `tuple`/`Tuple`: nothing more to check (previously this
            # raised IndexError on `inner_condition[-1]`).
            return
        if inner_condition[-1] is Ellipsis:
            # Tuple[X, ...]: homogeneous, arbitrary length.
            for element_val in parameter.value:
                self.validate_entity(
                    parameter=Parameter(element_val, parameter.name),
                    condition=inner_condition[-2],
                )
        else:
            if len(parameter.value) != len(inner_condition):
                self.__add_violation(
                    expected=len(inner_condition),
                    got=len(parameter.value),
                    category="length of argument",
                    parameter_name=parameter.name,
                )
            for element_val, element_condition in zip(
                parameter.value, inner_condition
            ):
                self.validate_entity(
                    parameter=Parameter(value=element_val, name=parameter.name),
                    condition=element_condition,
                )

    def __validate_union(
        self,
        parameter: "Parameter",
        condition: _GenericAlias,
        entity_or_type: Literal["entity", "type"] = "entity",
    ) -> None:
        """Accept the first alternative that validates cleanly, else report all."""
        union_violations = []
        for inner_argument in get_args(condition):
            # Trial validation in a deferred child so nothing raises early.
            aux = TypedFunction(
                func=self.func,
                defer=True,
                mode=self.mode,
                kwargs=self.kwargs,
                type_var_registry=self.type_var_registry,
            )
            if entity_or_type == "entity":
                aux.validate_entity(parameter=parameter, condition=inner_argument)
            if entity_or_type == "type":
                aux.__validate_type(parameter=parameter, condition=inner_argument)
            if not aux.violations:
                # Alternative matched: adopt any TypeVar bindings it made.
                self.type_var_registry.update(aux.type_var_registry)
                return
            union_violations += aux.violations
        if union_violations:
            self.violations.append(
                ComplexRuntimeTypingViolation(
                    violations=union_violations,
                    mode=self.mode,
                    defer=self.defer,
                )
            )

    def __validate_literal(
        self,
        parameter: "Parameter",
        condition: _GenericAlias,
    ) -> None:
        """The value must equal one of the (possibly nested) Literal arguments."""
        valid_values = valid_args_from_literal(condition)
        if not contains(valid_values, parameter.value):
            self.__add_violation(
                expected=valid_values,
                got=parameter.value,
                parameter_name=parameter.name,
                category="value of argument",
            )

    def __validate_callable(
        self,
        parameter: "Parameter",
        condition: _GenericAlias,
    ) -> None:
        """Check callability and, where annotated, the callable's signature."""
        if not callable(parameter.value):
            self.__add_violation(
                expected="collections.abc.Callable",
                got=type(parameter.value),
                parameter_name=parameter.name,
                category="type of argument",
            )
            return  # hints of a non-callable cannot be inspected
        inner_condition = get_args(condition)
        if not inner_condition:
            return
        condition_arg_types, condition_return_type = inner_condition
        val_hints = get_type_hints(parameter.value)
        val_arg_types = [
            hint for name, hint in val_hints.items() if name != "return"
        ]
        val_return_type = val_hints.get("return")
        if len(val_arg_types) != len(condition_arg_types):
            self.__add_violation(
                expected=len(condition_arg_types),
                got=len(val_arg_types),
                parameter_name=parameter.name,
                category="length of value of argument",
            )
        for index, (val_arg_type, condition_arg_type) in enumerate(
            zip(val_arg_types, condition_arg_types)
        ):
            if val_arg_type != condition_arg_type:
                self.__add_violation(
                    expected=condition_arg_type,
                    got=val_arg_type,
                    parameter_name=parameter.name,
                    category=f"{index + 1}. argument's type in callable "
                    f"argument ",
                )
        if val_return_type != condition_return_type:
            self.__add_violation(
                expected=condition_return_type,
                got=val_return_type,
                parameter_name=parameter.name,
                category="return type of callable argument",
            )

    def __validate_dict(
        self, parameter: "Parameter", condition: _GenericAlias
    ) -> None:
        """Check dict-ness and, where annotated, key and value types."""
        if not isinstance(parameter.value, dict):
            self.__add_violation(
                expected=dict,
                got=type(parameter.value),
                parameter_name=parameter.name,
                category="type of argument",
            )
            return
        inner_condition = get_args(condition)
        if not inner_condition:
            return
        key_type, value_type = inner_condition
        aux = TypedFunction(
            func=self.func,
            defer=True,
            mode=self.mode,
            kwargs=self.kwargs,
            type_var_registry=self.type_var_registry,
        )
        for key in parameter.value.keys():
            aux.validate_entity(
                parameter=Parameter(
                    value=key,
                    name=f"key in `{parameter.name}`",
                ),
                condition=key_type,
            )
        for value in parameter.value.values():
            aux.validate_entity(
                parameter=Parameter(
                    value=value,
                    name=f"value in `{parameter.name}`",
                ),
                condition=value_type,
            )
        if aux.violations:
            self.violations.append(
                ComplexRuntimeTypingViolation(
                    aux.violations, mode=self.mode, defer=self.defer
                )
            )

    def __validate_type(
        self,
        parameter: "Parameter",
        condition: _GenericAlias,
    ) -> None:
        """Validate a `Type[...]` condition: the value itself must be a class."""
        if type(parameter.value) is not type:
            self.__add_violation(
                expected=type,
                got=type(parameter.value),
                parameter_name=parameter.name,
                category="type of argument",
            )
            return  # issubclass() below requires a class
        if type(condition) is type:
            if not issubclass(parameter.value, condition):
                self.__add_violation(
                    expected=condition,
                    got=parameter.value,
                    parameter_name=parameter.name,
                    category="argument",
                )
            return
        inner_condition = get_args(condition)
        for inner_type in inner_condition:
            root = get_root(inner_type)
            if root is Any:
                continue
            if root is Union:
                self.__validate_union(
                    parameter=parameter,
                    condition=inner_type,
                    entity_or_type="type",
                )
                continue
            if root is TypeVar:
                if inner_type in self.type_var_registry:
                    # Already bound: the class must match the binding.
                    self.__validate_type(
                        parameter=parameter,
                        condition=self.type_var_registry[inner_type],
                    )
                else:
                    # First sighting binds the TypeVar to this class.
                    self.type_var_registry[inner_type] = parameter.value
                continue
            if not issubclass(parameter.value, inner_type):
                self.__add_violation(
                    expected=inner_condition,
                    got=type(parameter.value),
                    parameter_name=parameter.name,
                    category="type of argument",
                )
/runtime_typing-1.0.0.tar.gz/runtime_typing-1.0.0/runtime_typing/typed_function.py
0.906614
0.239161
typed_function.py
pypi
from abc import ABC, abstractmethod
from contextlib import suppress
from typing import Any, List, Literal, Optional
from warnings import warn


class RuntimeTypingError(Exception):
    pass


class RuntimeTypingWarning(Warning):
    pass


HandleViolationMode = Literal["raise", "warn", "return"]


class RuntimeTypingViolationBase(ABC):
    """Abstract Base Class of Violations of Typing Constraints."""

    def __init__(self, mode: "HandleViolationMode", defer: bool):
        self._mode = mode
        self._defer = defer
        # Unless deferred, a violation acts (raises/warns/returns) the
        # moment it is created.
        if not defer:
            self.handle()

    @abstractmethod
    def __add__(self, other: "RuntimeTypingViolationBase"):
        pass

    def __radd__(
        self, other: Optional["RuntimeTypingViolationBase"] = None
    ) -> "RuntimeTypingViolationBase":
        # Supports `None + violation` (and falsy left operands such as the
        # start value of sum()). Previously any non-None operand silently
        # produced None; now it delegates to __add__.
        if not other:
            return self
        return self.__add__(other)

    def __repr__(self):
        return self.message

    def handle(self, mode: Optional[Literal["raise", "warn", "return"]] = None):
        """Handle the violation (i.e. raise, warn or return it).

        Parameters
        ----------
        mode
            How to handle the violation. If set, overrides the `mode`
            attribute of the violation.
        """
        mode = mode or self._mode
        if mode == "raise":
            raise RuntimeTypingError(self.message)
        if mode == "warn":
            warn(self.message, RuntimeTypingWarning)
        return self

    @property
    @abstractmethod
    def message(self) -> str:
        pass


class ComplexRuntimeTypingViolation(RuntimeTypingViolationBase):
    """Container of multiple TypingViolations.

    Attributes
    ----------
    violations
        List of the TypingViolations.
    conjunction
        Whether the TypingViolations are AND- or OR-combined.
    message
        A human-readable message used for raising and warning.
    """

    def __init__(
        self,
        violations: List["RuntimeTypingViolation"],
        mode: "HandleViolationMode" = "raise",
        defer: bool = False,
        conjunction: Literal["and", "or"] = "or",
    ):
        self.violations = violations
        self.conjunction = conjunction
        super().__init__(mode=mode, defer=defer)

    @property
    def message(self) -> str:
        # `violations` may be empty: indexing it raises IndexError, which the
        # original `suppress(KeyError)` could never catch.
        with suppress(IndexError, KeyError):
            if self.conjunction == "or":
                first = self.violations[0]
                obj = first.obj
                category = first.category
                parameter_name = first.parameter_name
                got = first.got
                return (
                    f"TypingViolation in {obj.__class__.__name__} "
                    f"`{obj.__name__}`: Expected "
                    f"{category + ' ' if category else ''}`{parameter_name}` to "
                    f"be one of {[v.expected for v in self.violations]} (got `{got}`)."
                )
        violations_messages = "\n\t".join(
            violation.message for violation in self.violations
        )
        return f"TypingViolation:\n\t{violations_messages}"

    def __add__(
        self, other: Optional["RuntimeTypingViolationBase"] = None
    ) -> "ComplexRuntimeTypingViolation":
        # NOTE(review): combining drops self's mode/defer — the result uses
        # the defaults (raise, not deferred) and therefore handles itself
        # immediately; confirm this is intended.
        if other is None:
            return self
        if isinstance(other, ComplexRuntimeTypingViolation):
            return ComplexRuntimeTypingViolation(
                violations=self.violations + other.violations
            )
        if isinstance(other, RuntimeTypingViolation):
            return ComplexRuntimeTypingViolation(
                violations=self.violations + [other]
            )
        # Unsupported operand: let Python raise the standard TypeError
        # (previously this silently returned None).
        return NotImplemented


class RuntimeTypingViolation(RuntimeTypingViolationBase):
    """Violation against Typing Annotation.

    Attributes
    ----------
    obj
        The object the violation occurred on.
    parameter_name
        The name of the parameter the violation occurred on.
    expected
        The expected value (or type) of the parameter.
    got
        The actual value (or type) of the parameter.
    message
        A human-readable message describing the violation. This is used in
        RuntimeTypingViolation.handle() when raising or warning.
    """

    def __init__(
        self,
        obj: object,
        category: str,
        parameter_name: Any,
        expected: Any,
        got: Any,
        mode: HandleViolationMode = "raise",
        defer: bool = False,
    ) -> None:
        self.obj = obj
        self.category = category
        self.parameter_name = parameter_name
        self.expected = expected
        self.got = got
        super().__init__(mode=mode, defer=defer)

    def __add__(
        self, other: Optional["RuntimeTypingViolationBase"]
    ) -> "RuntimeTypingViolationBase":
        if other is None:
            return self
        if isinstance(other, ComplexRuntimeTypingViolation):
            return other + self
        return ComplexRuntimeTypingViolation(violations=[self, other])

    @property
    def message(self):
        # A collection of alternatives reads as "one of [...]". The original
        # tested the nonexistent attribute `__length__`, so this branch was
        # dead code; the package's documented messages for unions use the
        # "one of [...]" wording.
        if isinstance(self.expected, (list, tuple, set, frozenset)):
            expected = f"one of `{self.expected}`"
        else:
            expected = f"`{self.expected}`"
        return (
            f"TypingViolation in {self.obj.__class__.__name__} "
            f"`{self.obj.__name__}`: Expected "
            f"{self.category + ' ' if self.category else ''}`{self.parameter_name}` to "
            f"be {expected} (got `{self.got}`)."
        )
/runtime_typing-1.0.0.tar.gz/runtime_typing-1.0.0/runtime_typing/violations.py
0.953253
0.283031
violations.py
pypi
import sys
from collections import namedtuple
from collections import namedtuple as _namedtuple  # noqa: F401  (kept import surface identical)
from functools import wraps
from inspect import getmembers, isclass, isfunction
from typing import (
    get_args,
    get_origin,
    Any,
    _GenericAlias,
    Iterable,
    Literal,
    Set,
    Union,
    TypedDict,
    TypeVar,
)

# Lightweight (value, name) pair describing an argument under validation.
Parameter = namedtuple("Parameter", "value name")


def class_decorator(cls, decorator, *args, **kwargs):
    """Class decorator decorating all methods with decorator."""
    for attr_name, attr in getmembers(cls, predicate=isfunction):
        setattr(cls, attr_name, decorator(attr, *args, **kwargs))
    return cls


def optional_arguments_to_decorator(decorator):
    """Make decorator accept optional arguments and classes as objects."""

    @wraps(decorator)
    def new_decorator(*args, **kwargs):
        used_without_arguments = (
            len(args) == 1 and not kwargs and callable(args[0])
        )
        if used_without_arguments:
            target = args[0]
            if isfunction(target):
                return decorator(target)
            if isclass(target):
                return class_decorator(target, decorator)
        else:
            def apply(obj):
                if isfunction(obj):
                    return decorator(obj, *args, **kwargs)
                return class_decorator(obj, decorator, *args, **kwargs)

            return apply

    return new_decorator


def contains(iterable: Iterable, val: Any) -> bool:
    """Membership test that tolerates unhashable values."""
    try:
        return val in iterable
    except TypeError:
        return any(val == element for element in iterable)


def valid_args_from_literal(annotation: _GenericAlias) -> Set[Any]:
    """Flatten (possibly nested) Literal arguments into a set of allowed values."""
    collected = []
    for arg in get_args(annotation):
        if get_origin(arg) is Literal:
            collected.extend(valid_args_from_literal(arg))
        else:
            collected.append(arg)
    return set(collected)


def get_root(annotation: _GenericAlias) -> Union[type, Any, TypeVar]:
    """Wrapper around typing.get_origin to also identify TypeVar and Any."""
    origin = get_origin(annotation)
    if origin:
        return origin
    if type(annotation) is TypeVar:
        return TypeVar
    if version_safe_is_typeddict(annotation):
        return TypedDict
    if annotation is Any:
        return Any
    # Plain classes and unrecognised hints fall through to None; callers
    # treat that as "validate as a primitive type".


def version_safe_is_typeddict(candidate: Any) -> bool:
    if sys.version_info < (3, 10):
        from typing import _TypedDictMeta

        return isinstance(candidate, _TypedDictMeta)
    from typing import is_typeddict

    # Second check is necessary, is_typeddict(TypedDict) is surprisingly False
    return is_typeddict(candidate) or candidate is TypedDict
/runtime_typing-1.0.0.tar.gz/runtime_typing-1.0.0/runtime_typing/utils.py
0.513425
0.177045
utils.py
pypi
from __future__ import annotations

import os
import re
from pathlib import Path


class EnvLoader:
    """Load local .env file into environment variables."""

    def __init__(self, working_directory: Path | None = None) -> None:
        """
        Create .env loader.

        Args:
            working_directory: Set the working directory where file(s) will be loaded.
        """
        self._working_directory = working_directory or Path().cwd()

    def load(self, filename: str | None = None) -> bool:
        """
        Load file to environ.

        Args:
            filename: Name of environment file to load (default: ".env")

        Returns:
            True if at least one value was loaded.
        """
        filepath = self._working_directory / (filename or ".env")
        loaded_values = self._load_values(filepath)
        os.environ.update(loaded_values)
        return bool(loaded_values)

    def _load_values(self, filepath: Path) -> dict[str, str]:
        """Internal: Load values from provided filename."""
        try:
            return self._parse_env_file(filepath.read_text())
        except FileNotFoundError:
            # Missing env file is not an error: simply nothing to load.
            return {}

    def _parse_env_file(self, contents: str) -> dict[str, str]:
        """Parse env file into key-pair values.

        Blank lines, comment lines (`#`) and lines without `=` are skipped.
        """
        loaded_values: dict[str, str] = {}
        for line in contents.split("\n"):
            key, sep, value = line.partition("=")
            if not line or line.strip().startswith("#") or not sep:
                continue
            key = self._strip_export(key).strip()
            value = self._remove_lt_quotes(value.strip())
            loaded_values[key] = value
        return loaded_values

    def _remove_lt_quotes(self, in_: str) -> str:
        """Remove matched leading and trailing single or double quotes."""
        match = re.match(r"([\"'])(.*)\1$", in_)
        # Test the match itself, not the captured text: for `KEY=""` the
        # captured group is "" (falsy), and the previous truthiness test
        # wrongly returned the quotes instead of the empty string.
        return match.group(2) if match else in_

    def _strip_export(self, in_: str) -> str:
        """Remove leading 'export ' prefix, case agnostic."""
        return re.sub(r"^\s*?export\s*", "", in_, flags=re.IGNORECASE)
/runtime_yolk-1.2.3-py3-none-any.whl/runtime_yolk/env_loader.py
0.794225
0.246046
env_loader.py
pypi
from __future__ import annotations import os import re from configparser import ConfigParser from pathlib import Path from runtime_yolk.util.file_rule import get_file_name INTERPOLATE_PATTERN = "{{(.+?)}}" class ConfigLoader: """Load and store configuration data""" def __init__(self, *, working_directory: Path | None = None) -> None: """ Create a new instance of Config. Args: working_directory: Set the working directory where file(s) will be loaded. """ self._working_directory = working_directory or Path().cwd() self.config = ConfigParser(interpolation=None) self._build_default_config() # Store loaded config file names to prevent loading the same file twice. self._loaded_configs: set[Path] = set() def _build_default_config(self) -> None: """Build and populate the default config.""" self.config["DEFAULT"] = { "environment": os.getenv("YOLK_ENVIRONMENT", ""), "logging_level": os.getenv("LOGGING_LEVEL", "WARNING"), "logging_format": "%(asctime)s - %(levelname)s - %(name)s - %(message)s", } def load( self, *, config_name: str = "application", ) -> None: """ Load configuration data from a file, layers loads onto existing loaded data. Looks for the `${config_name}.ini` in the working directory. After loading the config_name the environment value is appended to the filename before the file extension. e.g. `application.ini` becomes `application_${environment}.ini`. If found, this config is loaded next. Default ConfigParser interpolation is disabled. Values with the pattern of `{{KEYWORD}}` are interpolated a single time against matching environ keys. Keywords are case sensitive. Args: config_name: The name of the configuration file without the extension. 
""" self._load(config_name, "") def _load(self, config_file: str, yolk_environment: str) -> None: """Interal recursive loader.""" _file = self._working_directory / get_file_name(config_file, yolk_environment) if _file.is_file() and _file not in self._loaded_configs: contents = self._interpolate_environment(_file.read_text()) # Load the discovered content as a configuration string # ConfigParser handles invalid content self.config.read_string(contents) self._loaded_configs.add(_file) # If the config file has an environment set, attempt to load the next file. if self.config.get("DEFAULT", "environment", fallback=None): self._load(config_file, self.config.get("DEFAULT", "environment")) def _interpolate_environment(self, contents: str) -> str: """Interpolate {{keywords}} to matching environment variable values.""" for match in re.finditer(INTERPOLATE_PATTERN, contents): contents = re.sub(match.group(0), os.getenv(match.group(1), ""), contents) return contents
/runtime_yolk-1.2.3-py3-none-any.whl/runtime_yolk/config_loader.py
0.841468
0.180089
config_loader.py
pypi
from __future__ import annotations import re from argparse import ArgumentParser from argparse import Namespace def _parse_args(arg_list: list[str] | None = None) -> Namespace: """Parse sys.argv.""" parser = ArgumentParser("Add, update, or remove env values from .env file.") parser.add_argument( "key", type=str, help="Name of environ variable to save.", ) parser.add_argument( "value", type=str, default="", nargs="?", help="Value to save.", ) parser.add_argument( "-U", "--update", action="store_true", help="Update existing value.", ) parser.add_argument( "-D", "--delete", action="store_true", help="Delete existing key.", ) parser.add_argument( "-F", "--file", action="store", default=".env", help="Specify filename and path, default is '.env'", ) return parser.parse_args(arg_list) def _read_file(file_: str) -> str: """Read in given file if exists, otherwise return empty string.""" try: with open(file_) as infile: return infile.read() except FileNotFoundError: return "" def _write_file(file_: str, contents: str) -> None: """Write contents to file as provided.""" with open(file_, "w") as outfile: outfile.write(contents) def _add_key(key: str, value: str, contents: str) -> str: """Add key=value to contents, returns contents. Raises KeyError if key exists.""" if re.search(rf"{key.upper()}(\s+)?=", contents): raise KeyError("Key already exists in target file.") lines = contents.split("\n") lines.append(f"{key.upper()}={value}") return "\n".join(lines) def _update_key(key: str, value: str, contents: str) -> str: """Update key=value, returns contents. Raises KeyError if key is missing.""" sub_pattern = re.compile(rf"{key.upper()}(\s+)?=.+") if not sub_pattern.search(contents): raise KeyError("Key to update was not found in file.") return sub_pattern.sub(f"{key.upper()}={value}", contents) def _delete_key(key: str, contents: str) -> str: """Delete key, returns contents. 
Raises KeyError if key is missing.""" sub_pattern = re.compile(rf"{key.upper()}(\s+)?=.+") if not sub_pattern.search(contents): raise KeyError("Key to update was not found in file.") lines = [line for line in contents.split("\n") if not sub_pattern.search(line)] return "\n".join(lines) def main(_args: list[str] | None = None) -> int: """Entry point for cli.""" args = _parse_args(_args) contents = _read_file(args.file) try: if args.delete: contents = _delete_key(args.key, contents) elif args.update: contents = _update_key(args.key, args.value, contents) else: contents = _add_key(args.key, args.value, contents) except KeyError as error: print(f"Error: {error}") return 1 _write_file(args.file, contents) return 0 if __name__ == "__main__": raise SystemExit(main())
/runtime_yolk-1.2.3-py3-none-any.whl/runtime_yolk/env_cli.py
0.593609
0.167627
env_cli.py
pypi
from importlib.resources import Package from pathlib import Path from os import PathLike from setuptools import Command from setuptools.command.build import build from typing import Any, Callable, List, Union PathType = str | PathLike[Any] DEFAULT_FN = "runtime_build" def load_python_config(config_file_path: Path, local_variable_name: str) -> dict: config_file_contents = config_file_path.read_text() context: dict[str, Any] = dict(__file__=str(config_file_path.resolve())) exec(config_file_contents, context) return context.get(local_variable_name, {}) def base_for_package(package: Package, src_root: PathType) -> Path: base_path = Path(src_root).resolve() for package_part in package.split("."): base_path /= package_part return base_path def load_build_args_for_package( package: Package, build_file_name: PathType, src_root: PathType ) -> dict[PathType, Callable[[Path], Path]]: build_path = base_for_package(package, src_root) / build_file_name return load_python_config(build_path, "BUILD_ARGUMENTS") def build_item( package: Package, base_name: PathType, builder: Callable[[Path], Path], src_root: PathType, ) -> Path: target_path = base_for_package(package, src_root) / base_name builder(target_path) return target_path def build_on_demand( package: Package, base_name: PathType, build_file_name: PathType = DEFAULT_FN, src_root: PathType = ".", ): build_args = load_build_args_for_package(package, build_file_name, src_root) builder = build_args.get(base_name) if builder is None: raise ValueError(f"unknown resource {base_name}") return build_item(package, base_name, builder, src_root) def build_all_at_build_time( packages: List[Package], build_file_name: PathType = DEFAULT_FN, src_root: PathType = ".", ) -> List[Path]: built = [] for package in packages: built.extend(build_all_items_for_package(package, build_file_name, src_root)) return built def build_all_items_for_package( package: Package, build_file_name: PathType = DEFAULT_FN, src_root: PathType = "." 
) -> List[Path]: build_args = load_build_args_for_package(package, build_file_name, src_root) return [ build_item(package, base_name, builder, src_root) for base_name, builder in build_args.items() ] def build_runtime_setuptools(*all_packages: List[Package]) -> Command: """ Use this in `setup.py` as follows: ```python from runtime_builder import build_runtime_setuptools RUNTIME_BUILD_PACKAGE_LIST = ["package.foo", "package.bar", ...] # list of packages containing `runtime_build` files (so there is one # at `package/foo/runtime_build` and one at `package/bar/runtime_build`) setup( ... include_package_data=True, cmdclass={ "build_runtime_builder_artifacts": build_runtime_setuptools(RUNTIME_BUILD_PACKAGE_LIST), }, ) ``` This example invokes `build_all_items_for_package` at just the right time. Using `setuptools` to do this build isn't as satisfying as `enscons`, as I've not been able to figure out a way to only include source files in the sdist. This is a consequence of `setuptools` being a really terrible legacy mess. """ build.sub_commands.insert(0, ("build_runtime_builder_artifacts", lambda x: 1)) class BuildRuntimeBuilderArtifactsCommand(Command): description = "Build runtime_builder artifacts files" user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): # Custom code to generate the .hex files for package in all_packages: built_items = build_all_items_for_package(package) return BuildRuntimeBuilderArtifactsCommand
/runtime_builder-0.1.1.tar.gz/runtime_builder-0.1.1/runtime_builder.py
0.795301
0.326298
runtime_builder.py
pypi
import logging, json from dataclasses import dataclass from datetime import datetime import dateutil.parser from robot.libraries.BuiltIn import BuiltIn from robot.libraries import DateTime as RobotDateTime from RW import platform from RW.Core import Core logger = logging.getLogger(__name__) def _overwrite_shell_rsp_stdout( rsp: platform.ShellServiceResponse, new_stdout: str, ) -> platform.ShellServiceResponse: new_rsp: platform.ShellServiceResponse = platform.ShellServiceResponse( cmd=rsp.cmd, parsed_cmd=rsp.parsed_cmd, stdout=new_stdout, stderr=rsp.stderr, returncode=rsp.returncode, status=rsp.status, body=rsp.body, errors=rsp.errors, ) return new_rsp def verify_rsp( rsp: platform.ShellServiceResponse, expected_rsp_statuscodes: list[int] = [200], expected_rsp_returncodes: list[int] = [0], contains_stderr_ok: bool = True, ) -> None: if not contains_stderr_ok and rsp.stderr: raise ValueError(f"rsp {rsp} contains unexpected stderr {rsp.stderr}") if rsp.status not in expected_rsp_statuscodes: raise ValueError(f"rsp {rsp} has unexpected HTTP status {rsp.status}") if rsp.returncode not in expected_rsp_returncodes: raise ValueError(f"rsp {rsp} has unexpected shell return code {rsp.returncode}") def _string_to_datetime(duration_str: str, date_format_str="%Y-%m-%dT%H:%M:%SZ"): now = RobotDateTime.get_current_date(result_format=date_format_str) time = RobotDateTime.convert_time(duration_str) past_date = RobotDateTime.subtract_time_from_date(now, time, result_format=date_format_str) return past_date def from_json(json_str: str): return json.loads(json_str, strict=False) def to_json(json_data: any): return json.dumps(json_str) def filter_by_time( list_data: list, field_name: str, operand: str = "filter_older_than", duration_str: str = "30m", ): results: list = [] time_to_filter = _string_to_datetime(duration_str) time_to_filter = dateutil.parser.parse(time_to_filter).replace(tzinfo=None) for row in list_data: if field_name not in row: continue row_time = 
dateutil.parser.parse(row[field_name]).replace(tzinfo=None) logger.info(f"types: {type(row_time)} {type(time_to_filter)}") logger.info(f"compare: {row_time} {time_to_filter} and >=: {row_time >= time_to_filter}") if operand == "filter_older_than": if row_time >= time_to_filter: results.append(row) elif operand == "filter_newer_than": if row_time <= time_to_filter: results.append(row) else: logger.info(f"dropped: {row}") return results def escape_str_for_exec(string: str, escapes: int = 1) -> str: """Simple helper method to escape specific characters that cause issues in the pod exec passthrough Args: string (str): original string for exec passthrough Returns: str: string with triple escaped quotes for passthrough """ string = string.replace('"', "\\" * escapes + '"') return string @dataclass class IssueCheckResults: """ Used to keep function signatures from getting too busy when passing issue data around. """ query_type: str = "" severity: int = 4 title: str = "" expected: str = "" actual: str = "" reproduce_hint: str = "" issue_found: bool = False details: str = "" next_steps: str = ""
/runwhen_cli_keywords-0.0.4-py3-none-any.whl/RW/CLI/cli_utils.py
0.640973
0.257467
cli_utils.py
pypi
import re from typing import Optional, Union import requests from RW import restclient from RW import platform from RW.Utils import utils class Kubectl: #TODO: remove and incorporate into K8s v3 rework """ Kubectl keyword library can be used to interact with Kubernetes clusters via kubectl location service. """ ROBOT_LIBRARY_SCOPE = "GLOBAL" def __init__(self): try: self.kubectl_service_endpoint = platform.import_platform_variable( "RW_KUBECTL_SERVICE_ENDPOINT" ) except ImportError: self.kubectl_service_endpoint = ( "http://kubectl-service.location.svc.cluster.local/kubectl/" ) self._kubeconfig = None def set_kubeconfig(self, kubeconfig: str): self._kubeconfig = kubeconfig def kubectl( self, *args, expected_status=None, ) -> object: """ Run kubectl command. """ if self._kubeconfig is None: raise Exception( "Kubeconfig needs to be set before running kubectl commands" ) options = " ".join(args) body = {"options": str(options), "kubeconfig": str(self._kubeconfig)} rsp = requests.post(self.kubectl_service_endpoint, json=body) content = utils.from_json(rsp.content) if expected_status is not None: if utils.is_scalar(expected_status): expected_status = [expected_status] expected_status = utils.to_int(expected_status) if content["exit_code"] not in expected_status: raise AssertionError( f"Expected exit code {expected_status} but received" + f" {content['exit_code']}" + f"\n command: {content['command']}" + f"\n stdout: {content['stdout']}" + f"\n stderr: {content['stderr']}" ) return content def stdout_to_lists(self, stdout): stdout_lists = [] for line in stdout.splitlines(): stdout_lists.append(line.split()) return stdout_lists def get_kubectl_list_column(self, stdout_lists, index: int): """ Helper function to return a column as a list from the stdout lists of a kubectl command """ result_column = [] for row in stdout_lists: result_column.append(row[index]) return result_column def remove_units( self, data_points, ): """ Iterates over list and removes units - 
``data_points`` list of string values containing numerical value substrings Examples: | RW.Kubectl.Remove Units | ${str_list} Return Value: | List of floats | """ cleaned = [] for d in data_points: numerical = float( "".join(i for i in d if i.isdigit() or i in [".", "-"]) ) cleaned.append(numerical) return cleaned
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/Kubectl.py
0.464173
0.286168
Kubectl.py
pypi
import jira from typing import Optional from RW.Utils import utils class Jira: #TODO: refactor for new platform use """ Jira is a keyword library for integrating with the Jira system. You need to provide a Jira server URL, a Jira User, and a Jira User Token to use this library. The first step is to authenticate using `Connect To Jira`. """ ROBOT_LIBRARY_SCOPE = "GLOBAL" def __init__(self) -> None: self.auth_jira = None def connect_to_jira(self, server: str, user: str, token: str) -> None: """ Authentication for Jira. This step is required before performing any Jira operations. Examples: | Import User Variable | JIRA_URL | | | | Import User Variable | JIRA_USER | | | | Import User Variable | JIRA_USER_TOKEN | | | | Connect To Jira | server=${JIRA_URL} | user=${JIRA_USER} | token=${JIRA_USER_TOKEN} | """ self.auth_jira = jira.JIRA(server, basic_auth=(user, token)) def create_issue( self, project: str, summary: str, description: str, verbose: bool = False, ) -> object: """ Create a new Jira issue. Examples: | ${issue} = | Create Issue | APP | App core dumps | Long description... | Return Value: | Issue data | """ issue = self.auth_jira.create_issue( project=project, summary=summary, description=description, issuetype={ "name": "Bug" }, # "Epic", "New Feature", "Task", "Improvement" ) if verbose: utils.debug_log( f"Jira create issue result:\n{utils.prettify(issue.__dict__)}", console=False, ) return issue # inspect issue.__dict__ for more details def get_issue( self, issue_id: str, fields: Optional[str] = None, verbose: bool = False, ) -> object: """ Get a Jira issue. ``fields`` is a comma-separated string of issue fields. Tip: You can first get the issue which will include all the issue fields, then browse through the fields and decide on a smaller set of fields to return in the result. 
Examples: | ${issue} = | Get Issue | 1234 | Return Value: | Issue data | """ issue = self.auth_jira.issue(issue_id, fields=fields) if verbose: utils.debug_log( f"Jira get issue result:\n{utils.prettify(issue.__dict__)}", console=False, ) return issue def assign_issue(self, issue_id: str, assignee: str) -> bool: """ Assign a user to the issue. Examples: | Assign Issue | 1234 | vui | Return Value: | Always returns True | """ return self.auth_jira.assign_issue(issue_id, assignee) def search_issues(self, project: str, verbose: bool = False) -> object: """ Search Jira This keyword currently returns all the issues in a project. Examples: | ${issues} = | Search Issues | APP | Return Value: | List of issues | """ issues = self.auth_jira.search_issues(f"project={project}") if verbose: utils.debug_log( f"Jira issues:\n{utils.prettify(issues.__dict__)}", console=False, ) return issues
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/Jira.py
0.516595
0.49469
Jira.py
pypi
from dataclasses import dataclass from robot.libraries.BuiltIn import BuiltIn from typing import Union from RW.Utils import utils from RW.Utils.utils import Status from RW import platform class Elasticsearch: #TODO: refactor for new platform use """ Elasticsearch is a keyword library for integrating with the Elasticsearch search engine. At this time, basic authentication is done by passing the username/password in the URL. """ ROBOT_LIBRARY_SCOPE = "GLOBAL" def __init__(self) -> None: BuiltIn().import_library("RW.HTTP") self.rw_http = BuiltIn().get_library_instance("RW.HTTP") def get_health_status( self, url: str, verbose: Union[str, bool] = False, ) -> None: """ Check the Elasticsearch cluster health status. The status can be "green", "yellow", or "red". Examples: | ${res} = | Get Health Status | ${ELASTICSEARCH_URL} | Return Value: | Health data | """ verbose = utils.to_bool(verbose) r = self.rw_http.get(f"{url}/_cluster/health") if verbose is True: platform.debug_log(r) status: Status = Status.NOT_OK if r.status_code in [200] and r.json()["status"] == "green": status = Status.OK @dataclass class Result: original_content: object content: dict status_code: int = r.status_code reason: str = r.reason cluster_name: str = r.json()["cluster_name"] cluster_status: str = r.json()["status"] ok_status: Status = status ok: int = status.value return Result(r, r.json()) def get_shard_health_status( self, url: str, index: str, verbose: Union[str, bool] = False, ) -> None: """ Check the Elasticsearch cluster shard health status. The status can be "green", "yellow", or "red". 
Examples: | ${res} = | Get Shard Health Status | ${ELASTICSEARCH_URL} | index=.geoip_databases | Return Value: | Health data | """ verbose = utils.to_bool(verbose) r = self.rw_http.get(f"{url}/_cluster/health/{index}?level=shards") if verbose is True: platform.debug_log(r) status: Status = Status.NOT_OK if r.status_code in [200] and r.json()["status"] == "green": status = Status.OK @dataclass class Result: original_content: object content: dict status_code: int = r.status_code reason: str = r.reason cluster_name: str = r.json()["cluster_name"] cluster_status: str = r.json()["status"] ok_status: Status = status ok: int = status.value return Result(r, r.json())
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/Elasticsearch.py
0.634883
0.320017
Elasticsearch.py
pypi
from typing import Union from dataclasses import dataclass from robot.libraries.BuiltIn import BuiltIn from RW.Utils import utils from RW.Utils.utils import Status class Grafana: #TODO: refactor for new platform use """ Grafana is a keyword library for integrating with the Grafana Dashboard. You need to provide a Grafana URL and a Grafana API Key to use this library. The first step is to authenticate using `Grafana Create Session`. """ ROBOT_LIBRARY_SCOPE = "GLOBAL" def __init__(self) -> None: self.session = None self.grafana_url = None self.grafana_api_key = None def grafana_create_session(self, url, api_key) -> None: """ Authentication for Grafana. This step is required before performing any Grafana operations. Examples: | Import User Variable | GRAFANA_URL | | | | Import User Variable | GRAFANA_API_KEY | | | | ${session} = | Grafana Create Session | ${GRAFANA_URL} | ${GRAFANA_API_KEY} | Return Value: | Grafana session | See also: `Grafana Close Session` """ BuiltIn().import_library("RW.HTTP") self.grafana_url = url self.grafana_api_key = api_key self.session = restclient.create_authenticated_session( self.grafana_url, token=self.grafana_api_key ) return self.session def grafana_close_session(self): """ Close down the Grafana session. Examples: | Grafana Close Session | ${session} | See also: `Grafana Create Session` """ if self.session is not None: self.rw_http.close_session(self.session) def get_health_status( self, session: Optional[object] = None, verbose: Union[str, bool] = False, ) -> None: """ Check Grafana health status. 
Examples: | Import User Variable | GRAFANA_URL | | | | Import User Variable | GRAFANA_API_KEY | | | | ${session} = | Grafana Create Session | ${GRAFANA_URL} | ${GRAFANA_API_KEY} | | ${health_status} = | RW.Grafana.Get Health Status | ${session} | | | Grafana Close Session | ${session} | | | Return Value: | Grafana health data | """ verbose = utils.to_bool(verbose) r = self.rw_http.get(f"{self.grafana_url}/api/health", session=session) if verbose is True: utils.debug_log(r) status: Status = Status.NOT_OK if r.status_code in [200] and r.json()["database"] == "ok": status = Status.OK @dataclass class Result: original_content: object content: dict status_code: int = r.status_code reason: str = r.reason ok_status: Status = status ok: int = status.value return Result(r, r.json())
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/Grafana.py
0.650023
0.336808
Grafana.py
pypi
from pdpyras import APISession from typing import Optional from RW.Utils import utils class PagerDuty: #TODO: refactor for new platform use """ PagerDuty keyword library can be used to create new incident in PagerDuty. """ ROBOT_LIBRARY_SCOPE = "GLOBAL" def __init__(self, api_token: Optional[str] = None) -> None: self.api_token = api_token def set_api_token(self, api_token=str) -> str: """ Set the PagerDuty API Token. If the token is set then subsequent calls to PagerDuty keywords such as `Create Incident` don't need to specify the token. Examples: | Import User Variable | PAGERDUTY_API_TOKEN | | RW.PagerDuty.Set API Token | ${PAGERDUTY_API_TOKEN} | """ self.api_token = api_token def _get_api_token(self) -> str: """ Return the PagerDuty API Token which was previously set using `Set API Token`. Examples: | ${pd_token} = | RW.PagerDuty.Get API Token | Return Value: | PagerDuty token | """ if self.api_token is None: raise core.TaskError("PagerDuty: API token is not defined.") return self.api_token def get_user_id( self, user_name: str, api_token: Optional[str] = None, ) -> str: """ Get the user ID for the given PagerDuty user. Examples: | ${pd_user_id} = | RW.PagerDuty.Get User ID | vui | Return Value: | PagerDuty User ID | """ if api_token is None: api_token = self._get_api_token() session = APISession(api_token) for user in session.iter_all("users"): if user_name == user["name"]: platform.debug_log( f"PagerDuty: Found user '{user['name']}'," f" ID: {user['id']}, email: {user['email']}", console=False, ) return user["id"] raise core.TaskError( f"PagerDuty: Cannot find user ID for '{user_name}'." ) def get_service_id( self, service_name: str, api_token: Optional[str] = None, ) -> str: """ Get the Service ID for the given PagerDuty service name. 
Examples: | ${pd_service_id} = | RW.PagerDuty.Get Service ID | app-a | Return Value: | PagerDuty User ID | """ if api_token is None: api_token = self._get_api_token() session = APISession(api_token) for service in session.iter_all("services"): if service_name == service["name"]: platform.debug_log( f"PagerDuty: Found service '{service['name']}'," f" ID: {service['id']}," f" description: {service['description']}", console=False, ) return service["id"] raise core.TaskError( f"PagerDuty: Cannot find service ID for '{service_name}'." ) def _create_incident( self, title: str, service_name: str, user_name: Optional[str] = None, api_token: Optional[str] = None, ) -> object: """ Create PagerDuty incident. :return: Incident result """ if api_token is None: api_token = self.api_token session = APISession(api_token) service_id: str = self.get_service_id( service_name, api_token=api_token ) payload = { "type": "incident", "title": title, "service": {"id": service_id, "type": "service_reference"}, } if user_name is not None: user_id: str = self.get_user_id(user_name, api_token=api_token) payload["assignments"] = [ {"assignee": {"id": user_id, "type": "user_reference"}} ] pd_incident = session.rpost("incidents", json=payload) platform.debug_log( f"PagerDuty: Incident details: {pd_incident}", console=False ) return pd_incident def create_incident( self, title: str, service_name: str, api_token: Optional[str] = None, ) -> object: """ Create PagerDuty incident. Examples: | ${pd_incident} = | Create Incident | App server is down | app-a | api_token=${token} | Return Value: | PagerDuty ncident data | """ return self._create_incident( title=title, service_name=service_name, api_token=api_token ) def create_incident_and_assign_user( self, title: str, service_name: str, user_name: str, api_token: Optional[str] = None, ) -> object: """ Create PagerDuty incident and assign a user to the incident. 
Examples: | ${pd_incident} = | Create Incident And Assign User | App server is down | app-a | user_name=vui | api_token=${token} | Return Value: | PagerDuty ncident data | """ return self._create_incident( title=title, service_name=service_name, user_name=user_name, api_token=api_token, )
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/PagerDuty.py
0.623835
0.269612
PagerDuty.py
pypi
import re import requests from typing import Optional, Union from RW.Utils import utils REQUESTS_TIMEOUT = 45 #TODO: delete & cleanup to simplify HTTP interfaces - still in use by HTTP module def create_session(headers: Union[str, object, None]) -> object: session = requests.Session() update_session_headers(session, headers) return session def close_session(session) -> None: session.close() def update_session_headers( session: object, headers: Union[str, object] ) -> object: if utils.is_str(headers): headers = utils.from_json(headers) session.headers.update(headers) return session.headers def get_session_headers(session: object) -> object: return session.headers class RestClient: """REST client based on requests library.""" def __init__( self, base_url: str = "", default_timeout: Union[int, str, None] = REQUESTS_TIMEOUT, ) -> None: self._base_url = base_url if default_timeout is None: self.default_timeout = None else: self.default_timeout = utils.to_int(default_timeout) def base_url(self, url: str) -> str: if not re.match(r"^http.+", url): url = self._base_url + url else: self._base_url = url return url def _requests( self, method: str, url: str, data: Optional[object] = None, headers: Union[str, object, None] = None, session: Optional[object] = None, expected_status: Union[list[int], int, None] = None, timeout: Union[int, str, None] = None, verbose: Union[bool, str] = False, console: Union[bool, str] = False, ) -> str: if timeout is None: timeout = self.default_timeout else: timeout = utils.to_int(timeout) url = self.base_url(url) if utils.is_json(data): data = utils.from_json(data) if utils.is_json(headers): headers = utils.from_json(headers) if session is None: fn = requests.request else: fn = session.request r = fn(method, url, json=data, headers=headers, timeout=timeout) if expected_status is not None: if utils.is_scalar(expected_status): expected_status = [expected_status] expected_status = utils.to_int(expected_status) if r.status_code not in 
expected_status: raise AssertionError( f"Expected {expected_status} but received {r.status_code}" + f"\n from: {url} {data}" + f"\n content: {r.text}" ) if utils.to_bool(verbose) is True: platform.debug_log( f"HTTP {method} {url}, data: {data}, timeout: {timeout}, " + f" status_code: {r.status_code}, content: {r.content}", console=utils.to_bool(console), ) return r def get(self, *args, **kwargs) -> object: return self._requests("GET", *args, **kwargs) def post(self, *args, **kwargs) -> object: return self._requests("POST", *args, **kwargs) def put(self, *args, **kwargs) -> object: return self._requests("PUT", *args, **kwargs) def patch(self, *args, **kwargs) -> object: return self._requests("PATCH", *args, **kwargs) def delete(self, *args, **kwargs) -> object: return self._requests("DELETE", *args, **kwargs)
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/restclient.py
0.535584
0.184198
restclient.py
pypi
import re, os, random, traceback
import requests
from typing import Optional, Union, List
from robot.libraries.BuiltIn import BuiltIn

from RW import platform
from RW.Utils import utils


class RWUtils:  # TODO: merge with utils
    """Utility keyword library for useful bits and bobs."""

    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def prettify(self, *args, **kwargs) -> str:
        """Pretty-format the given data for human-readable output."""
        return utils.prettify(*args, **kwargs)

    def is_string(self, val) -> bool:
        """
        Check if argument is a string.
        :param val: Value to check
        """
        return utils.is_str(val)

    def is_integer(self, val) -> bool:
        """
        Check if argument is an integer.
        :param val: Value to check
        """
        return utils.is_int(val)

    def is_boolean(self, val) -> bool:
        """
        Check if argument is a boolean.
        :param val: Value to check
        """
        return utils.is_bool(val)

    def to_json(self, *args, **kwargs) -> str:
        """
        Convert from Python dictionary to JSON string.
        """
        return utils.to_json(*args, **kwargs)

    def string_to_json(self, *args, **kwargs) -> str:
        """Convert a string to a JSON serializable object and return it.

        :param str: JSON string
        :return: JSON serializable object of the string
        """
        return utils.string_to_json(*args, **kwargs)

    def search_json(self, *args, **kwargs) -> dict:
        """Search JSON dictionary using jmespath.

        :data dict: JSON dictionary to search through.
        :pattern str: Pattern to search. See https://jmespath.org/? to test
            search strings.
        :return: JSON Dict of search results.
        """
        return utils.search_json(*args, **kwargs)

    def from_json(self, *args, **kwargs) -> object:
        """
        Convert from JSON string to Python dictionary.
        """
        return utils.from_json(*args, **kwargs)

    def to_boolean(self, v) -> bool:
        """
        Convert a value into a Boolean.
        """
        # Annotation fixed: this returns a bool, not an int.
        return utils.to_bool(v)

    def to_integer(self, v) -> Union[int, List[int]]:
        """
        Convert a value into an integer or list of integers.
        """
        return utils.to_int(v)

    def parse_url(self, url: str, verbose: bool = False) -> object:
        """
        Parse the URL into its components.
        Set the `verbose` parameter to ${true} to show the available
        components.
        """
        return utils.parse_url(url, verbose)

    def get_hostname_from_url(self, url: str, verbose: bool = False) -> str:
        """
        Get the hostname from the specified URL.
        """
        return utils.parse_url(url, verbose).netloc.split(":")[0]

    def get_port_from_url(self, url: str, verbose: bool = False) -> str:
        """
        Get the port from the specified URL.
        """
        # NOTE(review): raises IndexError when the URL carries no explicit
        # port — confirm callers always pass URLs with a ":port" suffix.
        return utils.parse_url(url, verbose).netloc.split(":")[1]

    def get_protocol_from_url(self, url: str, verbose: bool = False) -> str:
        """
        Get the protocol from the specified URL.
        :return: HTTP protocol (should be 'http' or 'https')
        """
        return utils.parse_url(url, verbose).scheme

    def get_path_from_url(self, url: str, verbose: bool = False) -> str:
        """
        Get the path from the specified URL.
        """
        return utils.parse_url(url, verbose).path

    def get_params_from_url(self, url: str, verbose: bool = False) -> str:
        """
        Get the parameters from the specified URL.
        """
        return utils.parse_url(url, verbose).params

    def get_query_string_from_url(
        self, url: str, verbose: bool = False
    ) -> str:
        """
        Get the query string from the specified URL.
        """
        return utils.parse_url(url, verbose).query

    def generate_random_integer(
        self, minimum: int, maximum: int, seed: Optional[int] = None
    ) -> int:
        """
        Generate a random integer N such that min <= N <= max.

        :param minimum: Number representing the minimum value
        :param maximum: Number representing the maximum value
        :param seed: Number to seed the random number generator. Default is
            ${none} in which case the current system time is used.
        """
        # NOTE(review): this reseeds the process-wide random module — any
        # concurrent user of `random` is affected; confirm this is intended.
        random.seed(seed)
        return random.randint(minimum, maximum)

    def encode_url(
        self, hostname: str, params: dict, verbose: bool = False
    ) -> str:
        """
        Encodes the URL and separates the URL parameters with specified
        separator set verbose to ${true} to show produced URL
        :return str
        """
        return utils.encode_url(hostname, params, verbose)
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/Utils/RWUtils.py
0.672009
0.395689
RWUtils.py
pypi
from typing import Iterable, Any, Union, Optional
import os, pprint, functools, time, json, datetime, yaml, logging, re, xml.dom.minidom, urllib.parse

import jmespath
from enum import Enum
from benedict import benedict
from robot.libraries.BuiltIn import BuiltIn

from RW import platform

logger = logging.getLogger(__name__)

# TODO: refresh funcs using outdated dependencies
# TODO: port RWUtils over to here / merge / deduplicate
# TODO: add control structure keywords


class Status(Enum):
    """Simple OK / NOT_OK status flag."""

    NOT_OK = 0
    OK = 1


def is_bytes(val) -> bool:
    """Return True if *val* is a bytes object."""
    return isinstance(val, bytes)


def is_str(val) -> bool:
    """Return True if *val* is a str."""
    return isinstance(val, str)


def is_str_or_bytes(val) -> bool:
    """Return True if *val* is str or bytes."""
    return isinstance(val, (str, bytes))


def is_int(val) -> bool:
    """Return True if *val* is an int."""
    return isinstance(val, int)


def is_float(val) -> bool:
    """Return True if *val* is a float."""
    return isinstance(val, float)


def is_bool(val) -> bool:
    """Return True if *val* is a bool."""
    return isinstance(val, bool)


def is_scalar(val) -> bool:
    """Return True if *val* is a scalar (number, string, bytes, bool, None)."""
    return isinstance(val, (int, float, str, bytes, bool, type(None)))


def is_list(val) -> bool:
    """Return True if *val* is a list."""
    return isinstance(val, list)


def is_dict(val) -> bool:
    """Return True if *val* is a dict."""
    return isinstance(val, dict)


def is_xml(val) -> bool:
    """Return True if *val* is a string/bytes that parses as XML."""
    if not val or not is_str_or_bytes(val):
        return False
    try:
        xml.dom.minidom.parseString(val)
    except xml.parsers.expat.ExpatError:
        return False
    return True


def is_yaml(val) -> bool:
    """Return True if *val* is a string/bytes that parses as YAML."""
    if not val or not is_str_or_bytes(val):
        return False
    try:
        yaml.safe_load(val)
    except yaml.scanner.ScannerError:
        return False
    return True


def is_json(val, strict: bool = False) -> bool:
    """Return True if *val* is a string/bytes that parses as JSON."""
    if not val or not is_str_or_bytes(val):
        return False
    try:
        json.loads(val, strict=strict)
    except ValueError:
        return False
    return True


def from_json(json_str, strict: bool = False) -> object:
    """Decode *json_str* if it is valid JSON; otherwise return it unchanged."""
    if is_json(json_str, strict=strict):
        return json.loads(json_str, strict=strict)
    else:
        return json_str


def to_json(data: object) -> str:
    """Serialize *data* to a JSON string."""
    return json.dumps(data)


def string_to_json(data: str) -> str:
    """Decode the JSON string *data* (raises ValueError on invalid JSON)."""
    return json.loads(data)


def search_json(data: dict, pattern: str) -> dict:
    """Search *data* with the jmespath *pattern* and return the match."""
    result = jmespath.search(pattern, data)
    return result


def from_yaml(yaml_str) -> object:
    """Decode *yaml_str* if it is valid YAML; otherwise return it unchanged."""
    if is_yaml(yaml_str):
        return yaml.load(yaml_str, Loader=yaml.SafeLoader)
    else:
        return yaml_str


def to_yaml(data: object) -> str:
    """Serialize *data* to a YAML string."""
    return yaml.dump(data)


def to_str(v) -> str:
    """Convert *v* to str, decoding bytes with unicode_escape."""
    if is_bytes(v):
        return v.decode("unicode_escape")  # remove double forward slashes
    else:
        return str(v)


def to_bool(v) -> bool:
    """
    Convert the input parameter into a boolean value.

    Accepts real booleans and the case-insensitive strings/bytes
    "true"/"false"; anything else raises a TaskError.
    """
    if is_bool(v):
        return v
    if is_str_or_bytes(v):
        # bytes compared against a str literal never match; decode first so
        # b"true" / b"false" are accepted like their str counterparts.
        text = to_str(v) if is_bytes(v) else v
        if text.lower() == "true":
            return True
        elif text.lower() == "false":
            return False
    raise platform.TaskError(f"{v!r} is not a boolean value.")


def to_int(v) -> Union[int, list[int]]:
    """
    Convert the input parameter, which may be a scalar or a list, into
    integer value(s).
    """
    if is_scalar(v):
        return int(v)
    elif is_list(v):
        return [int(x) for x in v]
    else:
        raise ValueError(f"Expected a scalar or list value (actual value: {v})")


def to_float(v) -> Union[float, list[float]]:
    """
    Convert the input parameter, which may be a scalar or a list, into
    float value(s).
    """
    if is_scalar(v):
        return float(v)
    elif is_list(v):
        return [float(x) for x in v]
    else:
        raise ValueError(f"Expected a scalar or list value (actual value: {v})")


def prettify(data) -> str:
    """Pretty-format *data* for display."""
    return pprint.pformat(data, indent=1, width=80)


def _calc_latency(func):
    """Calculate the runtime of the specified function."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # latency_params is injected by the caller: (default rounding, unit).
        (default_ndigits, unit) = kwargs.pop("latency_params")
        ndigits = kwargs.get("ndigits", default_ndigits)
        if ndigits is not None:
            ndigits = int(ndigits)
        kwargs.pop("ndigits", None)
        start_time = time.perf_counter()
        val = func(*args, **kwargs)
        end_time = time.perf_counter()
        run_time = end_time - start_time
        platform.debug_log(
            f"Executed in {run_time:.5f} secs",
            console=False,
        )
        if unit not in ["s", "ms"]:
            raise platform.TaskError(f"Latency unit is {unit!r} (should be 's' or 'ms').")
        if unit == "ms":
            run_time *= 1000.0
        return (round(run_time, ndigits), val)

    return wrapper


def latency(func, *args, **kwargs):
    """Run *func* and return ``(elapsed, result)``; see ``_calc_latency``."""

    @_calc_latency
    def doit(*args, **kwargs):
        return func(*args, **kwargs)

    return doit(*args, **kwargs)


def parse_url(url: str, verbose: bool = False) -> urllib.parse.ParseResult:
    """Parse *url* into its components (annotation fixed: a ParseResult is
    returned, not a str/int)."""
    parsed_url = urllib.parse.urlparse(url)
    if verbose:
        platform.debug_log(f"URL components: {parsed_url}", console=False)
    return parsed_url


def encode_url(hostname: str, params: dict, verbose: bool = False) -> str:
    """Append the url-encoded *params* to *hostname* and return the result.

    NOTE(review): no "?" separator is inserted between hostname and the
    query string — confirm callers pass a hostname already ending in "?".
    """
    query_string = urllib.parse.urlencode(params, quote_via=urllib.parse.quote)
    encoded_url = hostname + query_string
    if verbose:
        platform.debug_log(f"Encoded URL: {encoded_url}", console=False)
    return encoded_url


def parse_numerical(numeric_str: str):
    """Extract the numeric portion of *numeric_str* as a float."""
    return float("".join(i for i in numeric_str if i.isdigit() or i in [".", "-"]))


def parse_timedelta(timestring: str) -> datetime.timedelta:
    """Parse a duration like "1d2h3m4s" into a timedelta."""
    timedelta_regex = r"((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?"
    pattern = re.compile(timedelta_regex)
    match = pattern.match(timestring)
    if match:
        parts = {k: int(v) for k, v in match.groupdict().items() if v}
        # TODO: Deal with negative timedelta values?
        return datetime.timedelta(**parts)
    else:
        raise platform.TaskError(f"{timestring!r} is not a valid time duration.")


def stdout_to_list(stdout: str, delimiter: str = ""):
    """Split *stdout* on *delimiter* (whitespace when empty)."""
    if delimiter:
        return stdout.split(delimiter)
    return stdout.split()


def stdout_to_grid(stdout):
    """Split *stdout* into a list of whitespace-separated token rows."""
    stdout_grid = []
    for line in stdout.splitlines():
        stdout_grid.append(line.split())
    return stdout_grid


def get_stdout_grid_column(stdout_grid, index: int):
    """
    Helper function to return a column as a list from the stdout lists of a
    kubectl command
    """
    result_column = []
    for row in stdout_grid:
        result_column.append(row[index])
    return result_column


def remove_units(
    data_points,
):
    """
    Iterates over list and removes units
    """
    cleaned = []
    for d in data_points:
        numerical = float("".join(i for i in d if i.isdigit() or i in [".", "-"]))
        cleaned.append(numerical)
    return cleaned


def aggregate(method: str, column: list):
    """Aggregate *column* with the named method.

    Supported (case-insensitive): Max, Average, Minimum, Sum, First, Last.
    Raises ValueError for an unknown method (previously returned None
    silently).
    """
    method = method.capitalize()
    if method == "Max":
        return max(column)
    elif method == "Average":
        return sum(column) / len(column)
    elif method == "Minimum":
        return min(column)
    elif method == "Sum":
        return sum(column)
    elif method == "First":
        return column[0]
    elif method == "Last":
        return column[-1]
    raise ValueError(f"Unsupported aggregation method: {method}")


def yaml_to_dict(yaml_str: str):
    """Decode a YAML string into a Python object."""
    return yaml.safe_load(yaml_str)


def dict_to_yaml(data: Union[dict, benedict]):
    """Serialize a dict (or benedict) to a YAML string."""
    if isinstance(data, benedict):
        return data.to_yaml()
    return yaml.dump(data)


def list_to_string(data_list: list, join_with: str = "\n") -> str:
    """Join *data_list* entries with *join_with*."""
    return join_with.join(data_list)


def string_if_else(check_boolean: bool, if_str: str, else_str) -> str:
    """Return *if_str* when *check_boolean* is truthy, else *else_str*."""
    return if_str if check_boolean else else_str


def csv_to_list(csv_str: str, strip_entries: bool = True) -> list:
    """Split a comma-separated string into a list, optionally stripping
    whitespace from each entry; empty input yields an empty list."""
    csv_list: list = []
    if csv_str == "":
        csv_list = []
    else:
        csv_list = csv_str.split(",")
    if csv_list and strip_entries:
        csv_list = [entry.strip() for entry in csv_list]
    return csv_list


def lists_to_dict(keys: list, values: list) -> dict:
    """Zip *keys* and *values* into a dict."""
    return dict(zip(keys, values))


def templated_string_list(template_string: str, values: list, key_name="item") -> list:
    """Format *template_string* once per value, binding it to *key_name*."""
    str_list: list = []
    for value in values:
        format_map = {key_name: value}
        str_list.append(template_string.format(**format_map))
    return str_list


def create_secrets_list(*args) -> list:
    """Return the subset of *args* that are platform.Secret instances.

    (Annotation fixed: ``[platform.Secret]`` is a list literal, not a type.)
    """
    secrets_list: list = []
    for arg in args:
        if isinstance(arg, platform.Secret):
            secrets_list.append(arg)
    return secrets_list


def get_source_dir() -> str:
    """Return the directory containing the running Robot suite source."""
    builtin: BuiltIn = BuiltIn()
    src_path = builtin.get_variable_value("${SUITE SOURCE}")
    src_dir = "/".join(src_path.split("/")[:-1])
    return src_dir


def create_secret(key: str, val: Any) -> platform.Secret:
    """Wrap *key*/*val* in a platform.Secret."""
    return platform.Secret(key, val)


def merge_json_secrets(*args) -> platform.Secret:
    """Merge the JSON payloads of the given secrets into one secret.

    NOTE(review): a non-Secret or non-JSON argument stops the merge via
    ``break`` (remaining args are skipped silently) — confirm whether
    ``continue`` or an explicit error was intended.
    """
    secret_data: dict = {}
    for secret in args:
        if not isinstance(secret, platform.Secret):
            break
        secret_value = secret.value
        if not is_json(secret_value):
            break
        secret_value = from_json(secret_value)
        secret_data = {**secret_data, **secret_value}
    secret_data = to_json(secret_data)
    merged_secret: platform.Secret = platform.Secret("json_secrets", secret_data)
    return merged_secret
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/Utils/utils.py
0.616474
0.288832
utils.py
pypi
import requests, datetime, re, time, json, os
import urllib.parse  # explicit submodule import; bare `import urllib` only
                     # worked because requests happens to import urllib.parse
from dataclasses import dataclass
from typing import Union, Optional

from RW import platform
from RW.Core import Core


class Papi:  # TODO: refactor & improve docstrings
    """
    Papi is a keyword library that integrates with the RunWhen Public API.
    """

    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def __init__(self):
        self.token = None
        self.session = None
        self.base_path = "/api/v3/workspaces/"
        self._core: Core = Core()
        self.base_url = self._core.import_platform_variable("RW_API_BASE_URL")

    def _get_session(self):
        """Return the cached authenticated session, creating it on first use."""
        if self.session:
            return self.session
        self.session = platform.get_authenticated_session()
        return self.session

    def _get_json(self, url, params=None):
        """GET *url* and return the decoded JSON body.

        Returns an empty list on 404 (resource absent); raises on any
        other non-200 status. Centralizes the response handling that was
        duplicated in every keyword.
        """
        session = self._get_session()
        rsp = session.get(url, params=params)
        if rsp.status_code == 404:
            return []
        if rsp.status_code != 200:
            raise Exception(f"unexpected response from {url}: {rsp.status_code} and {rsp.text}")
        return rsp.json()

    def get_workspaces(self, names_only=False):
        """
        Fetches a list of workspaces from the RunWhen Public API.
        The results are scoped to the access of the service account.
        Examples:
        | RW.RunWhen.Papi.Get Workspaces |
        Return Value:
        | results |
        """
        url = f"{self.base_url}{self.base_path}"
        rsp = self._get_json(url)
        if names_only and "results" in rsp:
            rsp = [ws_name["name"] for ws_name in rsp["results"]]
        return rsp

    def get_slxs(self, workspace, names_only=False, short_name=True):
        """
        Fetches a list of SLXs within a workspace from the RunWhen Public API.
        The results are scoped to the access of the service account.
        Examples:
        | RW.RunWhen.Papi.Get Slxs | my-workspace |
        | RW.RunWhen.Papi.Get Slxs | my-workspace | names_only=True |
        Return Value:
        | results |
        """
        url = f"{self.base_url}{self.base_path}{workspace}/slxs"
        rsp = self._get_json(url)
        if names_only and short_name and "results" not in rsp:
            # malformed response
            rsp = []
        elif names_only and short_name:
            rsp = [slx["name"].split("--")[1] for slx in rsp["results"]]
        elif names_only:
            rsp = [slx["name"] for slx in rsp["results"]]
        return rsp

    def get_slis(self, workspace, names_only=False):
        """
        Fetches a list of SLIs present on SLXs within a workspace from the
        RunWhen Public API.
        The results are scoped to the access of the service account.
        Examples:
        | RW.RunWhen.Papi.Get Slis | my-workspace |
        | RW.RunWhen.Papi.Get Slis | my-workspace | names_only=True |
        Return Value:
        | results |
        """
        slis = []
        for slx in self.get_slxs(workspace, names_only=True):
            rsp = self.get_sli(workspace, slx, name_only=names_only)
            if rsp:
                slis.append(rsp)
        return slis

    def get_sli(self, workspace, slx, name_only=False, short_name=True):
        """
        Fetches a SLI under an SLX within a workspace from the RunWhen Public
        API.
        The results are scoped to the access of the service account.
        Examples:
        | RW.RunWhen.Papi.Get Sli | my-workspace | my-slx | |
        | RW.RunWhen.Papi.Get Sli | my-workspace | my-slx | names_only=True |
        Return Value:
        | results |
        """
        url = f"{self.base_url}{self.base_path}{workspace}/slxs/{slx}/sli"
        rsp = self._get_json(url)
        if name_only and short_name and "name" in rsp:
            rsp = rsp["name"].split("--")[1]
        elif name_only and "name" in rsp:
            rsp = rsp["name"]
        # got a 404
        elif "name" not in rsp:
            rsp = []
        return rsp

    def get_sli_recent(self, workspace, slx, values_only=False, history: str = "5m", resolution: str = "30s"):
        """
        Returns an SLI's recent values from the Metricstore through the
        RunWhen Public API.
        The results are scoped to the access of the service account.
        Examples:
        | RW.RunWhen.Papi.Get Sli Recent | my-workspace | my-slx | | | |
        | RW.RunWhen.Papi.Get Sli Recent | my-workspace | my-slx | values_only=True | history=5m | resolution=30s |
        Return Value:
        | results |
        """
        params = {
            "history": history,
            "resolution": resolution,
        }
        url = f"{self.base_url}{self.base_path}{workspace}/slxs/{slx}/sli/recent"
        rsp = self._get_json(url, params=params)
        # Fixed key mismatch: the payload key read below is "result", but the
        # guard previously checked for "results", so valid data raised KeyError.
        if values_only and "data" in rsp and "result" in rsp["data"]:
            rsp = rsp["data"]["result"][0]["values"]
        return rsp

    def get_all_recents_in_all_workspaces(self, history: str = "5m", resolution: str = "30s"):
        """
        Returns a list of all SLI recent values across all workspaces the
        service account can access through the RunWhen Public API.
        The results are scoped to the access of the service account.
        Examples:
        | RW.RunWhen.Papi.Get All Recents In All Workspaces | names_only=True | history=5m | resolution=30s |
        Return Value:
        | results |
        """
        all_ws_recents = {}
        for ws in self.get_workspaces(names_only=True):
            if ws not in all_ws_recents:
                all_ws_recents[ws] = {}
            # Fixed: history/resolution were accepted but silently dropped.
            all_ws_recents[ws] = self.get_all_recents_in_workspace(ws, history=history, resolution=resolution)
        return all_ws_recents

    def get_all_recents_in_workspace(self, workspace, history: str = "5m", resolution: str = "30s"):
        """
        Returns a list of all SLI recent values in a workspaces through the
        RunWhen Public API.
        The results are scoped to the access of the service account.
        Examples:
        | RW.RunWhen.Papi.Get All Recents In Workspace | my-workspace | history=5m | resolution=30s |
        Return Value:
        | results |
        """
        all_recents = {}
        for sli in self.get_slis(workspace, names_only=True):
            all_recents[sli] = self.get_sli_recent(
                workspace, sli, values_only=True, history=history, resolution=resolution
            )
        return all_recents

    def validate_recent_results(self, results):
        """
        EXPERIMENTAL
        TODO: finish for internal use
        """
        failures = {}
        for sli, values in results.items():
            if not values:
                failures[sli] = "no recent values"
        return failures

    def validate_all_workspace_recent_results(self, all_results):
        """
        EXPERIMENTAL
        TODO: finish for internal use
        """
        all_failures = {}
        for ws in self.get_workspaces(names_only=True):
            if ws not in all_failures:
                all_failures[ws] = {}
            if ws in all_results:
                all_failures[ws] = self.validate_recent_results(all_results[ws])
        # TODO: failure_sum is never accumulated — always reports 0.
        failure_sum = 0
        all_failures["FAILURE_SUM"] = failure_sum
        return all_failures

    def get_runsessions(self, workspace=None, results_only=True):
        """
        Returns a list of runsessions in a workspaces through the RunWhen
        Public API.
        The results are scoped to the access of the service account.
        Examples:
        | RW.RunWhen.Papi.Get Runsessions | my-workspace |
        Return Value:
        | results |
        """
        if not workspace:
            workspace = self._core.import_platform_variable("RW_WORKSPACE")
        url = f"{self.base_url}{self.base_path}{workspace}/runsessions"
        rsp = self._get_json(url)
        if results_only and "results" in rsp:
            rsp = rsp["results"]
        return rsp

    def get_runsession(self, workspace, runsession_id=None, results_only=True):
        """
        Returns a specific runsession in a workspaces through the RunWhen
        Public API.
        The results are scoped to the access of the service account.
        Examples:
        | RW.RunWhen.Papi.Get Runsession | my-workspace | 00001 |
        Return Value:
        | results |
        """
        if not runsession_id:
            runsession_id = self._core.import_platform_variable("RW_SESSION_ID")
        if not runsession_id:
            # Fixed: the ValueError was constructed but never raised.
            raise ValueError("One of the following where not provided to search with: runsession_id")
        url = f"{self.base_url}{self.base_path}{workspace}/runsessions/{runsession_id}"
        return self._get_json(url)

    def get_runrequest_report(
        self, workspace=None, slx=None, runrequest_id=None, results_only=True, template="basic_str_template"
    ):
        """
        Retrieves the report of a singular runrequest within a runsession.
        The total of all reports would typically be sent as part of a chat
        notification.
        The results are scoped to the access of the service account.
        Examples:
        | RW.RunWhen.Papi.Get Runrequest Report | my-workspace | this-slx | 00001 |
        | RW.RunWhen.Papi.Get Runrequest Report | my-workspace | this-slx | 00001 | template=console_template |
        Return Value:
        | report: str |
        """
        if not workspace:
            workspace = self._core.import_platform_variable("RW_WORKSPACE")
        if not runrequest_id:
            runrequest_id = self._core.import_platform_variable("RW_RUNREQUEST_ID")
        url = f"{self.base_url}{self.base_path}{workspace}/slxs/{slx}/runbook/runs/{runrequest_id}/report?template={template}"
        rsp = self._get_json(url)
        if results_only and "report" in rsp:
            rsp = rsp["report"]
        return rsp

    def get_runsession_report(self, workspace=None, runsession_id=None, template="basic_str_template"):
        """
        Retrieves the reports of all runrequests within a runsession and glues
        them together. The total of all reports would typically be sent as
        part of a chat notification.
        The results are scoped to the access of the service account.
        Examples:
        | RW.RunWhen.Papi.Get Runsession Report | my-workspace | 00001 |
        Return Value:
        | total_report: str |
        """
        reports = []
        if not workspace:
            workspace = self._core.import_platform_variable("RW_WORKSPACE")
        if not runsession_id:
            runsession_id = self._core.import_platform_variable("RW_SESSION_ID")
        this_slx = self._core.import_platform_variable("RW_SLX")
        if not runsession_id:
            # Fixed: the ValueError was constructed but never raised.
            raise ValueError("One of the following where not provided to search with: runsession_id")
        url = f"{self.base_url}{self.base_path}{workspace}/runsessions/{runsession_id}"
        rsp = self._get_json(url)
        if "runRequests" in rsp:
            for rr in rsp["runRequests"]:
                slx = rr["slxShortName"]
                response_time = rr["responseTime"]
                runrequest_id = rr["id"]
                # A running slx cannot request its own report as it will
                # always be empty.
                # NOTE(review): substring containment (`not in`) rather than
                # equality — confirm short names can never be substrings of
                # each other.
                if slx not in this_slx:
                    report = ""
                    if not response_time:
                        report = f"Task {slx} did not complete before the report was requested. If you would like it to wait for the report use the 'Depends On Past' setting."
                    else:
                        report = self.get_runrequest_report(
                            workspace, slx, runrequest_id=runrequest_id, template=template
                        )
                    reports.append(report)
        rsp = "\n".join(reports)
        return rsp

    def get_runsession_url(self, workspace=None, runsession_id=None, runrequest_id=None):
        """Build a frontend link for the given runsession."""
        if not workspace:
            workspace = self._core.import_platform_variable("RW_WORKSPACE")
        if not runsession_id:
            runsession_id = self._core.import_platform_variable("RW_SESSION_ID")
        frontend_url = self._core.import_platform_variable("RW_FRONTEND_URL")
        if not (runsession_id and workspace):
            # Fixed: the ValueError was constructed but never raised, after
            # which `"name" in None` would have raised TypeError below.
            raise ValueError("One of the following where not provided to search with: workspace, runsession_id")
        url = f"{self.base_url}{self.base_path}{workspace}/runsessions/{runsession_id}"
        rsp = self._get_json(url)
        runsession_link = "Runsession could not be found!"
        if "name" in rsp:
            params = {"cmd": f"get rs/{runsession_id}"}
            runsession_link = f"Platform Report For Runsession: {frontend_url}/map/{workspace}?{urllib.parse.urlencode(params, quote_via=urllib.parse.quote)}"
        return runsession_link

    def get_runsession_info(self, include_runsession_link: bool = True, include_runsession_stdout: bool = False) -> str:
        """Compose an info string from the runsession link and/or report."""
        output: str = ""
        runsession_url: str = ""
        runsession_stdout: str = ""
        if not include_runsession_link and not include_runsession_stdout:
            return output
        if include_runsession_link:
            runsession_url = f"\n{self.get_runsession_url()}"
        if include_runsession_stdout:
            runsession_stdout = f"\n{self.get_runsession_report()}"
        output = f"{runsession_url}{runsession_stdout}"
        return output
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/RunWhen/papi.py
0.596551
0.262118
papi.py
pypi
import time, os
from dataclasses import dataclass
from typing import Any, Union, Optional

from RW.Utils import utils
from RW import platform
from datetime import datetime, timezone
from dateutil.relativedelta import relativedelta
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v1.api.metrics_api import MetricsApi

# Key under which Datadog reports request status in a timeseries response.
STATUS_KEY = "status"


class Datadog:
    """
    Datadog is a keyword library for integrating with Datadog product.
    You need to provide a Datadog API Key and a Datadog App Key to use this
    library.
    """

    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def handle_timeseries_data(self, rsp, json_path: str = "series[0].pointlist[-1][1]") -> Any:
        """
        Takes a datadog timeseries response and extracts data from it using a
        jmespath json path string. Verifies the status is OK.

        Args:
            rsp (dict): the datadog timeseries response
            json_path (str, optional): the json path used to extract
                timeseries data. Defaults to "series[0].pointlist[-1][1]".

        Raises:
            Exception: raised when the status is not ok, or when no data
                could be extracted

        Returns:
            Any: varies depending on the extracted data.
        """
        # Fixed: a stray `return utils.search_json(...)` was the first
        # statement, which made the status check, the caller-supplied
        # json_path, and the error handling below unreachable dead code.
        if rsp[STATUS_KEY] != "ok":
            raise Exception(f"status of response not ok: {rsp}")
        extracted_data = utils.search_json(rsp, json_path)
        if not extracted_data:
            raise Exception(f"No data could be extracted with json path: {json_path} on rsp: {rsp}")
        return extracted_data

    def metric_query(
        self,
        api_key: platform.Secret,
        app_key: platform.Secret,
        query_str: str,
        within_time: str = "60s",
        site: str = "datadoghq.com",
    ) -> object:
        """
        Returns a timeseries result from the datadog metric timeseries API.
        You can extract data from this response using the
        handle_timeseries_data keyword.

        Args:
            api_key (platform.Secret): secret containing the datadog api string
            app_key (platform.Secret): secret containing the app key string for your app
            query_str (str): the datadog metric query string
            within_time (str, optional): the time window for the time series. Defaults to "60s".
            site (str, optional): which region to hit for the datadog API. Defaults to "datadoghq.com".

        Returns:
            object: the dictionary response containing the datadog timeseries data.
        """
        # place keys into dict for client quirk - check Configuration source
        api_key_dict = {
            "apiKeyAuth": api_key.value,
            "appKeyAuth": app_key.value,
        }
        configuration = Configuration(server_variables={"site": site}, api_key=api_key_dict)
        with ApiClient(configuration) as api_client:
            api_instance = MetricsApi(api_client)
            # Convert the "60s"-style window into a concrete [start, end]
            # unix-timestamp pair ending now (UTC).
            window = utils.parse_timedelta(within_time)
            end_time: int = int(datetime.now(timezone.utc).timestamp())
            start_time: int = int((datetime.now(timezone.utc) - window).timestamp())
            rsp = api_instance.query_metrics(
                _from=start_time,
                to=end_time,
                query=query_str,
            )
            return rsp
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/Datadog/datadog.py
0.918352
0.270809
datadog.py
pypi
import re, kubernetes, yaml, logging
from struct import unpack
import dateutil.parser
from benedict import benedict
from typing import Optional, Union
from RW import platform
from enum import Enum
from RW.Utils.utils import stdout_to_list

logger = logging.getLogger(__name__)


class K8sConnectionMixin:
    """
    A mixin class that is used to provide other classes in the K8s keyword library
    with a standardized mode of communicating with Kubernetes Clusters.
    """

    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    # stderr that is considered a success when received from a location service (regex)
    # eg: when a 'kubectl get pods' returns no resources, this has a returncode of 1,
    # and stderr, even though it's generally considered 'ok'
    ALLOWED_STDERR = [
        # "",  # Allow empty string because we may grep and filter values resulting in empty
        "Defaulted container",  # Allow defaulting to a container in a pod
        "Error from server (NotFound)",
        "No resources found in",
    ]

    class DistributionOption(Enum):
        """Supported Kubernetes distribution flavours."""

        KUBERNETES = "Kubernetes"
        GKE = "GKE"
        OPENSHIFT = "OpenShift"

    def __init__(self):
        # FIX: annotation was `list(str)` (a call expression, not a type).
        self.shell_history: list[str] = []
        self.last_shell_command: Optional[str] = None

    def clear_shell_history(self):
        """Reset the recorded shell command history."""
        self.shell_history = []

    def pop_shell_history(self):
        """Return the accumulated shell history and clear it in one step."""
        history = self.get_shell_history()
        self.clear_shell_history()
        return history

    def get_shell_history(self):
        """Return the list of shell commands issued so far."""
        return self.shell_history

    def get_last_shell_command(self):
        """Return the most recently issued shell command, or None."""
        return self.last_shell_command

    def get_binary_name(self, distrib_option: str) -> str:
        """Map a distribution option to the CLI binary name (kubectl or oc).

        Raises:
            ValueError: if the option does not match a known distribution.
        """
        if distrib_option in [K8sConnectionMixin.DistributionOption.KUBERNETES.value, K8sConnectionMixin.DistributionOption.GKE.value]:
            return "kubectl"
        if distrib_option == K8sConnectionMixin.DistributionOption.OPENSHIFT.value:
            return "oc"
        raise ValueError(f"Could not select a valid distribution option using option: {distrib_option}")

    def shell(
        self,
        cmd: str,
        target_service: platform.Service,
        kubeconfig: platform.Secret,
        shell_secrets=[],
        shell_secret_files=[],
    ):
        """Execute a shell command, which can contain kubectl (or equivalent).

        Args:
            cmd (str): an arbitrary shell command. eg: kubectl get pods | grep myapi
            target_service (platform.Service): which runwhen location service to use.
            kubeconfig (platform.Secret): a kubeconfig containing in a platform secret.
            shell_secrets (list(platform.Secret)): a list of platform secret values which can be
                accessed in the shell command with '$key'.
            shell_secret_files (list(platform.Secret)): a list of platform secret values to be
                accessible as files on the location service.

        Example:
            (in suite setup)
            ${kubeconfig}=  RW.Import Secret    kubeconfig
            ${kubectl}=     RW.Import Service   kubectl
            ${rsp}=         RW.K8s.Shell    kubectl get pods -n default
            ...             service=${kubectl}
            ...             kubeconfig=${kubeconfig}
            RW.Core.Add To Report    result of kubectl cmd was ${rsp}

        Returns:
            str: the stdout of the command as returned by the location service.
                 (DOC FIX: previously documented as the full ShellServiceResponse,
                 but the code returns rsp.stdout.)
        """
        if not target_service:
            raise ValueError(
                "A runwhen service was not provided for the kubectl command"
            )
        self.shell_history.append(cmd)
        self.last_shell_command = cmd
        logger.info("requesting command: %s", cmd)
        # FIX: annotation was the invalid form `[platform.ShellServiceRequestSecret]`.
        request_secrets: list = []
        # The kubeconfig is always mounted as a file so KUBECONFIG can point at it.
        request_secrets.append(platform.ShellServiceRequestSecret(kubeconfig, as_file=True))
        for shell_secret in shell_secrets:
            request_secrets.append(platform.ShellServiceRequestSecret(shell_secret))
        for shell_secret_file in shell_secret_files:
            request_secrets.append(platform.ShellServiceRequestSecret(shell_secret_file, as_file=True))
        env = {"KUBECONFIG": f"./{kubeconfig.key}"}
        rsp = platform.execute_shell_command(
            cmd=cmd, service=target_service, request_secrets=request_secrets, env=env
        )
        # Only raise when the failure is real: non-200/nonzero return code with
        # stderr that is not in the ALLOWED_STDERR whitelist above.
        if (
            (rsp.status != 200 or rsp.returncode > 0)
            and rsp.stderr != ""
            and not any(partial_stderr in rsp.stderr for partial_stderr in K8sConnectionMixin.ALLOWED_STDERR)
        ):
            raise ValueError(
                f"The shell service responded with HTTP: {rsp.status} RC: {rsp.returncode} and response: {rsp}"
            )
        logger.info("shell stdout: %s", rsp.stdout)
        return rsp.stdout

    def template_workload(
        self,
        workload_name: str,
        workload_namespace: str,
        workload_container: str,
    ) -> str:
        """Take in the workload variables and construct a valid string that specifies
        the namespace and container.

        Args:
            workload_name (str): a workload type in which a pod can be found such as
                deployment/my-deployment or statefulset/my-statefulset
            workload_namespace (str): a kubernetes namespace or openshift project name
            workload_container (str): a specific container within a pod, as pods may not
                default to the desired container

        Raises:
            ValueError: when the workload name or namespace is missing.

        Returns:
            workload: a string containing the the expanded workload parameters.
        """
        # Check if the namespace is provided in the workload name and return the value verbatim
        if " -n" in workload_name or " --namespace" in workload_name:
            workload = f"{workload_name}"
            return workload
        # FIX: dropped no-op f-string prefixes on the constant error messages below.
        if not workload_name:
            raise ValueError("Error: No workload is specified.")
        if not workload_namespace:
            raise ValueError("Error: Namespace is not specified.")
        if not workload_container:
            workload = f"{workload_name} -n {workload_namespace}"
        else:
            workload = f"{workload_name} -n {workload_namespace} -c {workload_container}"
        return workload

    def template_shell(
        self,
        cmd: str,
        target_service: platform.Service,
        kubeconfig: platform.Secret,
        **kwargs,
    ):
        """Similar to `shell` to run a shell command, except you may provide a templated
        string representing the shell command you wish to run.
        eg: 'kubectl get pod/{my_pod_name}'
        The templated string is formatted with values from **kwargs.

        Args:
            cmd (str): an arbitrary shell command. eg: kubectl get pods | grep myapi
            target_service (platform.Service): which runwhen location service to use.
            kubeconfig (platform.Secret): a kubeconfig containing in a platform secret.

        Returns:
            str: the stdout of the command (same contract as `shell`).
        """
        logger.info("templating a shell command: %s with the kwargs: %s", cmd, kwargs)
        cmd = cmd.format(**kwargs)
        return self.shell(cmd=cmd, target_service=target_service, kubeconfig=kubeconfig)

    def loop_template_shell(
        self,
        items: list,
        cmd: str,
        target_service: platform.Service,
        kubeconfig: platform.Secret,
        include_empty: bool = False,
        newline_as_separate: bool = False,
    ) -> list:
        """Run a templated shell command once per item, collecting the outputs.

        Args:
            items (list): values substituted into the template as {item}.
            cmd (str): templated shell command.
            target_service (platform.Service): location service to run against.
            kubeconfig (platform.Secret): kubeconfig secret.
            include_empty (bool): keep empty outputs in the result list.
            newline_as_separate (bool): split each output on newlines into
                separate result entries.

        Returns:
            list: accumulated command outputs.
        """
        outputs: list = []
        for item in items:
            output = self.template_shell(
                cmd,
                target_service,
                kubeconfig,
                item=item,
            )
            if output or include_empty is True:
                if newline_as_separate:
                    output = stdout_to_list(output, delimiter="\n")
                    if not include_empty:
                        output = [output_val for output_val in output if output_val]
                    if output:
                        outputs += output
                else:
                    outputs.append(output)
        return outputs
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/K8s/k8s_connection_mixin.py
0.830663
0.237753
k8s_connection_mixin.py
pypi
import re, kubernetes, yaml, logging, json, jmespath
from struct import unpack
import dateutil.parser
from benedict import benedict
from typing import Optional, Union
from RW import platform
from RW.Utils import utils
from enum import Enum
from .namespace_tasks_mixin import NamespaceTasksMixin

logger = logging.getLogger(__name__)


class K8s(NamespaceTasksMixin):
    """
    K8s keyword library can be used to interact with Kubernetes clusters.
    """

    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def compose_kubectl_cmd(
        self,
        kind: str,
        name: str = None,
        verb: str = "",
        verb_flags: str = "",
        label_selector: str = None,
        field_selector: str = None,
        context: str = None,
        namespace: str = None,
        output_format="yaml",
        binary_name: str = "kubectl",
        **kwargs,
    ) -> str:
        """Assemble a kubectl (or oc) command string from its component parts.

        Args:
            kind (str): resource kind, e.g. "pods" or "deployment".
            name (str, optional): resource name; combined as kind/name when no
                label selector is given.
            verb (str, optional): kubectl verb, e.g. "get".
            verb_flags (str, optional): extra flags appended after the verb.
            label_selector (str, optional): value for --selector.
            field_selector (str, optional): value for --field-selector.
            context (str, optional): value for --context.
            namespace (str, optional): value for --namespace.
            output_format (str, optional): value for -o. Defaults to "yaml".
            binary_name (str, optional): CLI binary, "kubectl" or "oc".

        Returns:
            str: the space-joined command string.
        """
        command = []
        command.append(f"{binary_name}")
        if context:
            command.append(f"--context {context}")
        if namespace:
            command.append(f"--namespace {namespace}")
        if verb and verb_flags:
            command.append(f"{verb} {verb_flags}")
        elif verb:
            command.append(f"{verb}")
        if label_selector:
            command.append(f"--selector {label_selector}")
        # A label selector targets many objects, so kind/name addressing is skipped.
        if kind and name and not label_selector:
            command.append(f"{kind}/{name}")
        elif kind:
            command.append(f"{kind}")
        if field_selector:
            command.append(f"--field-selector {field_selector}")
        if output_format:
            command.append(f"-o {output_format}")
        return " ".join(command)

    def convert_to_metric(
        self,
        command: str = None,
        data: str = None,
        search_filter: str = "",
        calculation_field: str = "",
        calculation: str = "Count",
    ) -> float:
        """Takes in a json data result from kubectl and calculation parameters to return
        a single float metric. Assumes that the return is a "list" type and automatically
        searches through the "items" list, along with other search filters provided by
        the user (using jmespath search).

        Args:
            :data str: JSON data to search through.
            :command str: The command used to generate the output (might be useful in
                expanding this function)
            :search_filter str: A jmespath filter used to help filter search results.
                See https://jmespath.org/ to test search strings.
            :calculation_field str: The field from the json output that calculation
                should be performed on/with.
            :calculation str: The type of calculation to perform. Count, Sum, Avg.
            :return: A float that represents the single calculated metric.
        """
        # FIX: was `if utils.is_json(data) == False:` plus a redundant else branch;
        # also dropped no-op f-string prefixes on constant error messages.
        if not utils.is_json(data):
            raise ValueError("Error: Data does not appear to be valid json")
        payload = json.loads(data)
        # Set search prefix to narrow down results and to support simpler user input.
        if search_filter:
            search_pattern_prefix = "items[?" + search_filter + "]"
        else:
            search_pattern_prefix = "items[]"
        # FIX: the else branch previously duplicated the literal pattern instead of
        # reusing search_pattern_prefix.
        search_results = utils.search_json(data=payload, pattern=search_pattern_prefix)
        # Return count of objects if specified.
        if calculation == "Count":
            return len(search_results)
        if not calculation_field:
            raise ValueError("Error: Calculation field must be set for calculations that are sum or avg.")
        # Check if calculation field contains results as well as anything but a number
        value_test = utils.search_json(data=payload, pattern=search_pattern_prefix + "." + calculation_field)
        if len(value_test) == 0:
            raise ValueError("Error: Could not find value at calculation field.")
        # FIX: regex is now a raw string; "\D" relied on Python tolerating the
        # unrecognized escape.
        if re.match(r"\D", str(value_test[0])):
            raise ValueError("Error: Calculation field contains string. Field must only contain values. Please verify the desired calculation field.")
        # Perform calculations
        if calculation == "Sum":
            metric = utils.search_json(data=payload, pattern="sum(" + search_pattern_prefix + "." + calculation_field + ")")
            return float(metric)
        if calculation == "Avg":
            metric = utils.search_json(data=payload, pattern="avg(" + search_pattern_prefix + "." + calculation_field + ")")
            return float(metric)
        # FIX: previously an unrecognized calculation silently returned None.
        raise ValueError(f"Error: Unsupported calculation type: {calculation}")
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/K8s/k8s.py
0.827689
0.230551
k8s.py
pypi
import logging
from sdcclient import SdMonitorClient
from dataclasses import dataclass
from typing import Union, Optional
from RW.Core import Core
from RW import platform
from RW.Utils import utils
from RW.Utils.utils import Status
from RW.Prometheus import Prometheus

logger = logging.getLogger(__name__)


class Sysdig:
    """
    Sysdig is a keyword library for integrating with the Sysdig Secure and Monitor products.
    Note: Only Sysdig Monitor product is supported at this time.
    You need to provide a Sysdig region URL and a Sysdig Monitor API Token to use this library.
    """

    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def __init__(self):
        """
        Initialize prometheus client for wrapper calls.
        """
        # Wrapped by promql_query/transform_data below.
        self._prometheus = Prometheus()

    def get_metrics_dict(self, token: platform.Secret, sdc_url: str, metric_filter: str = "") -> dict:
        """
        Return a dict of metrics that describe a metric and it's possible operations.
        Filterable by name (substring match on the metric id).

        Raises:
            ValueError: when the Sysdig client reports an error response.

        Examples:
        | Get Metrics Dict |
        Return Value:
        | Dict of metrics |
        """
        client = SdMonitorClient(token=token.value, sdc_url=sdc_url)
        # sdcclient returns an (ok, payload) pair rather than raising.
        rsp_ok, rsp = client.get_metrics()
        if not rsp_ok:
            raise ValueError(f"Received error response: {rsp}")
        filtered_rsp = {}
        if metric_filter:
            # Keep only metrics whose id contains the filter substring.
            for metric_id, metric in rsp.items():
                logger.debug("Comparing %s to %s", metric_filter, metric_id)
                if metric_filter in metric_id:
                    filtered_rsp[metric_id] = metric
            rsp = filtered_rsp
        return rsp

    def get_metrics_list(self, token: platform.Secret, sdc_url: str, metric_filter: str = "") -> list:
        """Fetches a list of metric names available. Filterable by name.

        Args:
            token (platform.Secret): the auth token used to authenticate with the sysdig endpoint.
            sdc_url (str): the sysdig endpoint.
            metric_filter (str, optional): the value used to filter metric names with. Defaults to "".

        Returns:
            list: a list of metric names.
        """
        metrics: dict = self.get_metrics_dict(token, sdc_url, metric_filter=metric_filter)
        return list(metrics.keys())

    def get_metric_data(
        self,
        token: platform.Secret,
        sdc_url: str,
        query_str: str,
        time_window: int = 600,
        sampling: Optional[int] = None,
        data_filter: Optional[str] = None,
        get_most_recent: bool = True,
    ) -> object:
        """
        Get the metrics given a Sysdig query.

        The ``time_window`` is the size of the data window. For example, 600 seconds will
        return the metrics seen in the past 10 minutes. ``sampling`` specifies the duration
        of the samples. 60 seconds sampling for a 600 seconds ``time_window`` will return
        10 metrics. To return a single metric sample, don't specify a ``sampling`` value
        (default). ``data_filter`` is used to further fine tune the query result.
        ``get_most_recent`` gets the newest metric data value.

        Refer to the Sysdig Data API for more details -
        https://docs.sysdig.com/en/docs/developer-tools/working-with-the-data-api/

        Raises:
            ValueError: when the Sysdig client reports an error response.

        Examples:
        | ${res} = | RW.Sysdig.Get Metrics | [{"id": "cpu.used.percent", "aggregations": {"time": "timeAvg", "group": "avg"}}] | 60 |
        Return Values:
        | Metric data |
        """
        client = SdMonitorClient(token=token.value, sdc_url=sdc_url)
        logger.info("Connected to endpoint: %s", sdc_url)
        # The Sysdig data API takes a negative offset for "seconds in the past"
        # and 0 for "now".
        start_time = -(time_window)
        end_time = 0
        if sampling is None:
            # One sample spanning the whole window -> a single datapoint.
            sampling = time_window
        query_data = utils.from_json(query_str)
        logger.info("Using query data:\n %s", query_data)
        rsp_ok, rsp = client.get_data(
            query_data,
            start_time,
            end_time,
            sampling,
            data_filter,
        )
        if not rsp_ok:
            raise ValueError(f"Received error response: {rsp}")
        metric_data = rsp
        logger.debug("Response metric data:\n %s", metric_data)
        # Optionally reduce the response to the newest datapoint's first value.
        if get_most_recent and len(rsp["data"]) > 0:
            metric_data = rsp["data"][-1]["d"][0]
        return metric_data

    def promql_query(
        self,
        api_url: str,
        query: str,
        target_service: platform.Service = None,
        optional_headers: platform.Secret = None,
        step="30s",
        seconds_in_past=60,
        start=None,
        end=None,
    ):
        """A wrapper method for the prometheus query method.

        This performs a Prometheus-compatible query against a sysdig promql api endpoint
        so that promql statements may be used to fetch metrics sitting behind sysdig.

        Args:
            api_url (str): the sysdig promql API url
            query (str): the promql statement to execute
            target_service (platform.Service, optional): A RunWhen location service if needed,
                used for making requests against instances in a VPC. Defaults to None.
            optional_headers (platform.Secret, optional): headers used when making requests
                against the prometheus API. Add your auth info to this. Defaults to None.
            step (str, optional): interval between datapoints returned. Defaults to "30s".
            seconds_in_past (int, optional): How far back in the past in seconds to fetch
                data. Defaults to 60.
            start (_type_, optional): overrides seconds in past and sets the start time.
                Defaults to None.
            end (_type_, optional): overrides seconds in past and sets the end time.
                Defaults to None.

        Note:
            use_unix_seconds is always passed as True to the underlying Prometheus
            client (Sysdig's promql endpoint expects unix-second timestamps); it is
            not exposed as a parameter of this wrapper.

        Returns:
            The query_range response from the underlying Prometheus client.
        """
        rsp = self._prometheus.query_range(
            api_url=api_url,
            query=query,
            target_service=target_service,
            optional_headers=optional_headers,
            step=step,
            seconds_in_past=seconds_in_past,
            start=start,
            end=end,
            use_unix_seconds=True
        )
        return rsp

    def transform_data(self, data, method):
        """Performs a transform on a list of data points, sometimes this can be very
        simple such as fetching the last value in the list.

        Args:
            data (list): a list of data points to perform a transform on
            method (str): what transform operation to perform on the list of data points

        Returns:
            float: transformed data
        """
        # Delegates entirely to the Prometheus keyword library's implementation.
        transformed = self._prometheus.transform_data(data, method)
        return transformed
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/Sysdig/Sysdig.py
0.912224
0.315736
Sysdig.py
pypi
import requests from RW import platform class StatusPage: """Used to fetch and validate data/metrics from a Uptime.com status page and its components. Returns: _type_: None """ ROBOT_LIBRARY_SCOPE = "GLOBAL" def get_component_status( self, auth_token: platform.Secret, url: str, timeout: int = 30 ) -> dict: """Returns the current operational state of a component on a status page. Refer to https://uptime.com/api/v1/docs/#/statuspages/get_component_detail for docs. Args: auth_token (platform.Secret): A Platform Secret object containing the auth token for the Uptime status page. url (str): A URL pointing to the status page's component, eg: https://uptime.com/api/v1/statuspages/{status_page_id}/components/{component_id}/ timeout (int, optional): request timeout duration. Defaults to 30. Returns: dict: a dictionary containing the current operational state converted from json contents. """ headers: dict = {"Authorization": f"token {auth_token.value}"} rsp: requests.Response = requests.get( url=url, headers=headers, timeout=timeout ) return rsp.json() def validate_component_status( self, status_data: dict, allowed_status="operational,under-maintenance" ) -> bool: """Given a component status payload, check if it's within the allowed statuses (operational, planned maintenance, etc) returning True if it is, or false if not. Args: status_data (dict): A dictionary converted from the json contents of a response. Typically from get_component_status. allowed_status (str, optional): a CSV of allowed states. Defaults to "operational,under-maintenance". Returns: bool: whether the component is in an acceptable operational state or not. """ allowed_status: list = allowed_status.split(",") if status_data["status"] in allowed_status: return True return False
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/Uptime/StatusPage.py
0.769773
0.340759
StatusPage.py
pypi
from datetime import datetime, timezone
from enum import Enum
from typing import Set

from dateutil import parser
import requests

from RW.Utils.utils import parse_timedelta

# Public statuspage.io summary endpoint for the GitHub platform.
GITHUB_SUMMARY_PAGE = "https://www.githubstatus.com/api/v2/summary.json"


class Status:
    """
    GitHub Status keyword library
    """

    class GitHubAvailability(Enum):
        """Platform-wide indicator values as reported by the GitHub status API."""

        NONE = "none"
        MINOR = "minor"
        MAJOR = "major"
        CRITICAL = "critical"

    # Indicator -> availability score in [0, 1].
    GITHUB_AVAILABILITY_MAP = {
        GitHubAvailability.NONE: 1,
        GitHubAvailability.MINOR: 0.66,
        GitHubAvailability.MAJOR: 0.33,
        GitHubAvailability.CRITICAL: 0,
    }

    class GitHubComponentAvailability(Enum):
        """Per-component status values as reported by the GitHub status API."""

        OPERATIONAL = "operational"
        DEGRADED_PERFORMANCE = "degraded_performance"
        PARTIAL_OUTAGE = "partial_outage"
        MAJOR_OUTAGE = "major_outage"

    # Component status -> availability score in [0, 1].
    GITHUB_COMPONENT_AVAILABILITY_MAP = {
        GitHubComponentAvailability.OPERATIONAL: 1,
        GitHubComponentAvailability.DEGRADED_PERFORMANCE: 0.66,
        GitHubComponentAvailability.PARTIAL_OUTAGE: 0.33,
        GitHubComponentAvailability.MAJOR_OUTAGE: 0,
    }

    @staticmethod
    def _fetch_status_page():
        """
        Helper function which will handle basic HTTP operation of fetching GitHub
        status pages.

        Raises:
            ValueError: if the status page does not respond with HTTP 200.

        Returns:
            Dictionary containing the values of the GitHub Status summary page
        """
        rsp = requests.get(GITHUB_SUMMARY_PAGE, timeout=10)
        if rsp.status_code != 200:
            raise ValueError("The GitHub Status page could not be queried")
        status_page = rsp.json()
        return status_page

    def get_github_availability(self, components: Set[str] = None):
        """
        Calculates an availability metric for the GitHub platform, between 0 and 1.
        Optionally takes a subset of components from which to calculate this total.

        When no components are provided, the score is mapped from the indicator on the
        GitHub status page using the following values:

        - ``none`` : 1
        - ``minor`` : 0.66
        - ``major`` : 0.33
        - ``critical`` : 0

        If the components are provided, this function provides the average component
        availability score of the number of components provided in the set.
        These values are mapped from the component status attribute as follows:

        - ``operational`` : 1
        - ``degraded_performance`` : 0.66
        - ``partial_outage`` : 0.33
        - ``major_outage`` : 0

        Parameters:
            components (Set[str]): Set of components to optionally calculate availability
                score from. Current possible values at time of this release are:

                - "Git Operations"
                - "API Requests"
                - "Webhooks"
                - "Issues"
                - "Pull Requests"
                - "Actions"
                - "Packages"
                - "Pages"
                - "Codespaces"
                - "Copilot"

        Raises:
            ValueError: If the components provided do not match the list fetched from GitHub

        Returns:
            Value between 0 and 1 corresponding to the availability of the GitHub platform
        """
        status_page = self._fetch_status_page()
        if components is not None:
            # Generate a list of valid components
            valid_component_names = {component_name['name'] for component_name in status_page['components']}
            invalid_component_list = []
            # Test each input item against the list of valid components; create a
            # list of invalid components if they exist
            for item in components:
                if item not in valid_component_names:
                    invalid_component_list.append(item)
            # Fail the step and identify which component isn't valid
            if len(invalid_component_list) != 0:
                raise ValueError(
                    f"{len(invalid_component_list)} component(s) is/are not found on the github status page. Invalid components are: {invalid_component_list} Valid components are: {valid_component_names} ;"
                )
            # Create a list of each component and its current status
            component_status = [
                component
                for component in status_page["components"]
                if component["name"] in components
            ]
            component_availability_score = 0.0
            # Compute the overall health score
            for component in component_status:
                component_availability_score += (
                    self.GITHUB_COMPONENT_AVAILABILITY_MAP[
                        self.GitHubComponentAvailability(component["status"])
                    ]
                )
            return component_availability_score / len(components)
        else:
            return self.GITHUB_AVAILABILITY_MAP[
                self.GitHubAvailability(status_page["status"]["indicator"])
            ]

    def get_unresolved_incidents(self, impact: str = None):
        """
        Get a list of any unresolved incidents on the GitHub platform. This function will
        only return incidents in the Investigating, Identified, or Monitoring state.

        Parameters:
            impact (str): Impact level to filter unresolved incidents to. Possible values
                are "None", "Minor", "Major", and "Critical". Filtering to a lower level
                will include all incidents of a higher impact level. For instance,
                filtering to "Minor" will include all incidents of "Minor", "Major",
                and "Critical".

        Raises:
            ValueError: If the impact level does not match the supported values

        Returns:
            List of all unresolved incidents on the GitHub platform, optionally filtered
            by impact level
        """
        incidents = self._fetch_status_page()["incidents"]
        if impact is not None:
            incident_impact_levels = ["Critical", "Major", "Minor", "None"]
            filtered_impact_levels = []
            try:
                # Include the requested impact level AND all levels above it.
                # BUG FIX: the slice previously stopped at index(impact), which
                # excluded the requested level itself, contradicting the docstring.
                filtered_impact_levels = incident_impact_levels[
                    0 : incident_impact_levels.index(impact) + 1
                ]
            except Exception as exc:
                raise ValueError(
                    f"impact {impact} must be one of: None, Minor, Major, Critical",
                ) from exc
            # NOTE(review): the status API appears to report impact values in
            # lowercase (the enums above use lowercase for the same API), so the
            # comparison is done case-insensitively — confirm against a live payload.
            allowed = {level.lower() for level in filtered_impact_levels}
            incidents = [
                incident
                for incident in incidents
                if str(incident["impact"]).lower() in allowed
            ]
        return incidents

    def get_scheduled_maintenances(self, within_time: str = None):
        """
        Get a list of any active or upcoming scheduled maintenances on the GitHub
        platform. Optionally can constrain this list to maintenances occuring during a
        specified time period.

        Parameters:
            within_time (str): String which represents a duration of time, in the format
                "1d7h10m", with possible unit values being 'd' representing days, 'h'
                representing hours, 'm' representing minutes, and 's' representing
                seconds.

        Raises:
            TaskError: If the within_time value does not represent a valid timedelta
        """
        upcoming_maintenances = self._fetch_status_page()[
            "scheduled_maintenances"
        ]
        if within_time is not None:
            # Keep only maintenances scheduled to start before now + within_time.
            upcoming_maintenances = [
                maintenance
                for maintenance in upcoming_maintenances
                if parser.parse(maintenance["scheduled_for"])
                < (datetime.now(timezone.utc) + parse_timedelta(within_time))
            ]
        return upcoming_maintenances
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/GitHub/Status.py
0.867261
0.265601
Status.py
pypi
import github
from github import Github
from typing import Optional, Union
from dataclasses import dataclass
from RW.Utils import utils


class GitHub:  # TODO: refactor and update for platform use
    """
    GitHub keyword library defines keywords for interacting with GitHub services.
    """

    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def __init__(self):
        # Shared token used by keywords when no per-call token is given.
        self.token = None

    def set_token(self, token: str) -> None:
        """
        Set the GitHub token. If the token is set then subsequent calls to GitHub
        keywords such as `Create Issue` don't need to specify the token.

        Examples:
        | RW.GitHub.Set Token | ${GITHUB_TOKEN} |
        """
        self.token = token

    def get_token(self) -> str:
        """
        Return the GitHub token which was previously set using `Set Token`.

        Examples:
        | ${gh_token} = | RW.GitHub.Get Token |
        Return Value:
        | GitHub token |
        """
        # utils.task_error is expected to abort the task; the return below only
        # runs when a token is present.
        if self.token is None:
            utils.task_error("GitHub token is not defined.")
        return self.token

    def create_issue(
        self,
        repo_name: str,
        title: str,
        assignee: Union[str, object] = github.GithubObject.NotSet,
        labels: Union[str, list[str], object] = github.GithubObject.NotSet,
        body: Union[str, object] = github.GithubObject.NotSet,
        token: Optional[str] = None,
    ) -> object:
        """
        Create a new GitHub issue.

        Examples:
        | ${res} = | Create Issue | my-project | Bug ABC in my-project | vui | bug | Long description... | ${gh_token} |
        Return Value:
        | GitHub issue |
        """
        if token is None:
            token = self.get_token()
        # A space-separated label string is split into a list; NotSet passes through.
        if labels is not None:
            if utils.is_str(labels):
                labels = labels.split()
        g = Github(token)
        utils.info_log(f"create_issue repo name: {repo_name}")
        repo = g.get_repo(repo_name)
        # utils.latency wraps the call and returns (elapsed, result).
        latency, res = utils.latency(
            repo.create_issue,
            title=title,
            body=body,
            assignee=assignee,
            labels=labels,
            latency_params=[6, "s"],
        )

        @dataclass
        class Result:
            original_content: object
            login: str
            latency: float

        # NOTE(review): `res` here is an Issue object; whether it exposes a
        # `.login` attribute is not evident from this file — confirm against
        # the PyGithub Issue API.
        return Result(res, res.login, latency)

    def get_user(
        self, user: Optional[str] = None, token: Optional[str] = None
    ) -> object:
        """
        Get GitHub user info.

        Examples:
        | ${user} | Get User | vui | ${gh_token} |
        Return Value:
        | User info |

        User Info:
        | result.login | "lumphammer9" |
        | result.id | 88601986 |
        | result.url | "https://api.github.com/users/lumphammer9" |
        | result.name: | ... |
        """
        if token is None:
            token = self.get_token()
        g = Github(token)
        # No user argument -> the authenticated user; otherwise look up by login.
        if user is None:
            latency, res = utils.latency(
                g.get_user,
                latency_params=[6, "s"],
            )
        else:
            latency, res = utils.latency(
                g.get_user,
                user,
                latency_params=[6, "s"],
            )

        @dataclass
        class Result:
            original_content: object
            login: str
            latency: float

        return Result(res, res.login, latency)

    def get_repo(
        self,
        name: str,
        user: Optional[str] = None,
        token: Optional[str] = None,
    ) -> object:
        """
        Get the GitHub repository with the given name.

        Examples:
        | ${repo} = | Get Repo | my-app | token=${gh_token} |
        Return Value:
        | Repo data |
        """
        if token is None:
            token = self.get_token()
        g = Github(token)
        # Look up the repo under the authenticated user, or a named user.
        if user is None:
            latency, res = utils.latency(
                g.get_user().get_repo,
                name,
                latency_params=[6, "s"],
            )
        else:
            latency, res = utils.latency(
                g.get_user(user).get_repo,
                name,
                latency_params=[6, "s"],
            )

        @dataclass
        class Result:
            original_content: object
            full_name: str
            url: str
            latency: float

        return Result(res, res.full_name, res.url, latency)

    def get_repos(self, token: Optional[str] = None) -> object:
        """
        Get all the repositories found in GitHub.

        Examples:
        | ${repo} = | Get Repos | token=${gh_token} |
        Return Value:
        | List of repo data |

        TBD
        :return: List of repo names
        """
        if token is None:
            token = self.get_token()
        g = Github(token)
        latency, res = utils.latency(
            g.get_user().get_repos,
            latency_params=[6, "s"],
        )

        @dataclass
        class Result:
            original_content: object
            repos: list[str]
            latency: float

        # Materializes the paginated repo listing into a list of names.
        return Result(res, [repo.name for repo in res], latency)
/runwhen_keywords-0.0.1.tar.gz/runwhen_keywords-0.0.1/RW/GitHub/__init__.py
0.666605
0.287568
__init__.py
pypi