code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from builtins import range
import numpy as np
import pandas as pd
from sklearn import preprocessing
class Standardize(object):
    """
    Standardize all columns where the values are numerical.

    Parameters
    ----------
    strategy: string, optional (default='normalize')
        Available options: 'normalize', 'scale', and 'MinMax'.
        - 'normalize': scale each numerical column to unit L2 norm
        - 'scale': standardize each numerical column to zero mean, unit variance
        - 'MinMax': rescale each numerical column to the [0, 1] range
    """

    _STRATEGIES = ('normalize', 'scale', 'MinMax')

    def __init__(self, strategy='normalize'):
        # Fail fast on an unsupported strategy instead of silently returning
        # None from transform() later.
        if strategy not in self._STRATEGIES:
            raise ValueError(
                "strategy must be one of %s, got %r" % (', '.join(self._STRATEGIES), strategy))
        self.strategy = strategy

    @staticmethod
    def _scaled_numeric(df, scale_fn):
        """Apply scale_fn to the values of the numerical columns of df.

        Returns a dataframe of the scaled values that keeps the original
        column names AND row index, so that DataFrame.update() aligns rows
        correctly even when df has a non-default index.
        """
        numeric_data = df.select_dtypes(include=['float64', 'int64'])
        x_scaled = scale_fn(numeric_data.values)
        return pd.DataFrame(x_scaled, columns=numeric_data.columns, index=numeric_data.index)

    def transform(self, df):
        """Standardize the numerical columns of df using the chosen strategy.

        Parameters
        ----------
        df: pandas dataframe
            input dataframe

        Returns
        -------
        transformed dataframe. For 'normalize' and 'scale' the input
        dataframe is updated in place and returned; 'MinMax' operates on a
        copy and leaves the input dataframe untouched.
        """
        if self.strategy == 'normalize':
            # Unit L2 norm per column (axis=0).
            scaled = self._scaled_numeric(df, lambda x: preprocessing.normalize(x, norm='l2', axis=0))
            df.update(scaled)
            return df
        if self.strategy == 'scale':
            # Zero mean, unit variance per column.
            scaled = self._scaled_numeric(df, preprocessing.scale)
            df.update(scaled)
            return df
        # 'MinMax' (validated in __init__): rescale to [0, 1] on a copy.
        norm_data = df.copy()
        norm_data.update(self._scaled_numeric(norm_data, preprocessing.MinMaxScaler().fit_transform))
        return norm_data
import pathlib
from typing import Any, Callable, List, Optional
from absl import flags
# Module-level flag: when --quiet is set, prompts skip user interaction and
# fall back to their default option (when one is provided).
_QUIET = flags.DEFINE_bool(
    'quiet', False, 'Don\'t prompt for user confirmation, go with the default '
    'option automatically')
def bool_prompt(prompt: str, default_opt: str = '') -> bool:
    """Prompt the user for a yes/no answer.

    Args:
        prompt: text shown to the user.
        default_opt: '' (no default), 'y', or 'n'. Used when the user presses
            enter without typing, and when --quiet is set.

    Returns:
        True for yes, False for no.
    """
    assert default_opt in ['', 'y', 'n']
    if _QUIET.value and default_opt:
        return default_opt == 'y'
    # Capitalize the default option in the prompt, e.g. (Y/n).
    y = 'Y' if default_opt == 'y' else 'y'
    n = 'N' if default_opt == 'n' else 'n'
    # Loop (rather than recurse) so the default option is preserved across
    # retries; the original recursive retry dropped default_opt.
    while True:
        response = (input(f'{prompt} ({y}/{n}): ') or default_opt).lower()
        if response == 'y':
            return True
        if response == 'n':
            return False
        print('Please choose "y" or "n".')
def numbered_options_prompt(
    prompt: str, options: List[Any], default: int = 0) -> int:
    """Prompt with a numbered list of options.

    Args:
        prompt: text shown above the option list.
        options: the options to choose from; each is printed next to its index.
        default: index returned when the user presses enter (and under --quiet).

    Returns:
        The index of the chosen option.
    """
    if _QUIET.value:
        return default
    # Setup.
    assert options
    print(prompt)
    # enumerate() supplies the index; the original unpacked each option as a
    # pair, which crashes for non-pair option values.
    for i, p in enumerate(options):
        print(f'{i}: {p}')
    response = input(f'[{default}]: ')
    # Convert. An empty response takes the advertised default, not always 0.
    if not response:
        return default
    try:
        choice = int(response)
    except ValueError:
        print(f'Must be an integer between 0 and {len(options) - 1}')
        return numbered_options_prompt(prompt, options, default)
    # Validation (both bounds; negative indexes are rejected too).
    if not 0 <= choice < len(options):
        print(f'Must be between 0 and {len(options) - 1}')
        return numbered_options_prompt(prompt, options, default)
    return choice
def path_prompt(
    prompt: str,
    default_path: Optional[pathlib.Path],
    validation: Callable[[Optional[pathlib.Path]], bool] = lambda x: True
) -> pathlib.Path:
    """Prompt for a filesystem path, re-prompting until validation accepts it."""
    if _QUIET.value and default_path:
        return default_path
    # Show the default, when present, on its own line inside brackets.
    if default_path:
        raw = input(f'{prompt}\n[{default_path}]: ')
    else:
        raw = input(f'{prompt}: ')
    # An empty answer falls back to the default; anything else becomes a Path.
    result = default_path if not raw and default_path else pathlib.Path(raw)
    # Re-prompt until the caller-supplied validation accepts the path.
    if not validation(result):
        return path_prompt(prompt, default_path, validation)
    return result
def string_prompt(
    prompt: str,
    default_str: Optional[str] = None,
    validation: Callable[[Optional[str]], bool] = lambda x: True
) -> str:
    """Prompt for a free-form string.

    Args:
        prompt: text shown to the user.
        default_str: value used when the user presses enter (and under --quiet).
        validation: predicate; the user is re-prompted until it returns True.

    Returns:
        The (validated) user response.
    """
    if _QUIET.value and default_str:
        return default_str
    # Setup: show the default, when present, on its own line inside brackets.
    if default_str:
        response = input(f'{prompt}\n[{default_str}]: ')
    else:
        response = input(f'{prompt}: ')
    # Convert: an empty answer falls back to the default.
    if not response and default_str:
        response = default_str
    # Validation.
    if not validation(response):
        return string_prompt(prompt, default_str, validation)
    return response
import functools
import itertools
import shlex
import subprocess
import time
__version__ = '0.9.1'
# Default keyword arguments passed to subprocess.Popen: capture both output
# streams and do not use a shell. Callers of run() may override any entry
# through **kwds.
_SUBPROCESS_KWDS = {
    'shell': False,
    'stderr': subprocess.PIPE,
    'stdout': subprocess.PIPE,
}
def run(cmd, out=print, err=None, sleep=0, count=None, **kwds):
    """
    Run a subprocess, read its stdout and stderr, and send them to error and
    output callbacks.

    Returns the integer error code that the subprocess returned.

    cmd:
        A list or tuple of strings, or a string that is split using shlex
    out:
        The callback for stdout from the subprocess
    err:
        The callback for stderr from the subprocess (defaults to ``out``)
    sleep:
        How long to sleep between checking the process
    count:
        Maximum number of lines to retrieve at a time, from stdout or stderr
    kwds:
        Keywords that are passed to subprocess.Popen
    """
    # Errors go to the output callback unless a separate one was supplied.
    err = err or out
    kwds = dict(_SUBPROCESS_KWDS, **kwds)
    # With shell=True the command must be a single string; otherwise argv list.
    if kwds.get('shell'):
        if not isinstance(cmd, str):
            cmd = ' '.join(cmd)
    elif isinstance(cmd, str):
        cmd = shlex.split(cmd)
    with subprocess.Popen(cmd, **kwds) as p:
        read = functools.partial(read_lines, count=count)
        # Short-circuit order matters: drain stdout first, then stderr, and
        # keep looping while the process is still running (poll() is None).
        # After the process exits, the loop continues until both streams
        # report no more lines, so no trailing output is lost.
        # NOTE(review): readline() blocks while a stream is open with no
        # complete line available; a process emitting only stderr may stall
        # the stdout read — confirm intended usage.
        while read(p.stdout, out) or read(p.stderr, err) or p.poll() is None:
            if sleep:
                time.sleep(sleep)
        return p.returncode
def run_to_list(cmd, **kwds):
    """
    Run ``cmd`` and collect its stdout lines into a list.

    Returns a tuple of (returncode, lines). If the parameter `err` is not
    set, error messages are appended to the same list.
    """
    lines = []
    returncode = run(cmd, out=lines.append, **kwds)
    return returncode, lines
def read_lines(stream, callback, count=None):
    """
    Read lines of text from a byte stream and send them to a callback, until
    the stream is exhausted (``readline`` returns an empty bytestring).

    Lines are decoded as UTF-8 and stripped of their trailing newline.

    Returns the number of lines read.

    If `count` is not None, at most `count` lines are read.
    """
    lines_read = 0
    # itertools.count() gives an unbounded loop when no count limit is set.
    counter = itertools.count() if count is None else range(count)
    for _ in counter:
        line = stream.readline()
        if not line:
            break
        callback(line.decode('utf-8').rstrip('\n'))
        lines_read += 1
    # Tracking the number of delivered lines explicitly fixes the NameError
    # the original raised for count=0 (loop variable never bound).
    return lines_read
from __future__ import annotations
import abc
import numbers
import jax.numpy as jnp
from jax import grad, jit
class ConstraintModule(abc.ABC):
"""Base class implementation for safety constraints
Implements constraint with form of h(x) >= 0
Parameters
----------
alpha : ConstraintStrengthener
Constraint Strengthener object used for ASIF methods. Required for ASIF methods.
"""
def __init__(self, alpha: ConstraintStrengthener = None):
assert isinstance(alpha, ConstraintStrengthener), "alpha must be an instance/sub-class of ConstraintStrenthener"
self._alpha = alpha
self._compose()
def _compose(self):
self._compute_fn = jit(self._compute)
self._grad_fn = jit(grad(self._compute))
def __call__(self, state: jnp.ndarray) -> float:
"""Evaluates constraint function h(x)
Considered satisfied when h(x) >= 0
Parameters
----------
state : jnp.ndarray
current rta state of the system
Returns
-------
float:
result of inequality constraint function
"""
return self._compute_fn(state)
def compute(self, state: jnp.ndarray) -> float:
"""Evaluates constraint function h(x)
Considered satisfied when h(x) >= 0
Parameters
----------
state : jnp.ndarray
current rta state of the system
Returns
-------
float:
result of inequality constraint function
"""
return self._compute_fn(state)
@abc.abstractmethod
def _compute(self, state: jnp.ndarray) -> float:
"""Custom implementation of constraint function
!!! Note: To be compatible with jax jit compilation, must not rely on external states that are overwritten here
or elsewhere after initialization
Parameters
----------
state : jnp.ndarray
current rta state of the system
Returns
-------
float:
result of inequality constraint function
"""
raise NotImplementedError()
def grad(self, state: jnp.ndarray) -> jnp.ndarray:
"""
Computes Gradient of Safety Constraint Function wrt x
Required for ASIF methods
Parameters
----------
state : jnp.ndarray
current rta state of the system
Returns
-------
jnp.ndarray:
gradient of constraint function wrt x. Shape of (n, n) where n = state.vector.size.
"""
return self._grad_fn(state)
def alpha(self, x: float) -> float:
"""Evaluates Strengthing function to soften Nagumo's condition outside of constraint set boundary
Pass through for class member alpha
Parameters
----------
x : float
output of constraint function
Returns
-------
float
Strengthening Function output
"""
if self._alpha is None:
return None
return self._alpha(x)
class ConstraintStrengthener(abc.ABC):
    """Strengthening function used to soften Nagumo's condition outside of the
    constraint set boundary.

    Implementations must be monotonically decreasing with f(0) = 0.

    Required for ASIF methods
    """

    @abc.abstractmethod
    def __call__(self, x) -> float:
        """
        Compute Strengthening Function (Required for ASIF):

        Must be monotonically decreasing with f(0) = 0

        !!! Note: To be compatible with jax jit compilation, must not rely on
        external states that are overwritten here or elsewhere after
        initialization

        Parameters
        ----------
        x : float
            output of the inequality constraint function h(x)

        Returns
        -------
        float
            output of monotonically decreasing constraint strengther function
        """
        raise NotImplementedError
class ConstraintMagnitudeStateLimit(ConstraintModule):
    """
    Generic state vector element magnitude limit constraint

    Enforces |state[state_index]| <= limit_val through the inequality
    h(x) = limit_val**2 - state[state_index]**2 >= 0

    Parameters
    ----------
    limit_val : float
        state vector element limit constraint value
    state_index: int
        index of the state vector element the limit applies to.
        Currently only supports single indices
    alpha : ConstraintStrengthener
        Constraint Strengthener object used for ASIF methods. Required for ASIF methods.
        Defaults to PolynomialConstraintStrengthener([0, 0.0005, 0, 0.001])
    """

    def __init__(self, limit_val: float, state_index: int, alpha: ConstraintStrengthener = None):
        self.limit_val = limit_val
        self.state_index = state_index
        # Fall back to the default polynomial strengthener shared by the
        # generic state limit constraints.
        default_alpha = PolynomialConstraintStrengthener([0, 0.0005, 0, 0.001])
        super().__init__(alpha=alpha if alpha is not None else default_alpha)

    def _compute(self, state: jnp.ndarray) -> float:
        return self.limit_val**2 - state[self.state_index]**2
class ConstraintMaxStateLimit(ConstraintModule):
    """
    Generic state vector element maximum limit constraint

    Enforces state[state_index] <= limit_val through the inequality
    h(x) = limit_val - state[state_index] >= 0

    Parameters
    ----------
    limit_val : float
        state vector element limit constraint value
    state_index: int
        index of the state vector element the limit applies to.
        Currently only supports single indices
    alpha : ConstraintStrengthener
        Constraint Strengthener object used for ASIF methods. Required for ASIF methods.
        Defaults to PolynomialConstraintStrengthener([0, 0.0005, 0, 0.001])
    """

    def __init__(self, limit_val: float, state_index: int, alpha: ConstraintStrengthener = None):
        self.limit_val = limit_val
        self.state_index = state_index
        # Fall back to the default polynomial strengthener shared by the
        # generic state limit constraints.
        default_alpha = PolynomialConstraintStrengthener([0, 0.0005, 0, 0.001])
        super().__init__(alpha=alpha if alpha is not None else default_alpha)

    def _compute(self, state: jnp.ndarray) -> float:
        return self.limit_val - state[self.state_index]
class ConstraintMinStateLimit(ConstraintModule):
    """
    Generic state vector element minimum limit constraint

    Enforces state[state_index] >= limit_val through the inequality
    h(x) = state[state_index] - limit_val >= 0

    Parameters
    ----------
    limit_val : float
        state vector element limit constraint value
    state_index: int
        index of the state vector element the limit applies to.
        Currently only supports single indices
    alpha : ConstraintStrengthener
        Constraint Strengthener object used for ASIF methods. Required for ASIF methods.
        Defaults to PolynomialConstraintStrengthener([0, 0.0005, 0, 0.001])
    """

    def __init__(self, limit_val: float, state_index: int, alpha: ConstraintStrengthener = None):
        self.limit_val = limit_val
        self.state_index = state_index
        # Fall back to the default polynomial strengthener shared by the
        # generic state limit constraints.
        default_alpha = PolynomialConstraintStrengthener([0, 0.0005, 0, 0.001])
        super().__init__(alpha=alpha if alpha is not None else default_alpha)

    def _compute(self, state: jnp.ndarray) -> float:
        return state[self.state_index] - self.limit_val
class PolynomialConstraintStrengthener(ConstraintStrengthener):
    """Implements strengthening function as a polynomial function of x

    Parameters
    ----------
    coefs: list
        list of polynomial coefs. Arbitrary length.
        Results in strengthening function sum(coefs[i]*(x**i)) for i in range(0, len(coefs))
    """

    def __init__(self, coefs: list = None):
        assert isinstance(coefs, list) or coefs is None, "coefs must be a list of numbers"
        assert coefs is None or all((isinstance(i, numbers.Number) for i in coefs)), "coefs must be a list of numbers"
        # Default to the identity polynomial f(x) = x.
        if coefs is None:
            coefs = [0, 1]
        self.coefs = coefs

    def __call__(self, x: float) -> float:
        """Evaluates strengthening function

        Parameters
        ----------
        x : float
            output of inequality constraint function h(x)

        Returns
        -------
        float
            output of monotonically decreasing constraint strengther function
        """
        # Horner-free accumulation keeps the original term-by-term evaluation
        # order: sum of coefs[n] * x**n.
        output = 0
        for n, c in enumerate(self.coefs):
            output += c * x**n
        return output
from __future__ import annotations
import abc
from typing import Dict, Union
import jax.numpy as jnp
from jax import jacfwd, jit
class RTABackupController(abc.ABC):
    """Base Class for backup controllers used by backup control based RTA methods

    Parameters
    ----------
    controller_state_initial : jnp.ndarray or Dict[str, jnp.ndarray] or None
        initial internal controller state; deep-copied so the controller can
        be reset to it at the start of each episode
    """

    def __init__(self, controller_state_initial: Union[jnp.ndarray, Dict[str, jnp.ndarray], None] = None):
        self.controller_state_initial = self._copy_controller_state(controller_state_initial)
        # Snapshot buffer used by save()/restore() during trajectory integration.
        self.controller_state_saved = None
        self.controller_state = self._copy_controller_state(self.controller_state_initial)
        self._compose()

    def _compose(self):
        # jit compile the control generation and its jacobian.
        # NOTE(review): step_size and controller_state are marked static;
        # static jit arguments must be hashable, so mutable controller states
        # (dicts/arrays) presumably cannot flow through these compiled paths —
        # confirm against stateful controller usage.
        self._jacobian = jit(
            jacfwd(self._generate_control, has_aux=True), static_argnums=[1, 2], static_argnames=['step_size', 'controller_state']
        )
        self._generate_control_fn = jit(self._generate_control, static_argnames=['step_size', 'controller_state'])

    def reset(self):
        """Resets the backup controller to its initial state for a new episode
        """
        self.controller_state = self._copy_controller_state(self.controller_state_initial)
        self._compose()

    def _copy_controller_state(self, controller_state: Union[jnp.ndarray, Dict[str, jnp.ndarray], None]):
        # Deep-copies a controller state of any supported form; a dict is
        # copied value-by-value so the copies share no buffers.
        if controller_state is None:
            copied_state = None
        elif isinstance(controller_state, jnp.ndarray):
            copied_state = jnp.copy(controller_state)
        elif isinstance(controller_state, dict):
            copied_state = {k: jnp.copy(v) for k, v in controller_state.items()}
        else:
            raise TypeError("controller_state to copy must be one of (jnp.ndarray, Dict[str,jnp.ndarray], None)")
        return copied_state

    def generate_control(self, state: jnp.ndarray, step_size: float) -> jnp.ndarray:
        """Generates safe backup control given the current state and step size

        The internal controller state is updated in place with the value
        returned by _generate_control.

        Parameters
        ----------
        state : jnp.ndarray
            current rta state of the system
        step_size : float
            time duration over which backup control action will be applied

        Returns
        -------
        jnp.ndarray
            control vector
        """
        controller_output = self._generate_control_fn(state, step_size, self.controller_state)
        if (not isinstance(controller_output, tuple)) or len(controller_output) != 2:
            raise ValueError('_generate_control should return 2 values: the control vector and the updated controller state')
        control, self.controller_state = controller_output
        return control

    def generate_control_with_controller_state(
        self,
        state: jnp.ndarray,
        step_size: float,
        controller_state: Union[jnp.ndarray, Dict[str, jnp.ndarray]] = None
    ) -> tuple[jnp.ndarray, Union[jnp.ndarray, Dict[str, jnp.ndarray], None]]:
        """Generates safe backup control given the current state, step_size, and internal controller state

        Note that in order to be compatible with jax differentiation and jit compiler, all states that are modified by
        generating control must be contained within the controller_state

        Public interface for _generate_control

        Parameters
        ----------
        state : jnp.ndarray
            current rta state of the system
        step_size : float
            time duration over which backup control action will be applied
        controller_state: jnp.ndarray or Dict[str, jnp.ndarray] or None
            internal controller state. For stateful controllers, all states that are modified in the control computation
            (e.g. integral control error buffers) must be contained within controller_state

        Returns
        -------
        jnp.ndarray
            control vector
        jnp.ndarray or Dict[str, jnp.ndarray] or None
            Updated controller_state modified by the control algorithm
            If no internal controller_state is used, return None
        """
        return self._generate_control_fn(state, step_size, controller_state)

    @abc.abstractmethod
    def _generate_control(
        self,
        state: jnp.ndarray,
        step_size: float,
        controller_state: Union[jnp.ndarray, Dict[str, jnp.ndarray]] = None
    ) -> tuple[jnp.ndarray, Union[jnp.ndarray, Dict[str, jnp.ndarray], None]]:
        """Generates safe backup control given the current state, step_size, and internal controller state

        Note that in order to be compatible with jax differentiation and jit compiler, all states that are modified by
        generating control must be contained within the controller_state

        Parameters
        ----------
        state : jnp.ndarray
            current rta state of the system
        step_size : float
            time duration over which backup control action will be applied
        controller_state: jnp.ndarray or Dict[str, jnp.ndarray] or None
            internal controller state. For stateful controllers, all states that are modified in the control computation
            (e.g. integral control error buffers) must be contained within controller_state

        Returns
        -------
        jnp.ndarray
            control vector
        jnp.ndarray or Dict[str, jnp.ndarray] or None
            Updated controller_state modified by the control algorithm
            If no internal controller_state is used, return None
        """
        raise NotImplementedError()

    def jacobian(self, state: jnp.ndarray, step_size: float):
        """Computes the jacobian of the backup controller control output with respect to the input state

        Parameters
        ----------
        state : jnp.ndarray
            current rta state of the system
        step_size : float
            time duration over which backup control action will be applied

        Returns
        -------
        jnp.ndarray
            jacobian matrix
        """
        return self._jacobian(state, step_size, self.controller_state)[0]

    def save(self):
        """Save the internal state of the backup controller

        Allows trajectory integration with a stateful backup controller
        """
        self.controller_state_saved = self._copy_controller_state(self.controller_state)

    def restore(self):
        """Restores the internal state of the backup controller from the last save

        Allows trajectory integration with a stateful backup controller

        !!! Note stateful backup controllers are not compatible with jax jit compilation
        """
        self.controller_state = self.controller_state_saved
from functools import partial
import jax.numpy as jnp
import numpy as np
from jax import jit
@partial(jit, static_argnames=['delta'])
def norm_with_delta(x: jnp.ndarray, delta: float):
    """
    Compute the Euclidean norm of ``x`` with a small positive ``delta`` added
    under the square root.

    The offset keeps the function differentiable at x = 0.

    Parameters
    ----------
    x : jnp.ndarray
        input vector to compute the norm of
    delta : float
        small positive offset added inside the square root

    Returns
    -------
    float
        offset vector norm value
    """
    squared_magnitude = jnp.square(x).sum()
    return jnp.sqrt(squared_magnitude + delta)
@jit
def to_jnp_array_jit(x: np.ndarray) -> jnp.ndarray:
    """
    Convert a numpy array into a jax numpy array via a jit compiled function.

    Repeated calls with inputs of the same shape reuse the compiled trace,
    making the conversion significantly faster than converting directly.

    Parameters
    ----------
    x : np.ndarray
        numpy array to convert

    Returns
    -------
    jnp.ndarray
        jax numpy version of the input array
    """
    return jnp.array(x)
@partial(jit, static_argnames=['axis'])
def jnp_stack_jit(arrays, axis: int = 0) -> jnp.ndarray:
    """
    jit compiled wrapper around ``jnp.stack``.

    Parameters
    ----------
    arrays : Sequence of array_likes
        arrays to be stacked together into a single jnp ndarray
    axis : int, optional
        axis along which the arrays are stacked, by default 0

    Returns
    -------
    jnp.ndarray
        stacked array of the input array sequence
    """
    stacked = jnp.stack(arrays, axis=axis)
    return stacked
@jit
def add_dim_jit(x: jnp.ndarray) -> jnp.ndarray:
    """
    Prepend a leading axis of size 1 to a jax array.

    Parameters
    ----------
    x : jnp.ndarray
        input array of shape (N,)

    Returns
    -------
    jnp.ndarray
        output array of shape (1, N)
    """
    return jnp.expand_dims(x, 0)
class SolverError(Exception):
    """Raised when the solver fails to find a solution."""

    def __str__(self):
        return "SolverError: Solver could not find a solution"
class SolverWarning(UserWarning):
    """Warning issued when the solver fails to find a solution and the desired
    control is passed through unchanged instead."""

    def __str__(self):
        return "**Warning! Solver could not find a solution, passing desired control**"
from collections import OrderedDict
from typing import Dict, Tuple, Union
import jax.numpy as jnp
import numpy as np
import scipy
from safe_autonomy_dynamics.base_models import BaseLinearODESolverDynamics
from safe_autonomy_dynamics.cwh import M_DEFAULT, N_DEFAULT, generate_cwh_matrices
from run_time_assurance.constraint import (
ConstraintMagnitudeStateLimit,
ConstraintModule,
ConstraintStrengthener,
PolynomialConstraintStrengthener,
)
from run_time_assurance.controller import RTABackupController
from run_time_assurance.rta import ExplicitASIFModule, ExplicitSimplexModule, ImplicitASIFModule, ImplicitSimplexModule
from run_time_assurance.state import RTAStateWrapper
from run_time_assurance.utils import norm_with_delta, to_jnp_array_jit
from run_time_assurance.zoo.cwh.docking_2d import V0_DEFAULT, X_VEL_LIMIT_DEFAULT, Y_VEL_LIMIT_DEFAULT
# Default maximum velocity magnitude in the z direction
Z_VEL_LIMIT_DEFAULT = 10
# Default coefficient of the linear term of the distance dependent speed limit
V1_COEF_DEFAULT = 4
# Default v1 slope of the speed limit: v1 = v1_coef * n (orbital mean motion)
V1_DEFAULT = V1_COEF_DEFAULT * N_DEFAULT
class Docking3dState(RTAStateWrapper):
    """RTA state for cwh docking 3d RTA

    Exposes the elements of the underlying state vector
    [x, y, z, x_dot, y_dot, z_dot] as named read/write properties.
    """

    def _vector_element(index, doc):
        """Build a property proxying element ``index`` of ``self.vector``."""

        def _get(self):
            return self.vector[index]

        def _set(self, val):
            self.vector[index] = val

        return property(_get, _set, doc=doc)

    # Position components
    x = _vector_element(0, "x position")
    y = _vector_element(1, "y position")
    z = _vector_element(2, "z position")
    # Velocity components
    x_dot = _vector_element(3, "x velocity component")
    y_dot = _vector_element(4, "y velocity component")
    z_dot = _vector_element(5, "z velocity component")

    # The factory is a class-body helper only; remove it from the namespace.
    del _vector_element
class Docking3dRTAMixin:
    """Mixin class provides 3D docking RTA util functions

    Must call mixin methods using the RTA interface methods
    """

    def _setup_docking_properties(self, m: float, n: float, v1_coef: float, jit_compile_dict: Dict[str, bool], integration_method: str):
        """Initializes docking specific properties from other class members"""
        # Slope of the distance dependent speed limit: v1 of v_limit = v0 + v1*||r||
        self.v1 = v1_coef * n
        # Linearized CWH dynamics matrices for the 3d problem
        A, B = generate_cwh_matrices(m, n, mode="3d")
        self.A = jnp.array(A)
        self.B = jnp.array(B)
        self.dynamics = BaseLinearODESolverDynamics(A=A, B=B, integration_method=integration_method)
        # RK45 integration cannot be jit compiled, so default those subroutines
        # off and reject explicit requests for them. NOTE: jit_compile_dict is
        # mutated in place, so the caller's dict receives these defaults.
        if integration_method == 'RK45':
            jit_compile_dict.setdefault('pred_state', False)
            jit_compile_dict.setdefault('integrate', False)
            if jit_compile_dict.get('pred_state'):
                raise ValueError('pred_state uses RK45 integration and can not be compiled using jit')
            if jit_compile_dict.get('integrate'):
                raise ValueError('integrate uses RK45 integration and can not be compiled using jit')
        elif integration_method == 'Euler':
            jit_compile_dict.setdefault('pred_state', True)
            jit_compile_dict.setdefault('integrate', True)
        else:
            raise ValueError('integration_method must be either RK45 or Euler')

    def _setup_docking_constraints(self, v0: float, v1: float, x_vel_limit: float, y_vel_limit: float, z_vel_limit: float) -> OrderedDict:
        """generates constraints used in the docking problem"""
        # Distance dependent relative velocity limit plus per-axis velocity
        # magnitude limits (state indices 3-5 are x_dot, y_dot, z_dot).
        return OrderedDict(
            [
                ('rel_vel', ConstraintDocking3dRelativeVelocity(v0=v0, v1=v1)),
                ('x_vel', ConstraintMagnitudeStateLimit(limit_val=x_vel_limit, state_index=3)),
                ('y_vel', ConstraintMagnitudeStateLimit(limit_val=y_vel_limit, state_index=4)),
                ('z_vel', ConstraintMagnitudeStateLimit(limit_val=z_vel_limit, state_index=5)),
            ]
        )

    def _docking_pred_state(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray, integration_method: str) -> jnp.ndarray:
        """Predicts the next state given the current state and control action"""
        if integration_method == 'RK45':
            # The RK45 solver operates on numpy arrays; convert back afterwards.
            next_state_vec, _ = self.dynamics.step(step_size, np.array(state), np.array(control))
            out = to_jnp_array_jit(next_state_vec)
        elif integration_method == 'Euler':
            # Single forward-Euler step of dx/dt = f(x) + g(x) u
            state_dot = self._docking_f_x(state) + self._docking_g_x(state) @ control
            out = state + state_dot * step_size
        else:
            raise ValueError('integration_method must be either RK45 or Euler')
        return out

    def _docking_f_x(self, state: jnp.ndarray) -> jnp.ndarray:
        """Computes the system contribution to the state transition: f(x) of dx/dt = f(x) + g(x)u"""
        return self.A @ state

    def _docking_g_x(self, _: jnp.ndarray) -> jnp.ndarray:
        """Computes the control input contribution to the state transition: g(x) of dx/dt = f(x) + g(x)u"""
        return jnp.copy(self.B)
class Docking3dExplicitSwitchingRTA(ExplicitSimplexModule, Docking3dRTAMixin):
    """Explicit Switching RTA filter for the 3d docking problem.

    Parameters
    ----------
    m : float, optional
        mass in kg of spacecraft, by default M_DEFAULT
    n : float, optional
        orbital mean motion in rad/s of current Hill's reference frame, by default N_DEFAULT
    v0 : float, optional
        Maximum safe docking velocity in m/s, by default V0_DEFAULT.
        v0 term of the speed limit v_limit = v0 + v1_coef*n*||r||
    v1_coef : float, optional
        coefficient of the linear term of the distance dependent speed limit
        in 1/seconds, by default V1_COEF_DEFAULT
    x_vel_limit : float, optional
        max velocity magnitude in the x direction, by default X_VEL_LIMIT_DEFAULT
    y_vel_limit : float, optional
        max velocity magnitude in the y direction, by default Y_VEL_LIMIT_DEFAULT
    z_vel_limit : float, optional
        max velocity magnitude in the z direction, by default Z_VEL_LIMIT_DEFAULT
    control_bounds_high : Union[float, list, np.ndarray, jnp.ndarray], optional
        upper bound of allowable control. Pass a list for element specific limit. By default 1
    control_bounds_low : Union[float, list, np.ndarray, jnp.ndarray], optional
        lower bound of allowable control. Pass a list for element specific limit. By default -1
    backup_controller : RTABackupController, optional
        backup controller used by the rta module to generate backup control.
        By default Docking3dStopLQRBackupController
    jit_compile_dict: Dict[str, bool], optional
        Dictionary specifying which subroutines will be jax jit compiled. Behavior defined in self.compose()
    integration_method: str, optional
        Integration method to use, either 'RK45' or 'Euler'. By default 'RK45'
    """

    def __init__(
        self,
        *args,
        m: float = M_DEFAULT,
        n: float = N_DEFAULT,
        v0: float = V0_DEFAULT,
        v1_coef: float = V1_COEF_DEFAULT,
        x_vel_limit: float = X_VEL_LIMIT_DEFAULT,
        y_vel_limit: float = Y_VEL_LIMIT_DEFAULT,
        z_vel_limit: float = Z_VEL_LIMIT_DEFAULT,
        control_bounds_high: float = 1,
        control_bounds_low: float = -1,
        backup_controller: RTABackupController = None,
        jit_compile_dict: Dict[str, bool] = None,
        integration_method: str = 'RK45',
        **kwargs
    ):
        # Docking problem parameters, stored before the base class setup runs.
        self.m = m
        self.n = n
        self.v0 = v0
        self.v1_coef = v1_coef
        self.x_vel_limit = x_vel_limit
        self.y_vel_limit = y_vel_limit
        self.z_vel_limit = z_vel_limit
        self.integration_method = integration_method

        # Fall back to the stopping LQR backup controller and a minimal jit
        # configuration when the caller does not supply their own.
        if backup_controller is None:
            backup_controller = Docking3dStopLQRBackupController(m=m, n=n)
        if jit_compile_dict is None:
            jit_compile_dict = {'constraint_violation': True}

        super().__init__(
            *args,
            control_bounds_high=control_bounds_high,
            control_bounds_low=control_bounds_low,
            backup_controller=backup_controller,
            jit_compile_dict=jit_compile_dict,
            **kwargs
        )

    def _setup_properties(self):
        self._setup_docking_properties(self.m, self.n, self.v1_coef, self.jit_compile_dict, self.integration_method)

    def _setup_constraints(self) -> OrderedDict:
        return self._setup_docking_constraints(self.v0, self.v1, self.x_vel_limit, self.y_vel_limit, self.z_vel_limit)

    def _pred_state(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        return self._docking_pred_state(state, step_size, control, self.integration_method)
class Docking3dImplicitSwitchingRTA(ImplicitSimplexModule, Docking3dRTAMixin):
"""Implements Implicit Switching RTA for the 3d Docking problem
Parameters
----------
backup_window : float
Duration of time in seconds to evaluate backup controller trajectory
m : float, optional
mass in kg of spacecraft, by default M_DEFAULT
n : float, optional
orbital mean motion in rad/s of current Hill's reference frame, by default N_DEFAULT
v0 : float, optional
Maximum safe docking velocity in m/s, by default V0_DEFAULT
v0 of v_limit = v0 + v1*n*||r||
v1_coef : float, optional
coefficient of linear component of the distance depending speed limit in 1/seconds, by default V1_COEF_DEFAULT
v1_coef of v_limit = v0 + v1_coef*n*||r||
x_vel_limit : float, optional
max velocity magnitude in the x direction, by default X_VEL_LIMIT_DEFAULT
y_vel_limit : float, optional
max velocity magnitude in the y direction, by default Y_VEL_LIMIT_DEFAULT
z_vel_limit : float, optional
max velocity magnitude in the z direction, by default Z_VEL_LIMIT_DEFAULT
control_bounds_high : Union[float, np.ndarray], optional
upper bound of allowable control. Pass a list for element specific limit. By default 1
control_bounds_low : Union[float, np.ndarray], optional
lower bound of allowable control. Pass a list for element specific limit. By default -1
backup_controller : RTABackupController, optional
backup controller object utilized by rta module to generate backup control.
By default Docking2dStopLQRBackupController
jit_compile_dict: Dict[str, bool], optional
Dictionary specifying which subroutines will be jax jit compiled. Behavior defined in self.compose()
integration_method: str, optional
Integration method to use, either 'RK45' or 'Euler'
"""
def __init__(
self,
*args,
backup_window: float = 5,
m: float = M_DEFAULT,
n: float = N_DEFAULT,
v0: float = V0_DEFAULT,
v1_coef: float = V1_COEF_DEFAULT,
x_vel_limit: float = X_VEL_LIMIT_DEFAULT,
y_vel_limit: float = Y_VEL_LIMIT_DEFAULT,
z_vel_limit: float = Z_VEL_LIMIT_DEFAULT,
control_bounds_high: float = 1,
control_bounds_low: float = -1,
backup_controller: RTABackupController = None,
jit_compile_dict: Dict[str, bool] = None,
integration_method: str = 'RK45',
**kwargs
):
self.m = m
self.n = n
self.v0 = v0
self.v1_coef = v1_coef
self.x_vel_limit = x_vel_limit
self.y_vel_limit = y_vel_limit
self.z_vel_limit = z_vel_limit
self.integration_method = integration_method
if backup_controller is None:
backup_controller = Docking3dStopLQRBackupController(m=self.m, n=self.n)
if jit_compile_dict is None:
jit_compile_dict = {'constraint_violation': True}
super().__init__(
*args,
backup_window=backup_window,
backup_controller=backup_controller,
control_bounds_high=control_bounds_high,
control_bounds_low=control_bounds_low,
jit_compile_dict=jit_compile_dict,
**kwargs
)
    def _setup_properties(self):
        """Initializes docking-specific properties via the mixin, using the configured integrator."""
        self._setup_docking_properties(self.m, self.n, self.v1_coef, self.jit_compile_dict, self.integration_method)
    def _setup_constraints(self) -> OrderedDict:
        """Builds the docking constraint set via the mixin (relative velocity + axis velocity limits)."""
        return self._setup_docking_constraints(self.v0, self.v1, self.x_vel_limit, self.y_vel_limit, self.z_vel_limit)
    def _pred_state(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        """Predicts the next state using the configured integration method ('RK45' or 'Euler')."""
        return self._docking_pred_state(state, step_size, control, self.integration_method)
class Docking3dExplicitOptimizationRTA(ExplicitASIFModule, Docking3dRTAMixin):
    """Explicit Active Set Invariance Filter (ASIF) RTA for the 3d docking problem.
    Parameters
    ----------
    m : float, optional
        mass in kg of spacecraft, by default M_DEFAULT
    n : float, optional
        orbital mean motion in rad/s of current Hill's reference frame, by default N_DEFAULT
    v0 : float, optional
        Maximum safe docking velocity in m/s, by default V0_DEFAULT.
        v0 of v_limit = v0 + v1*n*||r||
    v1_coef : float, optional
        coefficient of the linear, distance-dependent component of the speed limit in 1/seconds,
        by default V1_COEF_DEFAULT. v1_coef of v_limit = v0 + v1_coef*n*||r||
    x_vel_limit : float, optional
        max velocity magnitude in the x direction, by default X_VEL_LIMIT_DEFAULT
    y_vel_limit : float, optional
        max velocity magnitude in the y direction, by default Y_VEL_LIMIT_DEFAULT
    z_vel_limit : float, optional
        max velocity magnitude in the z direction, by default Z_VEL_LIMIT_DEFAULT
    control_bounds_high : Union[float, np.ndarray], optional
        upper bound of allowable control. Pass a list for element specific limit. By default 1
    control_bounds_low : Union[float, np.ndarray], optional
        lower bound of allowable control. Pass a list for element specific limit. By default -1
    jit_compile_dict: Dict[str, bool], optional
        Dictionary specifying which subroutines will be jax jit compiled. Behavior defined in self.compose()
    """
    def __init__(
        self,
        *args,
        m: float = M_DEFAULT,
        n: float = N_DEFAULT,
        v0: float = V0_DEFAULT,
        v1_coef: float = V1_COEF_DEFAULT,
        x_vel_limit: float = X_VEL_LIMIT_DEFAULT,
        y_vel_limit: float = Y_VEL_LIMIT_DEFAULT,
        z_vel_limit: float = Z_VEL_LIMIT_DEFAULT,
        control_bounds_high: float = 1,
        control_bounds_low: float = -1,
        jit_compile_dict: Dict[str, bool] = None,
        **kwargs
    ):
        # Parameters are stored before super().__init__, which may call the
        # _setup_* hooks below that read these attributes.
        self.m = m
        self.n = n
        self.v0 = v0
        self.v1_coef = v1_coef
        self.x_vel_limit = x_vel_limit
        self.y_vel_limit = y_vel_limit
        self.z_vel_limit = z_vel_limit
        # Default jit configuration: compile the barrier constraint matrix generator.
        jit_compile_dict = {'generate_barrier_constraint_mats': True} if jit_compile_dict is None else jit_compile_dict
        super().__init__(
            *args,
            control_dim=3,
            control_bounds_high=control_bounds_high,
            control_bounds_low=control_bounds_low,
            jit_compile_dict=jit_compile_dict,
            **kwargs
        )
    def _setup_properties(self):
        """Initializes docking properties via the mixin; explicit ASIF always passes 'RK45' here."""
        self._setup_docking_properties(self.m, self.n, self.v1_coef, self.jit_compile_dict, 'RK45')
    def _setup_constraints(self) -> OrderedDict:
        """Builds the docking constraint set via the mixin."""
        return self._setup_docking_constraints(self.v0, self.v1, self.x_vel_limit, self.y_vel_limit, self.z_vel_limit)
    def _pred_state(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        """Stub (returns None). NOTE(review): explicit ASIF appears not to need state
        prediction — confirm against ExplicitASIFModule before implementing."""
        pass
    def state_transition_system(self, state: jnp.ndarray) -> jnp.ndarray:
        """f(x) of the control affine dynamics dx/dt = f(x) + g(x)u."""
        return self._docking_f_x(state)
    def state_transition_input(self, state: jnp.ndarray) -> jnp.ndarray:
        """g(x) of the control affine dynamics dx/dt = f(x) + g(x)u."""
        return self._docking_g_x(state)
class Docking3dImplicitOptimizationRTA(ImplicitASIFModule, Docking3dRTAMixin):
    """
    Implements Implicit Optimization RTA for the 3d Docking problem
    Utilizes Implicit Active Set Invariance Function algorithm
    Parameters
    ----------
    backup_window : float
        Duration of time in seconds to evaluate backup controller trajectory
    num_check_all : int
        Number of points at beginning of backup trajectory to check at every sequential simulation timestep.
        Should be <= backup_window.
        By default 5. With skip_length at its default of 1, all backup trajectory points are checked.
    skip_length : int
        After num_check_all points in the backup trajectory are checked, the remainder of the backup window is filled by
        skipping every skip_length points to reduce the number of backup trajectory constraints. Will always check the
        last point in the backup trajectory as well.
        Defaults to 1, resulting in no skipping.
    m : float, optional
        mass in kg of spacecraft, by default M_DEFAULT
    n : float, optional
        orbital mean motion in rad/s of current Hill's reference frame, by default N_DEFAULT
    v0 : float, optional
        Maximum safe docking velocity in m/s, by default V0_DEFAULT
        v0 of v_limit = v0 + v1*n*||r||
    v1_coef : float, optional
        coefficient of linear component of the distance depending speed limit in 1/seconds, by default V1_COEF_DEFAULT
        v1_coef of v_limit = v0 + v1_coef*n*||r||
    x_vel_limit : float, optional
        max velocity magnitude in the x direction, by default X_VEL_LIMIT_DEFAULT
    y_vel_limit : float, optional
        max velocity magnitude in the y direction, by default Y_VEL_LIMIT_DEFAULT
    z_vel_limit : float, optional
        max velocity magnitude in the z direction, by default Z_VEL_LIMIT_DEFAULT
    control_bounds_high : Union[float, np.ndarray], optional
        upper bound of allowable control. Pass a list for element specific limit. By default 1
    control_bounds_low : Union[float, np.ndarray], optional
        lower bound of allowable control. Pass a list for element specific limit. By default -1
    backup_controller : RTABackupController, optional
        backup controller object utilized by rta module to generate backup control.
        By default Docking3dStopLQRBackupController
    jit_compile_dict: Dict[str, bool], optional
        Dictionary specifying which subroutines will be jax jit compiled. Behavior defined in self.compose()
    integration_method: str, optional
        Integration method to use, either 'RK45' or 'Euler'
    """
    def __init__(
        self,
        *args,
        backup_window: float = 5,
        num_check_all: int = 5,
        skip_length: int = 1,
        m: float = M_DEFAULT,
        n: float = N_DEFAULT,
        v0: float = V0_DEFAULT,
        v1_coef: float = V1_COEF_DEFAULT,
        x_vel_limit: float = X_VEL_LIMIT_DEFAULT,
        y_vel_limit: float = Y_VEL_LIMIT_DEFAULT,
        z_vel_limit: float = Z_VEL_LIMIT_DEFAULT,
        control_bounds_high: float = 1,
        control_bounds_low: float = -1,
        backup_controller: RTABackupController = None,
        jit_compile_dict: Dict[str, bool] = None,
        integration_method: str = 'RK45',
        **kwargs
    ):
        # NOTE: attributes are assigned before super().__init__, which may invoke
        # the _setup_* hooks that read them.
        self.m = m
        self.n = n
        self.v0 = v0
        self.v1_coef = v1_coef
        self.x_vel_limit = x_vel_limit
        self.y_vel_limit = y_vel_limit
        self.z_vel_limit = z_vel_limit
        self.integration_method = integration_method
        if backup_controller is None:
            backup_controller = Docking3dStopLQRBackupController(m=self.m, n=self.n)
        if jit_compile_dict is None:
            jit_compile_dict = {'generate_barrier_constraint_mats': False, 'generate_ineq_constraint_mats': True}
        super().__init__(
            *args,
            control_dim=3,
            backup_window=backup_window,
            num_check_all=num_check_all,
            skip_length=skip_length,
            backup_controller=backup_controller,
            control_bounds_high=control_bounds_high,
            control_bounds_low=control_bounds_low,
            jit_compile_dict=jit_compile_dict,
            **kwargs
        )
    def _setup_properties(self):
        """Initializes docking-specific properties via the mixin, using the configured integrator."""
        self._setup_docking_properties(self.m, self.n, self.v1_coef, self.jit_compile_dict, self.integration_method)
    def _setup_constraints(self) -> OrderedDict:
        """Builds the docking constraint set via the mixin."""
        return self._setup_docking_constraints(self.v0, self.v1, self.x_vel_limit, self.y_vel_limit, self.z_vel_limit)
    def _pred_state(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        """Predicts the next state using the configured integration method."""
        return self._docking_pred_state(state, step_size, control, self.integration_method)
    def state_transition_system(self, state: jnp.ndarray) -> jnp.ndarray:
        """f(x) of the control affine dynamics dx/dt = f(x) + g(x)u."""
        return self._docking_f_x(state)
    def state_transition_input(self, state: jnp.ndarray) -> jnp.ndarray:
        """g(x) of the control affine dynamics dx/dt = f(x) + g(x)u."""
        return self._docking_g_x(state)
class Docking3dStopLQRBackupController(RTABackupController):
    """LQR backup controller that drives the 3d CWHSpacecraft's velocity to zero.
    Parameters
    ----------
    m : float, optional
        mass in kg of spacecraft, by default M_DEFAULT
    n : float, optional
        orbital mean motion in rad/s of current Hill's reference frame, by default N_DEFAULT
    """
    def __init__(self, m: float = M_DEFAULT, n: float = N_DEFAULT):
        # LQR weight matrices: cheap state error, expensive actuation.
        self.Q = jnp.multiply(.050, jnp.eye(6))
        self.R = jnp.multiply(1000, jnp.eye(3))
        self.A, self.B = generate_cwh_matrices(m, n, mode="3d")
        # Solve the continuous-time algebraic Riccati equation for this system
        riccati_soln = scipy.linalg.solve_continuous_are(self.A, self.B, self.Q, self.R)
        # Feedback gain matrix K = R^-1 B^T P
        self.K = jnp.linalg.inv(self.R) @ (jnp.transpose(self.B) @ riccati_soln)
        super().__init__()
    def _generate_control(
        self,
        state: jnp.ndarray,
        step_size: float,
        controller_state: Union[jnp.ndarray, Dict[str, jnp.ndarray], None] = None
    ) -> Tuple[jnp.ndarray, None]:
        """Returns the LQR feedback action; the target is the current position with zero velocity."""
        # Target keeps position, zeroes velocity, so the error reduces to the velocity terms.
        target = state.at[3:].set(0)
        velocity_error = state - target
        return -self.K @ velocity_error, None
class ConstraintDocking3dRelativeVelocity(ConstraintModule):
    """CWH NMT velocity constraint
    Parameters
    ----------
    v0: float
        NMT safety constraint velocity upper bound constant component where ||v|| <= v0 + v1*distance. m/s
    v1: float
        NMT safety constraint velocity upper bound distance proportionality coefficient where
        ||v|| <= v0 + v1*distance. m/s
    delta: float
        Small positive value summed inside the vector norm sqrt operation to make constraint differentiable at 0
    alpha : ConstraintStrengthener
        Constraint Strengthener object used for ASIF methods. Required for ASIF methods.
        Defaults to PolynomialConstraintStrengthener([0, 0.05, 0, 0.1])
    """
    def __init__(self, v0: float, v1: float, delta: float = 1e-5, alpha: ConstraintStrengthener = None):
        self.v0 = v0
        self.v1 = v1
        self.delta = delta
        if alpha is None:
            alpha = PolynomialConstraintStrengthener([0, 0.05, 0, 0.1])
        super().__init__(alpha=alpha)
    def _compute(self, state: jnp.ndarray) -> float:
        """Constraint value (v0 + v1*||r||) - ||v||; non-negative when the speed limit holds."""
        return (self.v0 + self.v1 * norm_with_delta(state[0:3], self.delta)) - norm_with_delta(state[3:6], self.delta)
from collections import OrderedDict
from typing import Dict, Tuple, Union
import jax.numpy as jnp
import numpy as np
import scipy
from safe_autonomy_dynamics.base_models import BaseLinearODESolverDynamics
from safe_autonomy_dynamics.cwh import M_DEFAULT, N_DEFAULT, generate_cwh_matrices
from run_time_assurance.constraint import (
ConstraintMagnitudeStateLimit,
ConstraintModule,
ConstraintStrengthener,
PolynomialConstraintStrengthener,
)
from run_time_assurance.controller import RTABackupController
from run_time_assurance.rta import ExplicitASIFModule, ExplicitSimplexModule, ImplicitASIFModule, ImplicitSimplexModule
from run_time_assurance.state import RTAStateWrapper
from run_time_assurance.utils import norm_with_delta, to_jnp_array_jit
X_VEL_LIMIT_DEFAULT = 10
Y_VEL_LIMIT_DEFAULT = 10
V0_DEFAULT = 0.2
V1_COEF_DEFAULT = 2
V1_DEFAULT = V1_COEF_DEFAULT * N_DEFAULT
class Docking2dState(RTAStateWrapper):
    """RTA state for cwh docking 2d RTA

    Wraps a state vector ordered [x, y, x_dot, y_dot] (indices 0-3).
    """
    @property
    def x(self) -> float:
        """Getter for x position"""
        return self.vector[0]
    @x.setter
    def x(self, val: float):
        """Setter for x position"""
        self.vector[0] = val
    @property
    def y(self) -> float:
        """Getter for y position"""
        return self.vector[1]
    @y.setter
    def y(self, val: float):
        """Setter for y position"""
        self.vector[1] = val
    @property
    def x_dot(self) -> float:
        """Getter for x velocity component"""
        return self.vector[2]
    @x_dot.setter
    def x_dot(self, val: float):
        """Setter for x velocity component"""
        self.vector[2] = val
    @property
    def y_dot(self) -> float:
        """Getter for y velocity component"""
        return self.vector[3]
    @y_dot.setter
    def y_dot(self, val: float):
        """Setter for y velocity component"""
        self.vector[3] = val
class Docking2dRTAMixin:
    """Mixin class provides 2D docking RTA util functions
    Must call mixin methods using the RTA interface methods
    """
    def _setup_docking_properties(self, m: float, n: float, v1_coef: float, jit_compile_dict: Dict[str, bool], integration_method: str):
        """Initializes docking specific properties from other class members"""
        # v1 of v_limit = v0 + v1*||r||, with v1 = v1_coef * n
        self.v1 = v1_coef * n
        A, B = generate_cwh_matrices(m, n, mode="2d")
        self.A = jnp.array(A)
        self.B = jnp.array(B)
        self.dynamics = BaseLinearODESolverDynamics(A=A, B=B, integration_method=integration_method)
        # RK45 integration cannot be jit compiled (see the errors below): default those
        # subroutines off, and reject explicit requests to compile them.
        if integration_method == 'RK45':
            jit_compile_dict.setdefault('pred_state', False)
            jit_compile_dict.setdefault('integrate', False)
            if jit_compile_dict.get('pred_state'):
                raise ValueError('pred_state uses RK45 integration and can not be compiled using jit')
            if jit_compile_dict.get('integrate'):
                raise ValueError('integrate uses RK45 integration and can not be compiled using jit')
        elif integration_method == 'Euler':
            jit_compile_dict.setdefault('pred_state', True)
            jit_compile_dict.setdefault('integrate', True)
        else:
            raise ValueError('integration_method must be either RK45 or Euler')
    def _setup_docking_constraints(self, v0: float, v1: float, x_vel_limit: float, y_vel_limit: float) -> OrderedDict:
        """generates constraints used in the docking problem"""
        # state indices 2 and 3 are x_dot and y_dot respectively
        return OrderedDict(
            [
                ('rel_vel', ConstraintDocking2dRelativeVelocity(v0=v0, v1=v1)),
                ('x_vel', ConstraintMagnitudeStateLimit(limit_val=x_vel_limit, state_index=2)),
                ('y_vel', ConstraintMagnitudeStateLimit(limit_val=y_vel_limit, state_index=3)),
            ]
        )
    def _docking_pred_state(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray, integration_method: str) -> jnp.ndarray:
        """Predicts the next state given the current state and control action"""
        if integration_method == 'RK45':
            # the ODE solver operates on numpy arrays; convert back to jax afterwards
            next_state_vec, _ = self.dynamics.step(step_size, np.array(state), np.array(control))
            out = to_jnp_array_jit(next_state_vec)
        elif integration_method == 'Euler':
            # single forward-Euler step of the control affine dynamics
            state_dot = self._docking_f_x(state) + self._docking_g_x(state) @ control
            out = state + state_dot * step_size
        else:
            raise ValueError('integration_method must be either RK45 or Euler')
        return out
    def _docking_f_x(self, state: jnp.ndarray) -> jnp.ndarray:
        """Computes the system contribution to the state transition: f(x) of dx/dt = f(x) + g(x)u"""
        return self.A @ state
    def _docking_g_x(self, _: jnp.ndarray) -> jnp.ndarray:
        """Computes the control input contribution to the state transition: g(x) of dx/dt = f(x) + g(x)u"""
        return jnp.copy(self.B)
class Docking2dExplicitSwitchingRTA(ExplicitSimplexModule, Docking2dRTAMixin):
    """Explicit Simplex (switching) RTA filter for the 2d docking problem.
    Parameters
    ----------
    m : float, optional
        mass in kg of spacecraft, by default M_DEFAULT
    n : float, optional
        orbital mean motion in rad/s of current Hill's reference frame, by default N_DEFAULT
    v0 : float, optional
        Maximum safe docking velocity in m/s, by default V0_DEFAULT.
        v0 of v_limit = v0 + v1*n*||r||
    v1_coef : float, optional
        coefficient of the linear, distance-dependent component of the speed limit in 1/seconds,
        by default V1_COEF_DEFAULT. v1_coef of v_limit = v0 + v1_coef*n*||r||
    x_vel_limit : float, optional
        max velocity magnitude in the x direction, by default X_VEL_LIMIT_DEFAULT
    y_vel_limit : float, optional
        max velocity magnitude in the y direction, by default Y_VEL_LIMIT_DEFAULT
    control_bounds_high : Union[float, list, np.ndarray, jnp.ndarray], optional
        upper bound of allowable control. Pass a list for element specific limit. By default 1
    control_bounds_low : Union[float, list, np.ndarray, jnp.ndarray], optional
        lower bound of allowable control. Pass a list for element specific limit. By default -1
    backup_controller : RTABackupController, optional
        backup controller object utilized by rta module to generate backup control.
        By default Docking2dStopLQRBackupController
    jit_compile_dict: Dict[str, bool], optional
        Dictionary specifying which subroutines will be jax jit compiled. Behavior defined in self.compose()
    integration_method: str, optional
        Integration method to use, either 'RK45' or 'Euler'
    """
    def __init__(
        self,
        *args,
        m: float = M_DEFAULT,
        n: float = N_DEFAULT,
        v0: float = V0_DEFAULT,
        v1_coef: float = V1_COEF_DEFAULT,
        x_vel_limit: float = X_VEL_LIMIT_DEFAULT,
        y_vel_limit: float = Y_VEL_LIMIT_DEFAULT,
        control_bounds_high: Union[float, np.ndarray] = 1,
        control_bounds_low: Union[float, np.ndarray] = -1,
        backup_controller: RTABackupController = None,
        jit_compile_dict: Dict[str, bool] = None,
        integration_method: str = 'RK45',
        **kwargs
    ):
        # Parameters are stored before super().__init__, which may call the
        # _setup_* hooks below that read these attributes.
        self.m = m
        self.n = n
        self.v0 = v0
        self.v1_coef = v1_coef
        self.x_vel_limit = x_vel_limit
        self.y_vel_limit = y_vel_limit
        self.integration_method = integration_method
        # Default to a stopping LQR backup controller matched to this spacecraft.
        backup_controller = Docking2dStopLQRBackupController(m=self.m, n=self.n) if backup_controller is None else backup_controller
        # Default jit configuration: compile only the constraint violation check.
        jit_compile_dict = {'constraint_violation': True} if jit_compile_dict is None else jit_compile_dict
        super().__init__(
            *args,
            control_bounds_high=control_bounds_high,
            control_bounds_low=control_bounds_low,
            backup_controller=backup_controller,
            jit_compile_dict=jit_compile_dict,
            **kwargs
        )
    def _setup_properties(self):
        """Initializes docking-specific properties via the mixin, using the configured integrator."""
        self._setup_docking_properties(self.m, self.n, self.v1_coef, self.jit_compile_dict, self.integration_method)
    def _setup_constraints(self) -> OrderedDict:
        """Builds the docking constraint set via the mixin."""
        return self._setup_docking_constraints(self.v0, self.v1, self.x_vel_limit, self.y_vel_limit)
    def _pred_state(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        """Predicts the next state using the configured integration method."""
        return self._docking_pred_state(state, step_size, control, self.integration_method)
class Docking2dImplicitSwitchingRTA(ImplicitSimplexModule, Docking2dRTAMixin):
    """Implements Implicit Switching RTA for the 2d Docking problem
    Parameters
    ----------
    backup_window : float
        Duration of time in seconds to evaluate backup controller trajectory
    m : float, optional
        mass in kg of spacecraft, by default M_DEFAULT
    n : float, optional
        orbital mean motion in rad/s of current Hill's reference frame, by default N_DEFAULT
    v0 : float, optional
        Maximum safe docking velocity in m/s, by default V0_DEFAULT
        v0 of v_limit = v0 + v1*n*||r||
    v1_coef : float, optional
        coefficient of linear component of the distance depending speed limit in 1/seconds, by default V1_COEF_DEFAULT
        v1_coef of v_limit = v0 + v1_coef*n*||r||
    x_vel_limit : float, optional
        max velocity magnitude in the x direction, by default X_VEL_LIMIT_DEFAULT
    y_vel_limit : float, optional
        max velocity magnitude in the y direction, by default Y_VEL_LIMIT_DEFAULT
    control_bounds_high : Union[float, list, np.ndarray, jnp.ndarray], optional
        upper bound of allowable control. Pass a list for element specific limit. By default 1
    control_bounds_low : Union[float, list, np.ndarray, jnp.ndarray], optional
        lower bound of allowable control. Pass a list for element specific limit. By default -1
    backup_controller : RTABackupController, optional
        backup controller object utilized by rta module to generate backup control.
        By default Docking2dStopLQRBackupController
    jit_compile_dict: Dict[str, bool], optional
        Dictionary specifying which subroutines will be jax jit compiled. Behavior defined in self.compose()
    integration_method: str, optional
        Integration method to use, either 'RK45' or 'Euler'
    """
    def __init__(
        self,
        *args,
        backup_window: float = 5,
        m: float = M_DEFAULT,
        n: float = N_DEFAULT,
        v0: float = V0_DEFAULT,
        v1_coef: float = V1_COEF_DEFAULT,
        x_vel_limit: float = X_VEL_LIMIT_DEFAULT,
        y_vel_limit: float = Y_VEL_LIMIT_DEFAULT,
        control_bounds_high: Union[float, np.ndarray] = 1,
        control_bounds_low: Union[float, np.ndarray] = -1,
        backup_controller: RTABackupController = None,
        jit_compile_dict: Dict[str, bool] = None,
        integration_method: str = 'RK45',
        **kwargs
    ):
        # NOTE: attributes are assigned before super().__init__, which may invoke
        # the _setup_* hooks that read them.
        self.m = m
        self.n = n
        self.v0 = v0
        self.v1_coef = v1_coef
        self.x_vel_limit = x_vel_limit
        self.y_vel_limit = y_vel_limit
        self.integration_method = integration_method
        if backup_controller is None:
            backup_controller = Docking2dStopLQRBackupController(m=self.m, n=self.n)
        if jit_compile_dict is None:
            jit_compile_dict = {'constraint_violation': True}
        super().__init__(
            *args,
            backup_window=backup_window,
            backup_controller=backup_controller,
            control_bounds_high=control_bounds_high,
            control_bounds_low=control_bounds_low,
            jit_compile_dict=jit_compile_dict,
            **kwargs
        )
    def _setup_properties(self):
        """Initializes docking-specific properties via the mixin, using the configured integrator."""
        self._setup_docking_properties(self.m, self.n, self.v1_coef, self.jit_compile_dict, self.integration_method)
    def _setup_constraints(self) -> OrderedDict:
        """Builds the docking constraint set via the mixin."""
        return self._setup_docking_constraints(self.v0, self.v1, self.x_vel_limit, self.y_vel_limit)
    def _pred_state(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        """Predicts the next state using the configured integration method."""
        return self._docking_pred_state(state, step_size, control, self.integration_method)
class Docking2dExplicitOptimizationRTA(ExplicitASIFModule, Docking2dRTAMixin):
    """
    Implements Explicit Optimization RTA for the 2d Docking problem
    Utilizes Explicit Active Set Invariance Function algorithm
    Parameters
    ----------
    m : float, optional
        mass in kg of spacecraft, by default M_DEFAULT
    n : float, optional
        orbital mean motion in rad/s of current Hill's reference frame, by default N_DEFAULT
    v0 : float, optional
        Maximum safe docking velocity in m/s, by default V0_DEFAULT
        v0 of v_limit = v0 + v1*n*||r||
    v1_coef : float, optional
        coefficient of linear component of the distance depending speed limit in 1/seconds, by default V1_COEF_DEFAULT
        v1_coef of v_limit = v0 + v1_coef*n*||r||
    x_vel_limit : float, optional
        max velocity magnitude in the x direction, by default X_VEL_LIMIT_DEFAULT
    y_vel_limit : float, optional
        max velocity magnitude in the y direction, by default Y_VEL_LIMIT_DEFAULT
    control_bounds_high : Union[float, list, np.ndarray, jnp.ndarray], optional
        upper bound of allowable control. Pass a list for element specific limit. By default 1
    control_bounds_low : Union[float, list, np.ndarray, jnp.ndarray], optional
        lower bound of allowable control. Pass a list for element specific limit. By default -1
    jit_compile_dict: Dict[str, bool], optional
        Dictionary specifying which subroutines will be jax jit compiled. Behavior defined in self.compose()
    """
    def __init__(
        self,
        *args,
        m: float = M_DEFAULT,
        n: float = N_DEFAULT,
        v0: float = V0_DEFAULT,
        v1_coef: float = V1_COEF_DEFAULT,
        x_vel_limit: float = X_VEL_LIMIT_DEFAULT,
        y_vel_limit: float = Y_VEL_LIMIT_DEFAULT,
        control_bounds_high: Union[float, np.ndarray] = 1,
        control_bounds_low: Union[float, np.ndarray] = -1,
        jit_compile_dict: Dict[str, bool] = None,
        **kwargs
    ):
        # NOTE: attributes are assigned before super().__init__, which may invoke
        # the _setup_* hooks that read them.
        self.m = m
        self.n = n
        self.v0 = v0
        self.v1_coef = v1_coef
        self.x_vel_limit = x_vel_limit
        self.y_vel_limit = y_vel_limit
        if jit_compile_dict is None:
            jit_compile_dict = {'generate_barrier_constraint_mats': True}
        super().__init__(
            *args,
            control_dim=2,
            control_bounds_high=control_bounds_high,
            control_bounds_low=control_bounds_low,
            jit_compile_dict=jit_compile_dict,
            **kwargs
        )
    def _setup_properties(self):
        """Initializes docking properties via the mixin; explicit ASIF always passes 'RK45' here."""
        self._setup_docking_properties(self.m, self.n, self.v1_coef, self.jit_compile_dict, 'RK45')
    def _setup_constraints(self) -> OrderedDict:
        """Builds the docking constraint set via the mixin."""
        return self._setup_docking_constraints(self.v0, self.v1, self.x_vel_limit, self.y_vel_limit)
    def _pred_state(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        """Stub (returns None). NOTE(review): explicit ASIF appears not to need state
        prediction — confirm against ExplicitASIFModule before implementing."""
        pass
    def state_transition_system(self, state: jnp.ndarray) -> jnp.ndarray:
        """f(x) of the control affine dynamics dx/dt = f(x) + g(x)u."""
        return self._docking_f_x(state)
    def state_transition_input(self, state: jnp.ndarray) -> jnp.ndarray:
        """g(x) of the control affine dynamics dx/dt = f(x) + g(x)u."""
        return self._docking_g_x(state)
class Docking2dImplicitOptimizationRTA(ImplicitASIFModule, Docking2dRTAMixin):
    """
    Implements Implicit Optimization RTA for the 2d Docking problem
    Utilizes Implicit Active Set Invariance Function algorithm
    Parameters
    ----------
    backup_window : float
        Duration of time in seconds to evaluate backup controller trajectory
    num_check_all : int
        Number of points at beginning of backup trajectory to check at every sequential simulation timestep.
        Should be <= backup_window.
        By default 5. With skip_length at its default of 1, all backup trajectory points are checked.
    skip_length : int
        After num_check_all points in the backup trajectory are checked, the remainder of the backup window is filled by
        skipping every skip_length points to reduce the number of backup trajectory constraints. Will always check the
        last point in the backup trajectory as well.
        Defaults to 1, resulting in no skipping.
    m : float, optional
        mass in kg of spacecraft, by default M_DEFAULT
    n : float, optional
        orbital mean motion in rad/s of current Hill's reference frame, by default N_DEFAULT
    v0 : float, optional
        Maximum safe docking velocity in m/s, by default V0_DEFAULT
        v0 of v_limit = v0 + v1*n*||r||
    v1_coef : float, optional
        coefficient of linear component of the distance depending speed limit in 1/seconds, by default V1_COEF_DEFAULT
        v1_coef of v_limit = v0 + v1_coef*n*||r||
    x_vel_limit : float, optional
        max velocity magnitude in the x direction, by default X_VEL_LIMIT_DEFAULT
    y_vel_limit : float, optional
        max velocity magnitude in the y direction, by default Y_VEL_LIMIT_DEFAULT
    control_bounds_high : Union[float, list, np.ndarray, jnp.ndarray], optional
        upper bound of allowable control. Pass a list for element specific limit. By default 1
    control_bounds_low : Union[float, list, np.ndarray, jnp.ndarray], optional
        lower bound of allowable control. Pass a list for element specific limit. By default -1
    backup_controller : RTABackupController, optional
        backup controller object utilized by rta module to generate backup control.
        By default Docking2dStopLQRBackupController
    jit_compile_dict: Dict[str, bool], optional
        Dictionary specifying which subroutines will be jax jit compiled. Behavior defined in self.compose()
    integration_method: str, optional
        Integration method to use, either 'RK45' or 'Euler'
    """
    def __init__(
        self,
        *args,
        backup_window: float = 5,
        num_check_all: int = 5,
        skip_length: int = 1,
        m: float = M_DEFAULT,
        n: float = N_DEFAULT,
        v0: float = V0_DEFAULT,
        v1_coef: float = V1_COEF_DEFAULT,
        x_vel_limit: float = X_VEL_LIMIT_DEFAULT,
        y_vel_limit: float = Y_VEL_LIMIT_DEFAULT,
        control_bounds_high: Union[float, np.ndarray] = 1,
        control_bounds_low: Union[float, np.ndarray] = -1,
        backup_controller: RTABackupController = None,
        jit_compile_dict: Dict[str, bool] = None,
        integration_method: str = 'RK45',
        **kwargs
    ):
        # NOTE: attributes are assigned before super().__init__, which may invoke
        # the _setup_* hooks that read them.
        self.m = m
        self.n = n
        self.v0 = v0
        self.v1_coef = v1_coef
        self.x_vel_limit = x_vel_limit
        self.y_vel_limit = y_vel_limit
        self.integration_method = integration_method
        if backup_controller is None:
            backup_controller = Docking2dStopLQRBackupController(m=self.m, n=self.n)
        if jit_compile_dict is None:
            jit_compile_dict = {'generate_barrier_constraint_mats': False, 'generate_ineq_constraint_mats': True}
        super().__init__(
            *args,
            control_dim=2,
            backup_window=backup_window,
            num_check_all=num_check_all,
            skip_length=skip_length,
            backup_controller=backup_controller,
            control_bounds_high=control_bounds_high,
            control_bounds_low=control_bounds_low,
            jit_compile_dict=jit_compile_dict,
            **kwargs
        )
    def _setup_properties(self):
        """Initializes docking-specific properties via the mixin, using the configured integrator."""
        self._setup_docking_properties(self.m, self.n, self.v1_coef, self.jit_compile_dict, self.integration_method)
    def _setup_constraints(self) -> OrderedDict:
        """Builds the docking constraint set via the mixin."""
        return self._setup_docking_constraints(self.v0, self.v1, self.x_vel_limit, self.y_vel_limit)
    def _pred_state(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        """Predicts the next state using the configured integration method."""
        return self._docking_pred_state(state, step_size, control, self.integration_method)
    def state_transition_system(self, state: jnp.ndarray) -> jnp.ndarray:
        """f(x) of the control affine dynamics dx/dt = f(x) + g(x)u."""
        return self._docking_f_x(state)
    def state_transition_input(self, state: jnp.ndarray) -> jnp.ndarray:
        """g(x) of the control affine dynamics dx/dt = f(x) + g(x)u."""
        return self._docking_g_x(state)
class Docking2dStopLQRBackupController(RTABackupController):
    """LQR backup controller that drives the 2d CWHSpacecraft's velocity to zero.
    Parameters
    ----------
    m : float, optional
        mass in kg of spacecraft, by default M_DEFAULT
    n : float, optional
        orbital mean motion in rad/s of current Hill's reference frame, by default N_DEFAULT
    """
    def __init__(self, m: float = M_DEFAULT, n: float = N_DEFAULT):
        # LQR weight matrices: cheap state error, expensive actuation.
        self.Q = jnp.multiply(.050, jnp.eye(4))
        self.R = jnp.multiply(1000, jnp.eye(2))
        self.A, self.B = generate_cwh_matrices(m, n, mode="2d")
        # Solve the continuous-time algebraic Riccati equation for this system
        riccati_soln = scipy.linalg.solve_continuous_are(self.A, self.B, self.Q, self.R)
        # Feedback gain matrix K = R^-1 B^T P
        self.K = jnp.linalg.inv(self.R) @ (jnp.transpose(self.B) @ riccati_soln)
        super().__init__()
    def _generate_control(
        self,
        state: jnp.ndarray,
        step_size: float,
        controller_state: Union[jnp.ndarray, Dict[str, jnp.ndarray], None] = None
    ) -> Tuple[jnp.ndarray, None]:
        """Returns the LQR feedback action; the target is the current position with zero velocity."""
        # Target keeps position, zeroes velocity, so the error reduces to the velocity terms.
        target = state.at[2:].set(0)
        velocity_error = state - target
        return -self.K @ velocity_error, None
class ConstraintDocking2dRelativeVelocity(ConstraintModule):
    """CWH NMT velocity constraint
    Parameters
    ----------
    v0: float
        NMT safety constraint velocity upper bound constant component where ||v|| <= v0 + v1*distance. m/s
    v1: float
        NMT safety constraint velocity upper bound distance proportionality coefficient where
        ||v|| <= v0 + v1*distance. m/s
    delta: float
        Small positive value summed inside the vector norm sqrt operation to make constraint differentiable at 0
    alpha : ConstraintStrengthener
        Constraint Strengthener object used for ASIF methods. Required for ASIF methods.
        Defaults to PolynomialConstraintStrengthener([0, 0.05, 0, 0.1])
    """
    def __init__(self, v0: float, v1: float, delta: float = 1e-5, alpha: ConstraintStrengthener = None):
        self.v0 = v0
        self.v1 = v1
        self.delta = delta
        if alpha is None:
            alpha = PolynomialConstraintStrengthener([0, 0.05, 0, 0.1])
        super().__init__(alpha=alpha)
    def _compute(self, state: jnp.ndarray) -> float:
        """Constraint value v0 + v1*||r|| - ||v||; non-negative when the speed limit holds."""
        return self.v0 + self.v1 * norm_with_delta(state[0:2], self.delta) - norm_with_delta(state[2:4], self.delta)
from __future__ import annotations
import abc
import warnings
from typing import Any, Dict, Union
import jax.numpy as jnp
import numpy as np
import quadprog
from jax import jacfwd, jit, vmap
from run_time_assurance.constraint import ConstraintModule
from run_time_assurance.controller import RTABackupController
from run_time_assurance.rta.base import BackupControlBasedRTA, ConstraintBasedRTA
from run_time_assurance.utils import SolverError, SolverWarning, to_jnp_array_jit
class ASIFModule(ConstraintBasedRTA):
"""
Base class for Active Set Invariance Filter Optimization RTA
Only supports dynamical systems with dynamics in the form of:
dx/dt = f(x) + g(x)u
Parameters
----------
epsilon : float
threshold distance between desired action and actual safe action at which the rta is said to be intervening
default 1e-2
control_dim : int
length of control vector
solver_exception : bool
When the solver cannot find a solution, True for an exception and False for a warning
"""
    def __init__(self, *args: Any, epsilon: float = 1e-2, control_dim: int, solver_exception: bool = False, **kwargs: Any):
        """Stores ASIF configuration and precomputes the actuation-limit QP matrices.

        NOTE(review): ``epsilon`` is assigned before ``super().__init__`` while the
        remaining attributes are assigned after — preserve this ordering in case the
        parent constructor depends on it.
        """
        self.epsilon = epsilon
        super().__init__(*args, **kwargs)
        self.control_dim = control_dim
        self.solver_exception = solver_exception
        # QP objective weight is the identity, so the QP minimizes ||u - u_desired||^2
        self.obj_weight = np.eye(self.control_dim)
        # Static inequality constraint matrices enforcing actuator bounds.
        self.ineq_weight_actuation, self.ineq_constant_actuation = self._generate_actuation_constraint_mats()
    def compose(self):
        """
        applies jax composition transformations (grad, jit, jacobian etc.)
        jit compilation is determined by the jit_compile_dict constructor parameter
        jit compilation settings:
            generate_barrier_constraint_mats:
                default True
        """
        super().compose()
        # jit-compile the barrier constraint matrix generator unless explicitly disabled;
        # step_size is a static argument, so each distinct step size traces separately.
        if self.jit_compile_dict.get('generate_barrier_constraint_mats', True):
            self._generate_barrier_constraint_mats_fn = jit(self._generate_barrier_constraint_mats, static_argnames=['step_size'])
        else:
            self._generate_barrier_constraint_mats_fn = self._generate_barrier_constraint_mats
    def _filter_control(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        """Solves the ASIF QP, returning the safe control closest to the desired control."""
        ineq_weight, ineq_constant = self._generate_barrier_constraint_mats_fn(state, step_size)
        # quadprog operates on float64 numpy arrays
        desired_control = np.array(control, dtype=np.float64)
        actual_control = self._optimize(self.obj_weight, desired_control, ineq_weight, ineq_constant)
        self.intervening = self.monitor(desired_control, actual_control)
        return to_jnp_array_jit(actual_control)
def _generate_actuation_constraint_mats(self) -> tuple[jnp.ndarray, jnp.ndarray]:
"""generates matrices for quadratic program optimization inequality constraint matrices that impose actuator limits
on optimized control vector
Returns
-------
jnp.ndarray
matix C.T of quadprog inequality constraint C.T x >= b
jnp.ndarray
vector b of quadprog inequality constraint C.T x >= b
"""
ineq_weight = jnp.empty((0, self.control_dim))
ineq_constant = jnp.empty(0)
if self.control_bounds_low is not None:
c, b = get_lower_bound_ineq_constraint_mats(self.control_bounds_low, self.control_dim)
ineq_weight = jnp.vstack((ineq_weight, c))
ineq_constant = jnp.concatenate((ineq_constant, b))
if self.control_bounds_high is not None:
c, b = get_lower_bound_ineq_constraint_mats(self.control_bounds_high, self.control_dim)
c *= -1
b *= -1
ineq_weight = jnp.vstack((ineq_weight, c))
ineq_constant = jnp.concatenate((ineq_constant, b))
return ineq_weight, ineq_constant
def _optimize(
self, obj_weight: np.ndarray, obj_constant: np.ndarray, ineq_weight: jnp.ndarray, ineq_constant: jnp.ndarray
) -> np.ndarray:
"""Solve ASIF optimization problem via quadratic program
Parameters
----------
obj_weight : np.ndarray
matix G of quadprog objective 1/2 x^T G x - a^T x
obj_constant : np.ndarray
vector a of quadprog objective 1/2 x^T G x - a^T x
ineq_weight : jnp.ndarray
matix C.T of quadprog inequality constraint C.T x >= b
ineq_constant : jnp.ndarray
vector b of quadprog inequality constraint C.T x >= b
Returns
-------
np.ndarray
Actual control solved by QP
"""
try:
opt = quadprog.solve_qp(
obj_weight, obj_constant, np.array(ineq_weight, dtype=np.float64), np.array(ineq_constant, dtype=np.float64), 0
)[0]
except ValueError as e:
if e.args[0] == "constraints are inconsistent, no solution":
if not self.solver_exception:
warnings.warn(SolverWarning())
opt = obj_constant
else:
raise SolverError() from e
else:
raise e
return opt
def monitor(self, desired_control: np.ndarray, actual_control: np.ndarray) -> bool:
"""Determines whether the ASIF RTA module is currently intervening
Parameters
----------
desired_control : np.ndarray
desired control vector
actual_control : np.ndarray
actual control vector produced by ASIF optimization
Returns
-------
bool
True if rta module is interveining
"""
return bool(np.linalg.norm(desired_control - actual_control) > self.epsilon)
@abc.abstractmethod
def _generate_barrier_constraint_mats(self, state: jnp.ndarray, step_size: float) -> tuple[jnp.ndarray, jnp.ndarray]:
"""generates matrices for quadratic program optimization inequality constraint matrices corresponding to safety
barrier constraints
Parameters
----------
state : jnp.ndarray
current rta state of the system
step_size : float
duration of control step
Returns
-------
jnp.ndarray
matix C.T of quadprog inequality constraint C.T x >= b
jnp.ndarray
vector b of quadprog inequality constraint C.T x >= b
"""
raise NotImplementedError()
@abc.abstractmethod
def state_transition_system(self, state: jnp.ndarray) -> jnp.ndarray:
"""Computes the system state contribution to the system state's time derivative
i.e. implements f(x) from dx/dt = f(x) + g(x)u
Parameters
----------
state : jnp.ndarray
current rta state of the system
Returns
-------
jnp.ndarray
state time derivative contribution from the current system state
"""
raise NotImplementedError
@abc.abstractmethod
def state_transition_input(self, state: jnp.ndarray) -> jnp.ndarray:
"""Computes the control input matrix contribution to the system state's time derivative
i.e. implements g(x) from dx/dt = f(x) + g(x)u
Parameters
----------
state : jnp.ndarray
current rta state of the system
Returns
-------
jnp.ndarray
input matrix in state space representation time derivative
"""
raise NotImplementedError
class ExplicitASIFModule(ASIFModule):
    """
    Base class implementation of Explicit ASIF RTA

    Applies Nagumo's condition directly to each safety constraint at the current state.

    Only supports dynamical systems with dynamics in the form of:
        dx/dt = f(x) + g(x)u

    Only supports constraints with relative degree difference of 1 between constraint jacobian and
    control input matrix g(x).

    Parameters
    ----------
    epsilon : float
        threshold distance between desired action and actual safe action at which the rta is said to be intervening
        default 1e-2
    control_dim : int
        length of control vector
    """

    def _generate_barrier_constraint_mats(self, state: jnp.ndarray, step_size: float) -> tuple[jnp.ndarray, jnp.ndarray]:
        """Builds the QP inequality system from actuation limits and barrier constraints.

        For each constraint phi, Nagumo's condition requires
        grad(phi) @ (f(x) + g(x)u) + alpha(phi(x)) >= 0, which is rearranged here
        into one row of the C.T x >= b inequality system.

        Parameters
        ----------
        state : jnp.ndarray
            current rta state of the system
        step_size : float
            duration of control step (not used by this explicit formulation)

        Returns
        -------
        jnp.ndarray
            matrix C.T of quadprog inequality constraint C.T x >= b
        jnp.ndarray
            vector b of quadprog inequality constraint C.T x >= b
        """
        weight_rows = []
        constant_terms = []
        for constraint in self.constraints.values():
            grad_x = constraint.grad(state)
            weight_rows.append(grad_x @ self.state_transition_input(state))
            constant_terms.append(-grad_x @ self.state_transition_system(state) - constraint.alpha(constraint(state)))
        if weight_rows:
            ineq_weight_barrier = jnp.stack(weight_rows, axis=0)
            ineq_constant_barrier = jnp.stack(constant_terms)
        else:
            ineq_weight_barrier = jnp.empty((0, self.control_dim))
            ineq_constant_barrier = jnp.empty(0)
        full_weight = jnp.concatenate((self.ineq_weight_actuation, ineq_weight_barrier))
        full_constant = jnp.concatenate((self.ineq_constant_actuation, ineq_constant_barrier))
        return full_weight.T, full_constant
class ImplicitASIFModule(ASIFModule, BackupControlBasedRTA):
    """
    Base class implementation of implicit ASIF RTA

    Requires a backup controller that provides a jacobian of output wrt state vector

    Only supports dynamical systems with dynamics in the form of:
        dx/dt = f(x) + g(x)u

    Parameters
    ----------
    backup_window : float
        Duration of time in seconds to evaluate backup controller trajectory.
    num_check_all : int
        Number of points at beginning of backup trajectory to check at every sequential simulation timestep.
        Should be <= backup_window.
        Defaults to 0 as skip_length defaults to 1 resulting in all backup trajectory points being checked.
    skip_length : int
        After num_check_all points in the backup trajectory are checked, the remainder of the backup window is filled by
        skipping every skip_length points to reduce the number of backup trajectory constraints.
        Defaults to 1, resulting in no skipping.
    subsample_constraints_num_least : int, optional
        subsample the backup trajectory down to the points with the N least constraint function outputs
        i.e. the n points closest to violating a safety constraint
    backup_controller : RTABackupController
        backup controller object utilized by rta module to generate backup control
    """

    def __init__(
        self,
        *args: Any,
        backup_window: float,
        num_check_all: int = 0,
        skip_length: int = 1,
        subsample_constraints_num_least: Union[int, None] = None,
        backup_controller: RTABackupController,
        **kwargs: Any,
    ):
        self.backup_window = backup_window
        self.num_check_all = num_check_all
        self.skip_length = skip_length
        assert (subsample_constraints_num_least is None) or \
            (isinstance(subsample_constraints_num_least, int) and subsample_constraints_num_least > 0), \
            "subsample_constraints_num_least must be a positive integer or None"
        self.subsample_constraints_num_least = subsample_constraints_num_least
        super().__init__(*args, backup_controller=backup_controller, **kwargs)

    def reset(self):
        """Resets the rta module to the initial state at the beginning of an episode
        Also calls reset on the backup controller
        """
        super().reset()
        self.reset_backup_controller()

    def compose(self):
        """
        applies jax composition transformations (grad, jit, jacobian etc.)

        jit compilation is determined by the jit_compile_dict constructor parameter

        jit compilation settings:
            generate_ineq_constraint_mats:
                default True
            pred_state:
                default False
            integrate:
                default False
        """
        # forward-mode jacobian of the closed-loop backup dynamics wrt state; step_size (arg 1) is static
        self._jacobian = jit(jacfwd(self._backup_state_transition), static_argnums=[1], static_argnames=['step_size'])
        if self.jit_compile_dict.get('generate_ineq_constraint_mats', True):
            self._generate_ineq_constraint_mats_fn = jit(self._generate_ineq_constraint_mats, static_argnames=['num_steps'])
        else:
            self._generate_ineq_constraint_mats_fn = self._generate_ineq_constraint_mats
        if self.jit_compile_dict.get('pred_state', False):
            self._pred_state_fn = jit(self._pred_state, static_argnames=['step_size'])
        else:
            self._pred_state_fn = self._pred_state
        if self.jit_compile_dict.get('integrate', False):
            self._integrate_fn = jit(self.integrate, static_argnames=['step_size', 'Nsteps'])
        else:
            self._integrate_fn = self.integrate
        super().compose()

    def jacobian(self, state: jnp.ndarray, step_size: float, controller_state: Union[jnp.ndarray, Dict[str, jnp.ndarray], None] = None):
        """Computes Jacobian of system state transition J(f(x) + g(x,u)) wrt x

        Parameters
        ----------
        state : jnp.ndarray
            Current rta state of the system at which to evaluate Jacobian
        step_size : float
            simulation integration step size
        controller_state: jnp.ndarray or Dict[str, jnp.ndarray] or None
            internal controller state. For stateful controllers, all states that are modified in the control computation
            (e.g. integral control error buffers) must be contained within controller_state

        Returns
        -------
        jnp.ndarray
            Jacobian matrix of state transition
        """
        return self._jacobian(state, step_size, controller_state)

    def _backup_state_transition(
        self, state: jnp.ndarray, step_size: float, controller_state: Union[jnp.ndarray, Dict[str, jnp.ndarray], None] = None
    ):
        # closed-loop backup dynamics: f(x) + g(x) @ u_backup, where u_backup is the
        # first element of the backup controller's (control, controller_state) output
        return self.state_transition_system(state) + self.state_transition_input(state) @ (
            self.backup_controller.generate_control_with_controller_state(state, step_size, controller_state)[0]
        )

    def _generate_barrier_constraint_mats(self, state: jnp.ndarray, step_size: float) -> tuple[jnp.ndarray, jnp.ndarray]:
        """generates matrices for quadratic program optimization inequality constraint matrices corresponding to safety
        barrier constraints

        Computes backup trajectory with backup controller and applies Nagumo's condition on the safety constraints at
        points along backup trajectory.

        Parameters
        ----------
        state : jnp.ndarray
            current rta state of the system
        step_size : float
            duration of control step

        Returns
        -------
        jnp.ndarray
            matrix C.T of quadprog inequality constraint C.T x >= b
        jnp.ndarray
            vector b of quadprog inequality constraint C.T x >= b
        """
        # +1 so the trajectory includes both endpoints of the backup window
        num_steps = int(self.backup_window / step_size) + 1
        traj_states, traj_sensitivities = self._integrate_fn(state, step_size, num_steps)
        return self._generate_ineq_constraint_mats_fn(state, num_steps, traj_states, traj_sensitivities)

    def _generate_ineq_constraint_mats(self, state: jnp.ndarray, num_steps: int, traj_states: jnp.ndarray,
                                       traj_sensitivities: jnp.ndarray) -> tuple[jnp.ndarray, jnp.ndarray]:
        """generates quadratic program optimization inequality constraint matrices corresponding to safety

        Parameters
        ----------
        state : jnp.ndarray
            current rta state of the system
        num_steps : int
            number of trajectory steps
        traj_states : jnp.ndarray
            list of rta states from along the trajectory
        traj_sensitivities: jnp.ndarray
            list of trajectory state sensitivities (i.e. jacobian wrt initial trajectory state).
            Elements are jnp.ndarrays with size (n, n) where n = state.size

        Returns
        -------
        jnp.ndarray
            matrix C.T of quadprog inequality constraint C.T x >= b
        jnp.ndarray
            vector b of quadprog inequality constraint C.T x >= b
        """
        ineq_weight_barrier = jnp.empty((0, self.control_dim))
        ineq_constant_barrier = jnp.empty(0)
        # check the first num_check_all+1 points, then every skip_length-th point after that
        check_points = jnp.hstack(
            (
                jnp.array(range(0, self.num_check_all + 1)),
                jnp.array(range(self.num_check_all + self.skip_length, num_steps, self.skip_length))
            )
        ).astype(int)
        # resample checkpoints to the trajectory points with the min constraint values
        if self.subsample_constraints_num_least is not None:
            # evaluate constraints at all trajectory points
            constraint_vals = []
            for i in check_points:
                traj_state = traj_states[i]
                # worst-case (minimum) constraint value across all constraints at this point
                constraint_val = min([c(traj_state) for c in self.constraints.values()])
                constraint_vals.append(constraint_val)
            # keep only the N points closest to constraint violation
            constraint_sorted_idxs = jnp.argsort(constraint_vals)
            check_points = [check_points[i] for i in constraint_sorted_idxs[0:self.subsample_constraints_num_least]]
        traj_states = jnp.array(traj_states)[check_points, :]
        traj_sensitivities = jnp.array(traj_sensitivities)[check_points, :]
        constraint_list = list(self.constraints.values())
        num_constraints = len(self.constraints)
        for i in range(num_constraints):
            # vectorize the invariance constraint over all checked trajectory points
            constraint_vmapped = vmap(self.invariance_constraints, (None, None, 0, 0), (0, 0))
            point_ineq_weight, point_ineq_constant = constraint_vmapped(constraint_list[i], state, traj_states, traj_sensitivities)
            ineq_weight_barrier = jnp.concatenate((ineq_weight_barrier, point_ineq_weight), axis=0)
            ineq_constant_barrier = jnp.concatenate((ineq_constant_barrier, point_ineq_constant), axis=0)
        ineq_weight = jnp.concatenate((self.ineq_weight_actuation, ineq_weight_barrier))
        ineq_constant = jnp.concatenate((self.ineq_constant_actuation, ineq_constant_barrier))
        return ineq_weight.transpose(), ineq_constant

    def invariance_constraints(
        self, constraint: ConstraintModule, initial_state: jnp.ndarray, traj_state: jnp.ndarray, traj_sensitivity: jnp.ndarray
    ) -> tuple[jnp.ndarray, jnp.ndarray]:
        """Computes safety constraint invariance constraints via Nagumo's condition for a point in the backup trajectory

        Parameters
        ----------
        constraint : ConstraintModule
            constraint to create cbf for
        initial_state : jnp.ndarray
            initial state of the backup trajectory
        traj_state : list
            arbitrary state in the backup trajectory
        traj_sensitivity : list
            backup trajectory state sensitivity (i.e. jacobian relative to the initial state)

        Returns
        -------
        jnp.ndarray
            matrix C.T of quadprog inequality constraint C.T x >= b
        jnp.ndarray
            vector b of quadprog inequality constraint C.T x >= b
        """
        traj_state_array = jnp.array(traj_state)
        traj_sensitivity_array = jnp.array(traj_sensitivity)
        f_x0 = self.state_transition_system(initial_state)
        g_x0 = self.state_transition_input(initial_state)
        grad_x = constraint.grad(traj_state_array)
        # chain rule through the trajectory sensitivity S_i:
        # grad(phi(x_i)) @ S_i @ (f(x0) + g(x0) u) + alpha(phi(x_i)) >= 0,
        # split below into the u-dependent weight row and the constant term
        ineq_weight = grad_x @ (traj_sensitivity_array @ g_x0)
        ineq_constant = grad_x @ (traj_sensitivity_array @ f_x0) \
            + constraint.alpha(constraint(traj_state_array))
        return ineq_weight, -ineq_constant

    def integrate(self, state: jnp.ndarray, step_size: float, Nsteps: int) -> tuple[list, list]:
        """Estimate backup trajectory by polling backup controller backup control and integrating system dynamics

        Parameters
        ----------
        state : jnp.ndarray
            initial rta state of the system
        step_size : float
            simulation integration step size
        Nsteps : int
            number of simulation integration steps

        Returns
        -------
        list
            list of rta states from along the trajectory
        list
            list of trajectory state sensitivities (i.e. jacobian wrt initial trajectory state)
        """
        # sensitivity of the trajectory wrt the initial state starts as identity
        sensitivity = jnp.eye(state.size)
        traj_states = [state.copy()]
        traj_sensitivity = [sensitivity]
        # save/restore so trajectory simulation does not disturb stateful backup controllers
        self.backup_controller_save()
        for _ in range(1, Nsteps):
            control = self.backup_control(state, step_size)
            state = self._pred_state_fn(state, step_size, control)
            # NOTE(review): jacobian is evaluated at the stepped state, not the pre-step state
            traj_jac = self.jacobian(state, step_size, self.backup_controller.controller_state)
            # first-order (Euler) sensitivity propagation: S += (J @ S) * dt
            sensitivity = sensitivity + (traj_jac @ sensitivity) * step_size
            traj_states.append(state)
            traj_sensitivity.append(sensitivity)
        self.backup_controller_restore()
        return traj_states, traj_sensitivity
def get_lower_bound_ineq_constraint_mats(bound: Union[int, float, np.ndarray, jnp.ndarray],
                                         vec_len: int) -> tuple[jnp.ndarray, jnp.ndarray]:
    """Computes inequality constraint matrices for applying a lower bound to optimization var in quadprog

    Parameters
    ----------
    bound : Union[int, float, np.ndarray, jnp.ndarray]
        Lower bound for optimization variable.
        If an array (numpy or jax), must have shape (vec_len,) and is applied elementwise.
        If a number, it is applied to all elements.
    vec_len : int
        optimization variable vector length

    Returns
    -------
    jnp.ndarray
        matrix C.T of quadprog inequality constraint C.T x >= b
    jnp.ndarray
        vector b of quadprog inequality constraint C.T x >= b
    """
    # identity weight matrix applies the bound to each vector element independently
    c = jnp.eye(vec_len)
    if isinstance(bound, (np.ndarray, jnp.ndarray)):
        # validate elementwise bounds for both numpy and jax array inputs
        # (previously only jnp arrays were shape-checked, so numpy bounds could
        # silently broadcast through the scalar branch)
        assert bound.shape == (vec_len, ), f"the shape of bound must be ({vec_len},)"
        b = jnp.asarray(bound)
    else:
        b = bound * jnp.ones(vec_len)
    return c, b
from __future__ import annotations
import abc
from typing import Any
import jax.numpy as jnp
from jax import jit, vmap
from run_time_assurance.controller import RTABackupController
from run_time_assurance.rta.base import BackupControlBasedRTA
from run_time_assurance.utils import add_dim_jit, jnp_stack_jit
class SimplexModule(BackupControlBasedRTA):
    """Base class for simplex RTA modules.

    A simplex filter passes the desired control through untouched while a monitor
    deems it safe, and hands control over to a backup controller as soon as unsafe
    behavior is detected.

    Parameters
    ----------
    backup_controller : RTABackupController
        backup controller object utilized by rta module to generate backup control
    """

    def reset(self):
        """Resets the rta module and its backup controller at the beginning of an episode"""
        super().reset()
        self.reset_backup_controller()

    def compose(self):
        """
        applies jax composition transformations (grad, jit, jacobian etc.)

        jit compilation is determined by the jit_compile_dict constructor parameter

        jit compilation settings:
            constraint_violation:
                default True
            pred_state:
                default False
        """
        super().compose()
        use_jit_violation = self.jit_compile_dict.get('constraint_violation', True)
        self._constraint_violation_fn = jit(self._constraint_violation) if use_jit_violation else self._constraint_violation
        use_jit_pred = self.jit_compile_dict.get('pred_state', False)
        self._pred_state_fn = jit(self._pred_state, static_argnames=['step_size']) if use_jit_pred else self._pred_state

    def _filter_control(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        """Simplex implementation of filter control

        Passes the desired control through unchanged unless the monitor flags it,
        in which case the backup control is returned instead.
        """
        self.intervening = self.monitor(state, step_size, control)
        return self.backup_control(state, step_size) if self.intervening else control

    def monitor(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> bool:
        """Detects if desired control will result in an unsafe state

        Parameters
        ----------
        state : jnp.ndarray
            Current rta state of the system
        step_size : float
            time duration over which filtered control will be applied to actuators
        control : jnp.ndarray
            desired control vector

        Returns
        -------
        bool
            False if the desired control is safe, True if unsafe
        """
        return self._monitor(state, step_size, control, self.intervening)

    @abc.abstractmethod
    def _monitor(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray, intervening: bool) -> bool:
        """custom monitor implementation

        Parameters
        ----------
        state : jnp.ndarray
            Current rta state of the system
        step_size : float
            time duration over which filtered control will be applied to actuators
        control : jnp.ndarray
            desired control vector
        intervening : bool
            whether the simplex rta is currently intervening with the backup controller

        Returns
        -------
        bool
            False if the desired control is safe, True if unsafe
        """
        raise NotImplementedError()

    def _constraint_violation(self, states: jnp.ndarray) -> bool:
        """Returns True if any safety constraint is negative at any of the given states"""
        violation_flags = []
        for constraint in self.constraints.values():
            # evaluate the constraint over all states at once via vmap
            vals_along_traj = vmap(constraint.compute, 0, 0)(states)
            violation_flags.append(jnp.any(vals_along_traj < 0))
        return jnp.any(jnp.array(violation_flags))
class ExplicitSimplexModule(SimplexModule):
    """Base implementation for Explicit Simplex RTA module

    Switches to the backup controller whenever applying the desired control for one
    step would violate a safety constraint. Requires a backup controller that is
    known safe within the constraint set.
    """

    def _monitor(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray, intervening: bool) -> bool:
        """Flags the desired control as unsafe if the predicted next state violates any constraint"""
        next_state = self._pred_state_fn(state, step_size, control)
        violated = self._constraint_violation_fn(add_dim_jit(next_state))
        return bool(violated)
class ImplicitSimplexModule(SimplexModule):
    """Base implementation for Implicit Simplex RTA module

    Switches to the backup controller if applying the desired control would lead to
    a state from which the backup controller cannot recover. This is determined by
    simulating the backup controller's trajectory from the predicted next state and
    checking that no safety constraint is violated along it.

    Parameters
    ----------
    backup_window : float
        Duration of time in seconds to evaluate backup controller trajectory
    backup_controller : RTABackupController
        backup controller object utilized by rta module to generate backup control
    """

    def __init__(self, *args: Any, backup_window: float, backup_controller: RTABackupController, **kwargs: Any):
        self.backup_window = backup_window
        super().__init__(*args, backup_controller=backup_controller, **kwargs)

    def compose(self):
        """
        applies jax composition transformations (grad, jit, jacobian etc.)

        jit compilation is determined by the jit_compile_dict constructor parameter

        jit compilation settings:
            integrate:
                Backup controller trajectory integration
                default False
            See parent class for additional options
        """
        super().compose()
        wants_jit = self.jit_compile_dict.get('integrate', False)
        self._integrate_fn = jit(self.integrate, static_argnames=['step_size']) if wants_jit else self.integrate

    def _monitor(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray, intervening: bool) -> bool:
        """Flags the desired control as unsafe if the resulting backup trajectory violates any constraint"""
        backup_traj = self._integrate_fn(state, step_size, control)
        return bool(self._constraint_violation_fn(backup_traj))

    def integrate(self, state: jnp.ndarray, step_size: float, desired_control: jnp.ndarray) -> jnp.ndarray:
        """Estimate backup trajectory by polling backup controller backup control and integrating system dynamics

        The desired control is applied for one step first, then the backup controller
        is simulated for the remainder of the backup window.

        Parameters
        ----------
        state : jnp.ndarray
            initial rta state of the system
        step_size : float
            simulation integration step size
        desired_control : jnp.ndarray
            control desired by the primary controller

        Returns
        -------
        jnp.ndarray
            jax array of implicit backup trajectory states.
            Shape (M, N) where M is the number of states and N is the dimension of the state vector
        """
        num_backup_steps = int(self.backup_window / step_size)
        state = self._pred_state_fn(state, step_size, desired_control)
        trajectory = [state]
        # save/restore so trajectory simulation does not disturb stateful backup controllers
        self.backup_controller_save()
        for _ in range(num_backup_steps):
            backup_u = self.backup_control(state, step_size)
            state = self._pred_state_fn(state, step_size, backup_u)
            trajectory.append(state)
        self.backup_controller_restore()
        return jnp_stack_jit(trajectory, axis=0)
from __future__ import annotations
import abc
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Union
import jax.numpy as jnp
import numpy as np
from run_time_assurance.constraint import ConstraintModule
from run_time_assurance.controller import RTABackupController
from run_time_assurance.utils import to_jnp_array_jit
class RTAModule(abc.ABC):
    """Base class for RTA modules

    Parameters
    ----------
    control_bounds_high : Union[float, int, list, np.ndarray], optional
        upper bound of allowable control. Pass a list for element specific limit. By default None
    control_bounds_low : Union[float, int, list, np.ndarray], optional
        lower bound of allowable control. Pass a list for element specific limit. By default None
    """

    def __init__(
        self,
        *args: Any,
        control_bounds_high: Optional[Union[float, int, list, np.ndarray]] = None,
        control_bounds_low: Optional[Union[float, int, list, np.ndarray]] = None,
        **kwargs: Any
    ):
        # normalize list bounds to float arrays so np.clip can broadcast elementwise
        if isinstance(control_bounds_high, list):
            control_bounds_high = np.array(control_bounds_high, float)
        if isinstance(control_bounds_low, list):
            control_bounds_low = np.array(control_bounds_low, float)
        self.control_bounds_high = control_bounds_high
        self.control_bounds_low = control_bounds_low
        self.enable = True
        self.intervening = False
        self.control_desired: Optional[np.ndarray] = None
        self.control_actual: Optional[np.ndarray] = None
        super().__init__(*args, **kwargs)

    def reset(self):
        """Resets the rta module to the initial state at the beginning of an episode"""
        self.enable = True
        self.intervening = False
        # plain assignments: attributes were already annotated Optional in __init__
        self.control_desired = None
        self.control_actual = None

    def filter_control(self, input_state: Any, step_size: float, control_desired: np.ndarray) -> np.ndarray:
        """filters desired control into safe action

        Parameters
        ----------
        input_state
            input state of environment to RTA module. May be any custom state type.
        step_size : float
            time duration over which filtered control will be applied to actuators
        control_desired : np.ndarray
            desired control vector

        Returns
        -------
        np.ndarray
            safe filtered control vector
        """
        self.control_desired = np.copy(control_desired)
        if self.enable:
            # filter, then clip to actuation bounds
            control_actual = self._clip_control(self.compute_filtered_control(input_state, step_size, control_desired))
            self.control_actual = np.array(control_actual)
        else:
            # module disabled: pass the desired control through unmodified
            self.control_actual = np.copy(control_desired)
        return np.copy(self.control_actual)

    @abc.abstractmethod
    def compute_filtered_control(self, input_state: Any, step_size: float, control_desired: np.ndarray) -> np.ndarray:
        """custom logic for filtering desired control into safe action

        Parameters
        ----------
        input_state : Any
            input state of environment to RTA module. May be any custom state type.
        step_size : float
            simulation step size
        control_desired : np.ndarray
            desired control vector

        Returns
        -------
        np.ndarray
            safe filtered control vector
        """
        raise NotImplementedError()

    def generate_info(self) -> dict:
        """generates info dictionary on RTA module for logging

        Returns
        -------
        dict
            info dictionary for rta module
        """
        info = {
            'enable': self.enable,
            'intervening': self.intervening,
            'control_desired': self.control_desired,
            'control_actual': self.control_actual,
        }
        return info

    def _clip_control(self, control: np.ndarray) -> np.ndarray:
        """clip control vector values to specified upper and lower bounds

        Parameters
        ----------
        control : np.ndarray
            raw control vector

        Returns
        -------
        np.ndarray
            clipped control vector
        """
        if self.control_bounds_low is not None or self.control_bounds_high is not None:
            # np.clip accepts None for a missing one-sided bound
            control = np.clip(control, self.control_bounds_low, self.control_bounds_high)  # type: ignore
        return control
class ConstraintBasedRTA(RTAModule):
    """Base class for constraint-based RTA systems

    Parameters
    ----------
    control_bounds_high : Union[float, int, list, np.ndarray], optional
        upper bound of allowable control. Pass a list for element specific limit. By default None
    control_bounds_low : Union[float, int, list, np.ndarray], optional
        lower bound of allowable control. Pass a list for element specific limit. By default None
    jit_compile_dict: Dict[str, bool], optional
        Dictionary specifying which subroutines will be jax jit compiled. Behavior defined in self.compose()
        Useful for implementing versions of methods that can't be jit compiled
        Each RTA class will have custom default behavior if not passed
    """

    def __init__(
        self,
        *args: Any,
        control_bounds_high: Optional[Union[float, int, list, np.ndarray]] = None,
        control_bounds_low: Optional[Union[float, int, list, np.ndarray]] = None,
        jit_compile_dict: Optional[Dict[str, bool]] = None,
        **kwargs: Any
    ):
        super().__init__(*args, control_bounds_high=control_bounds_high, control_bounds_low=control_bounds_low, **kwargs)
        if jit_compile_dict is None:
            self.jit_compile_dict = {}
        else:
            self.jit_compile_dict = jit_compile_dict
        # setup order matters: properties first, then constraints (which may rely on
        # the properties), then compose() to apply jax transformations
        self._setup_properties()
        self.constraints = self._setup_constraints()
        self.compose()

    def compute_filtered_control(self, input_state: Any, step_size: float, control_desired: np.ndarray) -> np.ndarray:
        """filters desired control into safe action

        Parameters
        ----------
        input_state : Any
            input state of environment to RTA module. May be any custom state type.
            If using a custom state type, make sure to implement _get_state to translate into an RTA state.
            If custom _get_state() method is not implemented, must be an RTAState or numpy.ndarray instance.
        step_size : float
            time duration over which filtered control will be applied to actuators
        control_desired : np.ndarray
            desired control vector

        Returns
        -------
        np.ndarray
            safe filtered control vector
        """
        state = self._get_state(input_state)
        control_actual = self._filter_control(state, step_size, to_jnp_array_jit(control_desired))
        # NOTE: the base filter_control() also re-assigns control_actual after clipping
        self.control_actual = np.array(control_actual)
        return np.copy(control_actual)

    @abc.abstractmethod
    def _filter_control(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        """custom logic for filtering desired control into safe action

        Parameters
        ----------
        state : jnp.ndarray
            current rta state of the system
        step_size : float
            simulation step size
        control : jnp.ndarray
            desired control vector

        Returns
        -------
        jnp.ndarray
            safe filtered control vector
        """
        raise NotImplementedError()

    def _setup_properties(self):
        """Additional initialization function to allow custom initialization to run after baseclass initialization,
        but before constraint initialization"""

    def compose(self):
        """
        applies jax composition transformations (grad, jit, jacobian etc.)

        jit compilation is determined by the jit_compile_dict constructor parameter
        """

    @abc.abstractmethod
    def _setup_constraints(self) -> OrderedDict[str, ConstraintModule]:
        """Initializes and returns RTA constraints

        Returns
        -------
        OrderedDict
            OrderedDict of rta constraints with name string keys and ConstraintModule object values
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def _pred_state(self, state: jnp.ndarray, step_size: float, control: jnp.ndarray) -> jnp.ndarray:
        """predict the next state of the system given the current state, step size, and control vector"""
        raise NotImplementedError()

    def _clip_control_jax(self, control: jnp.ndarray) -> jnp.ndarray:
        """jax version of clip control for clipping control vector values to specified upper and lower bounds

        Parameters
        ----------
        control : jnp.ndarray
            raw control vector

        Returns
        -------
        jnp.ndarray
            clipped control vector
        """
        if self.control_bounds_low is not None or self.control_bounds_high is not None:
            control = jnp.clip(control, self.control_bounds_low, self.control_bounds_high)  # type: ignore
        return control

    def _get_state(self, input_state) -> jnp.ndarray:
        """Converts the global state to an internal RTA state"""
        assert isinstance(input_state, (np.ndarray, jnp.ndarray)), (
            "input_state must be an RTAState or numpy array. "
            "If you are tying to use a custom state variable, make sure to implement a custom "
            "_get_state() method to translate your custom state to an RTAState")
        # jax arrays pass through untouched; numpy arrays are converted to jax
        if isinstance(input_state, jnp.ndarray):
            return input_state
        return to_jnp_array_jit(input_state)
class CascadedRTA(RTAModule):
    """Base class for cascaded RTA systems

    Chains multiple RTA modules: the filtered output of each module is fed as the
    desired control to the next module in priority order.
    """

    def __init__(self, *args: Any, **kwargs: Any):
        self.rta_list = self._setup_rta_list()
        super().__init__(*args, **kwargs)

    def compute_filtered_control(self, input_state: Any, step_size: float, control_desired: np.ndarray) -> np.ndarray:
        """Passes the desired control through each RTA module in sequence

        The cascade is marked as intervening if any module in the chain intervenes.
        """
        self.intervening = False
        filtered = control_desired
        for rta_module in self.rta_list:
            filtered = np.copy(rta_module.filter_control(input_state, step_size, filtered))
            self.intervening = self.intervening or rta_module.intervening
        return filtered

    @abc.abstractmethod
    def _setup_rta_list(self) -> List[RTAModule]:
        """Setup list of RTA objects

        Returns
        -------
        list
            list of RTA objects in order from lowest to highest priority
            for list of length N, where {i = 1, ..., N}, output of RTA {i} is passed as input RTA {i+1}
        """
        raise NotImplementedError()
class BackupControlBasedRTA(ConstraintBasedRTA):
    """Base class for backup control based RTA algorithms

    Adds interfaces for a backup controller member

    Parameters
    ----------
    backup_controller : RTABackupController
        backup controller object utilized by rta module to generate backup control
    """

    def __init__(self, *args: Any, backup_controller: RTABackupController, **kwargs: Any):
        # store the backup controller before base-class init, which invokes compose()
        self.backup_controller = backup_controller
        super().__init__(*args, **kwargs)

    def backup_control(self, state: jnp.ndarray, step_size: float) -> jnp.ndarray:
        """retrieve safe backup control given the current state

        The backup controller's output is clipped to the module's actuation bounds.

        Parameters
        ----------
        state : jnp.ndarray
            current rta state of the system
        step_size : float
            time duration over which backup control action will be applied

        Returns
        -------
        jnp.ndarray
            backup control vector
        """
        raw_control = self.backup_controller.generate_control(state, step_size)
        return self._clip_control_jax(raw_control)

    def reset_backup_controller(self):
        """Resets the backup controller to the initial state at the beginning of an episode"""
        self.backup_controller.reset()

    def backup_controller_save(self):
        """Save the internal state of the backup controller

        Allows trajectory integration with a stateful backup controller
        """
        self.backup_controller.save()

    def backup_controller_restore(self):
        """Restores the internal state of the backup controller from the last save

        Allows trajectory integration with a stateful backup controller
        """
        self.backup_controller.restore()
import torch
import torch.utils.data as data
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import warnings
def create_dataloader(features, targets, batch_size, train_size=0.8,
                      test_size=0.2, validation_size=0, seed=42,
                      scale_data=False, **kwargs):
    """Creates a Pytorch compatible dataset of type dataloader.

    Data is split into two or three batches depending on the sizes of the
    train, test and validation splits.

    :param features: Input (or regressor) or location of file containing the
                     features for the ML model. If a location is supplied the
                     file must be compatible with numpy.load. Dimensions must
                     be compatible with torch models, i.e. [samples, features]
                     for a NN or [samples, channels, features] for a CNN.
    :type features: array_like or str
    :param targets: Targets or location of file containing the targets. If a
                    location is supplied the file must be compatible with
                    numpy.load. Dimensions required to be compatible with
                    torch models, see above.
    :type targets: array_like or str
    :param batch_size: Size of mini batches.
    :type batch_size: int
    :param train_size: Size of training batch. Defaults to 0.8.
    :type train_size: int or float
    :param test_size: Size of test batch. Defaults to 0.2.
    :type test_size: int or float
    :param validation_size: Size of validation batch. Defaults to 0.
    :type validation_size: int or float
    :param seed: Seed for torch random split. Defaults to 42.
    :type seed: int
    :param scale_data: Whether to scale the data by sklearn StandardScaler.
                       Follows (x - mean(x)) / std(x). Defaults to False.
    :type scale_data: bool
    :returns: Tuple of train, test (and validation) dataloaders
    :rtype: tuple of torch.utils.data.DataLoader
    """
    torch.manual_seed(seed)

    # Load from disk if file paths were supplied
    if isinstance(features, str):
        features = np.load(features)
    # Insert a channel dimension for CNN-style [samples, H, W] input
    if len(features.shape) == 3:
        features = features[:, np.newaxis, :, :]
    if isinstance(targets, str):
        targets = np.load(targets)
    # Torch models expect 2D targets: [samples, 1]
    if len(targets.shape) == 1:
        targets = targets[:, np.newaxis]

    nf = features.shape[0]
    nt = targets.shape[0]
    assert nf == nt, 'Number of samples for targets and features does not match'

    # Fractional sizes are converted to absolute sample counts
    if isinstance(train_size, float):
        train_size = int(nf * train_size)
    if isinstance(test_size, float):
        test_size = int(nf * test_size)
    if isinstance(validation_size, float):
        validation_size = int(nf * validation_size)

    # Rounding may leave samples unassigned; give the remainder to training
    if not train_size + test_size + validation_size == nf:
        train_size += nf - (train_size + test_size + validation_size)
        warnings.warn(
            'Train, test and validation size does not add to length of dataset. Added rest of samples to training set.')

    if not scale_data:
        x = torch.Tensor(features)
        y = torch.Tensor(targets)
        dataset = data.TensorDataset(x, y)
        train, test, validation = data.random_split(
            dataset, (train_size, test_size, validation_size))
    else:
        # NOTE(review): only the targets are scaled here; the features pass
        # through unscaled -- confirm this is intended.
        scaler = StandardScaler()
        # Create a train/test split; "test" holds both test and validation
        xtrain, xtest, ytrain, ytest = train_test_split(
            features, targets, test_size=test_size + validation_size)
        # If validation > 0 split that remainder into test and validation
        if validation_size > 0:
            xtest, xval, ytest, yval = train_test_split(
                xtest, ytest, test_size=validation_size)
        # Fit the scaler on the training targets only, to avoid leakage
        ytrain = scaler.fit_transform(ytrain)
        ytest = scaler.transform(ytest)
        # yval only exists when a validation split was made (was a bare
        # try/except NameError before)
        if validation_size > 0:
            yval = scaler.transform(yval)
        train = data.TensorDataset(torch.Tensor(xtrain), torch.Tensor(ytrain))
        test = data.TensorDataset(torch.Tensor(xtest), torch.Tensor(ytest))

    dataloader_train = data.DataLoader(train, batch_size=batch_size, **kwargs)
    # Shuffling only makes sense for training data; disable it for the rest.
    # Use .get() so a missing 'shuffle' kwarg no longer raises KeyError.
    if kwargs.get('shuffle'):
        kwargs['shuffle'] = False
    dataloader_test = data.DataLoader(test, batch_size=batch_size, **kwargs)

    if validation_size > 0:
        if scale_data:
            validation = data.TensorDataset(
                torch.Tensor(xval), torch.Tensor(yval))
        dataloader_valid = data.DataLoader(
            validation, batch_size=batch_size, **kwargs)
        return (dataloader_train, dataloader_test, dataloader_valid)
    return (dataloader_train, dataloader_test)
[](https://travis-ci.org/hz-inova/run_jnb)
[](https://ci.appveyor.com/project/aplamada/run-jnb/branch/master)
[](https://codecov.io/gh/hz-inova/run_jnb)
[](https://opensource.org/licenses/BSD-3-Clause)
# run_jnb
**run_jnb** is a python package and command line tool for parametrising (python3 only) and executing Jupyter notebooks.
- **Source**: [https://github.com/hz-inova/run_jnb](https://github.com/hz-inova/run_jnb)
- **Platform**: Independent
- **Development Status**: Alpha
- **Getting Started**: [](https://mybinder.org/v2/gh/hz-inova/run_jnb/master?filepath=%2Fexample%2FGetting%20Started.ipynb)
## Installation
```sh
pip install run_jnb
```
## Short Description
The package contains two public functions ***possible_parameter*** and ***run_jnb*** (see the docstring).
```python
>>> from run_jnb import possible_parameter, run_jnb
```
***run_jnb*** can be used also as a command line tool and its documentation is available via
```sh
run_jnb -h
```
## Simple Example
Consider the [notebook](example/Power_function.ipynb).
***possible_parameter*** returns a *list* of possible parameters with their name, value and cell index.
The list is alphabetically sorted by the name of the possible parameters.
```python
>>> possible_parameter('./Power_function.ipynb')
[PossibleParameter(name='exponent', value=2, cell_index=7),
PossibleParameter(name='np_arange_args', value={'start': -10, 'stop': 10, 'step': 0.01}, cell_index=4)]
```
***run_jnb*** allows one to easily parametrise and execute a notebook.
```python
# Parametrise the noteboook and not execute the notebook
>>> run_jnb('./Power_function.ipynb', return_mode='parametrised_only', exponent=1)
# Parametrise and execute the notebook
>>> run_jnb('./Power_function.ipynb', return_mode=True, exponent=1)
Output(output_nb_path='.../_run_jnb/Power_function-output.ipynb', error_prompt_number=None,
error_type=None, error_value=None, error_traceback=None)
```
Please see the exported notebook by [only parametrising](example/_run_jnb/Power_function-output.ipynb) and by [parametrising and executing ](example/_run_jnb/Power_function-output%20(1).ipynb) the initial notebook.
Same output can be obtained by using *arg* parameter
```python
>>> run_jnb('.../Power_function.ipynb', return_mode=True, arg='{"exponent":1}')
```
or using the command line tool (the output is returned only in verbose mode and the tuple is serialised as a csv)
```sh
# " can be escaped by \"
$ run_jnb ./Power_function.ipynb -m true -a "{\"exponent\":1}" -vvv
".../_run_jnb/Power_function-output.ipynb",,,,
```
*np_arange_args* and *exponent* can be parametrised
```python
# parametrise using keyword arguments
>>> run_jnb('./Power_function.ipynb', return_mode=True, exponent=3, np_arange_args={'start':-20,'stop':20,'step':0.1})
# parametrise mixing keyword arguments and arg parameter
>>> run_jnb('./Power_function.ipynb', return_mode=True, arg='{"exponent":3}', np_arange_args={'start':-20,'stop':20,'step':0.1})
# parametrise using arg parameter with a json file
>>> run_jnb('./Power_function.ipynb', return_mode=True, arg='./power_function_arg.json')
Output(output_nb_path='.../_run_jnb/Power_function-output (1).ipynb', error_prompt_number=None,
error_type=None, error_value=None, error_traceback=None)
```
where in the last example [*power_function_arg.json*](example/power_function_arg.json) contains
```javascript
{
"exponent": 3,
"np_arange_args": {
"start": -20,
"stop": 20,
"step": 0.1
}
}
```
Please see the [generated notebook](example/_run_jnb/Power_function-output%20(2).ipynb).
If an error is detected during the execution of the generated notebook
```python
>>> run_jnb('./Power_function.ipynb', return_mode=True, exponent=1, np_arange_args={'step':0.1})
Output(output_nb_path='.../_run_jnb/Power_function-output (2).ipynb', error_prompt_number=3,
error_type='TypeError', error_value="Required argument 'start' (pos 1) not found", error_traceback=...)
```
the output provides also the prompt number of the cell where the error was caught and details about the error (please see the [generated notebook](example/_run_jnb/Power_function-output%20(3).ipynb)).
## How it works
For a notebook written in python one can find the possible parameters. This is achieved by parsing the abstract syntax tree of the code cells. A variable can be a possible parameter if:
- it is defined in a cell that contains only comments or assignments,
- its name is not used as a global variable in the current cell (beside the assignment) nor previously.
One can pass arguments as keyword arguments or in a json format (file or string). For safety reasons, in order to avoid any code injection, only json serializable keyword arguments are available. The keyword arguments are first encoded in json format using the standard [json encoder](https://docs.python.org/3.6/library/json.html#json.JSONEncoder). The json content is decoded into python objects using the standard [json decoder](https://docs.python.org/3.6/library/json.html#json.JSONDecoder) and it is mapped to a variable assignment by unpacking it. The assignments are appended at the end of the cell where they are initially defined.
For a *jsonable parameter*, i.e. a parameter for which its value can be recovered from its json representation using the standard decoder, the value of the parameter is returned as well. The value is determined in two steps: firstly the assignment is safely evaluated using [ast.literal_eval](https://docs.python.org/3/library/ast.html) and next it is checked if it is a jsonable parameter.
The generated notebook (parametrised or not) can be easily executed. The implementation relies on [nbconvert Executing notebooks](http://nbconvert.readthedocs.io/en/latest/execute_api.html).
## Dependencies
- [python](https://www.python.org): 3.5 or higher
- [nbconvert](http://nbconvert.readthedocs.io): 4.2 or higher
## License
[BSD 3](LICENSE)
## Acknowledgments
[nbrun](https://github.com/tritemio/nbrun) and [nbparameterise](https://github.com/takluyver/nbparameterise) were a source of inspiration.
| /run_jnb-0.1.16.tar.gz/run_jnb-0.1.16/README.md | 0.498291 | 0.953837 | README.md | pypi |
[](https://github.com/psf/black)
[](https://opensource.org/licenses/MIT)
[](https://pypi.org/project/runcon)
[](https://github.com/demmerichs/runcon/actions/workflows/pre-commit.yml)
[](https://github.com/demmerichs/runcon)
[](https://pypi.org/project/runcon)
[](https://github.com/demmerichs/runcon)
[](https://github.com/demmerichs/runcon/actions/workflows/test_coverage.yml)
[](https://github.com/demmerichs/runcon/actions/workflows/test_coverage.yml)
# runcon <!-- omit in toc -->
runcon is an MIT-licensed package that provides a `Config` class with a lot of functionality that helps and simplifies organizing many, differently configured runs (hence the name **Run** **Con**figuration). Its main target audience are scientists and researchers who run many different experiments either in the real world or a computer-simulated environment and want to control the runs through a base configuration as well as save each run's settings in configuration files. The `Config` class helps creating differently configured runs through user-configurable hierarchical configuration layouts, it automatically creates paths for each run which can be used to save results, and it helps in comparing the config files of each run during the step of analyzing and comparing different runs.
This package was developed with Deep Learning experiments in mind. These usually consist of large and complex configurations and will therefore also be the basis for the upcoming examples of usage.
<a id="toc"></a>
- [Installation](#installation)
- [Basic Usage](#basic-usage)
- [Loading configurations](#loading-configurations)
- [Accessing configuration values](#accessing-configuration-values)
- [Creating runs](#creating-runs)
- [Organizing runs](#organizing-runs)
# Installation<a id="installation"></a> [`↩`](#toc)
runcon is in PyPI, so it can be installed directly using:
```bash
pip install runcon
```
Or from GitHub:
```bash
git clone https://github.com/demmerichs/runcon.git
cd runcon
pip install .
```
# Basic Usage<a id="basic-usage"></a> [`↩`](#toc)
This package builds upon `PyYAML` as a parser for loading and saving configuration files, therefore you should adhere to the YAML syntax when writing your configuration.
## Loading configurations<a id="loading-configurations"></a> [`↩`](#toc)
You can load from a single file:
<!--phmdoctest-share-names-->
```python
from runcon import Config
cfg = Config.from_file("cfgs/file_example.cfg")
print(cfg, end="")
```
produces
```
_CFG_ID: 1d4d313eedb05ae00c98ac8cb0a34946
top_level:
more_levels:
deep_level_list:
- list_value
- null
- 3+4j
- 3.14
- true
```
Or you can load from a directory, in which case the filenames will become the toplevel keys. The following layout
```bash
cfgs
├── dir_example
│ ├── forest.cfg
│ └── garden.cfg
```
with the following code
```python
cfg = Config.from_dir("cfgs/dir_example", file_ending=".cfg")
print(cfg, end="")
```
produces
```
_CFG_ID: 705951e95af9b1f6cf314e0f96835349
forest:
trees: 1000
animals: 20
garden:
trees: 2
animals: 0
```
Another way to load multiple configuration files at once is by specifying all the files and their corresponding keys manually.
```python
key_file_dict = {
"black_forest": "cfgs/dir_example/forest.cfg",
"random_values": "cfgs/file_example.cfg",
}
cfg = Config.from_key_file_dict(key_file_dict)
print(cfg, end="")
```
produces
```
_CFG_ID: 60b454fb7619eb972cec13e99ff6addf
black_forest:
trees: 1000
animals: 20
random_values:
top_level:
more_levels:
deep_level_list:
- list_value
- null
- 3+4j
- 3.14
- true
```
## Accessing configuration values<a id="accessing-configuration-values"></a> [`↩`](#toc)
The `Config` object inherets `AttrDict` (a support class by `runcon`). Therefore, values can either be accessed the same way as in a `dict`, or via attribute-access.
Additionally, `Config` supports access via string-concatenation of the keys using a dot as delimiter, e.g.
```python
>>> from runcon import Config
>>> cfg = Config({
... "top": {
... "middle": {"bottom": 3.14},
... "cfg": "value",
... }
... })
>>> print(cfg.top.middle["bottom"])
3.14
>>> print(cfg["top"].cfg)
value
>>> print(cfg["top.middle.bottom"])
3.14
```
## Creating runs<a id="creating-runs"></a> [`↩`](#toc)
Most projects managing multiple runs do this by manually labeling different configuration setups for each run. The main drawbacks of this approach for a larger set of runs are:
- non-deterministic: Different people might label the same configuration differently or different configurations the same way. Even the same person might not remember after a week which settings exactly were changed based on their labeling.
- non-descriptive: In complex configurations a short label cannot capture all setting changes. Finding these via a diff-view can become daunting and unstructured, making it complicated to easily grasp all the changes made.
Together with this package we propose an alternate way of structuring runs and configurations and trading of slightly longer "labels" for the removal of the above drawbacks.
Most projects start with a single default configuration, and going from there apply one or more change of settings to produce differently configured runs.
We suggest moving all this information into one or multiple configuration files, e.g. a single default configuration, and multiple named setting changes:
```yaml
# dl_example.cfg
default:
model:
name: ResNet
layers: 50
batchsize: 16
optimizer:
name: Adam
learningrate: 1e-3
loss: MSE
small_net:
model:
layers: 5
large_net:
model:
layers: 100
alex:
model:
name: AlexNet
optimizer:
name: SGD
large_bs:
batchsize: 64
optimizer:
learningrate: 4e-3
```
You could now create in code your run configuration like this (but not miss the shortcut after this example):
```python
from copy import deepcopy
base_cfgs = Config.from_file("cfgs/dl_example.cfg")
cfg = deepcopy(base_cfgs.default)
# rupdate works similar to dict.update, but recursivly updates lower layers
cfg.rupdate(base_cfgs.large_net)
cfg.rupdate(base_cfgs.alex)
cfg.loss = "SmoothL1"
cfg.optimizer.learningrate = 1e-4
print(cfg, end="")
```
produces
```
_CFG_ID: be99468b9911c12ccba140ae5d9f487a
model:
name: AlexNet
layers: 100
batchsize: 16
optimizer:
name: SGD
learningrate: 0.0001
weightdecay: 1.0e-06
loss: SmoothL1
```
As this pattern of stacking/merging configurations and possibly modifying a few single values is very common or at least the intended way for using this package, there is a simple shortcut function which operates on string input such that a CLI parser can easily pass values to this function.
For example, you might want to run a script specifying the above constructed configuration like this:
```
python your_runner_script.py \
--cfg default large_net alex \
--set \
loss SmoothL1 \
optimizer.learningrate 1e-4
```
The details of how your CLI interface should look and how you want to parse the values is left to you, (e.g. you could leave out `default` if you have only a single default configuration and just add it inside your code after CLI invocation), but parsing the above command options into the following all-strings variables
<!--phmdoctest-share-names-->
```python
cfg_chain = ["default", "large_net", "alex"]
set_values = [
"loss", "SmoothL1",
"optimizer.learningrate", "1e-4",
]
```
would allow you to call
<!--phmdoctest-share-names-->
```python
base_cfgs = Config.from_file("cfgs/dl_example.cfg")
cfg = base_cfgs.create(cfg_chain, kv=set_values)
print(cfg, end="")
```
and produces (using internally `ast.literal_eval` to parse non-string values, like booleans or floats, in this example `1e-4`)
```
_CFG_ID: be99468b9911c12ccba140ae5d9f487a
model:
name: AlexNet
layers: 100
batchsize: 16
optimizer:
name: SGD
learningrate: 0.0001
weightdecay: 1.0e-06
loss: SmoothL1
```
The resulting label for this configuration would then consist of the configuration chain and the single key-value pairs, and can be automatically reconstructed from the base configs, e.g.
```python
print(cfg.create_auto_label(base_cfgs))
```
produces
```
default alex large_net -s optimizer.learningrate 0.0001 loss SmoothL1
```
Given the run configuration and the set of base configurations, this label can always deterministically be created, and making it shorter is just a matter of wrapping more key-value pairs or base configs into meta configurations.
For the above example this could mean just adding a `smoothl1` sub config which also changes the learning rate, e.g.
```python
base_cfgs.smoothl1 = Config({"loss": "SmoothL1", "optimizer": {"learningrate": 0.0001}})
print(cfg.create_auto_label(base_cfgs))
```
produces
```
default smoothl1 alex large_net
```
This approach mitigates both drawbacks mentioned earlier. The labels are deterministic, and based on the labels, it is quite easy to read of the changes made to the default configuration, as the label itself describes hierarchical changes and the base configurations modifying the default configuration are considered to be minimalistic.
## Organizing runs<a id="organizing-runs"></a> [`↩`](#toc)
After creating your run configuration in your script, it is time to create a directory for your new run, and using it to dump your results from that run.
<!--phmdoctest-share-names-->
```python
cfg_dir = cfg.initialize_cfg_path(base_path="/tmp/Config_test", timestamp=False)
print(type(cfg_dir), cfg_dir)
```
produces
```
<class 'pathlib.PosixPath'> /tmp/Config_test/8614010d20024c05f815cc8edcc8982f
```
The path mainly consists of two parts, a time stamp allowing you to store multiple runs with the same configuration (if you specify `timestamp=True`), and a hash produced by the configuration. Assuming hash collisions are too rare ever to be a problem, two configurations that differ somehow will always produce different hashes. The hash is used as it only depends on the configuration, whereas the automatic labeling depends also on the base configuration. The previous section demonstrated how a change in the base configurations can produce a change in the automatic label. The `initialize_cfg_path` routine also produces a `description` folder next to the configuration folders, where symlinks are stored to the configuration folders, but with the automatic labels. This ensures that the symlinks can easily be recreated based on a changed configuration, without the need to touch the actual run directories.
Another thing that happens during the path initialization is a call to `cfg.finalize()`. This should mimic the behavior of making all values constant and ensures that the configuration file that was created on disk actually represents all values used during the run execution, and accidental in-place value changes can be mostly ruled out.
```python
try:
cfg.loss = "new loss"
except ValueError as e:
print(e)
print(cfg.loss)
cfg.unfinalize()
cfg.loss = "new loss"
print(cfg.loss)
```
produces
```
This Config was already finalized! Setting attribute or item with name loss to value new loss failed!
SmoothL1
new loss
```
# License <!-- omit in toc -->
runcon is released under a MIT license.
| /runcon-1.2.0a2.tar.gz/runcon-1.2.0a2/README.md | 0.662469 | 0.963678 | README.md | pypi |
from .relational import One2many
from .record import Record
from .defaults import _sentinelle
from copy import deepcopy
from .tools.serialization import get_random_id
class RecordSet:
    """Container that stores records of a table, keyed by a unique key field.

    Internally holds a mapping of key -> attribute dict; accessors wrap the
    stored attributes in Record objects.
    """
    def __init__(self, table, key="id", one2many=None):
        self._table = table
        self._data = {}
        if not isinstance(key, str):
            raise Exception("key must be of type str")
        self._key = key
        # Avoid a shared mutable default argument
        self._one2many = {} if one2many is None else one2many
    def _split_key_data(self, item):
        """Split a dict item into (key, attributes without the key field)."""
        if not isinstance(item, dict):
            raise Exception("Item must be of type dict")
        copy = deepcopy(item)
        # A missing key falls back to a freshly generated random id
        key = copy.pop(self._key, _sentinelle)
        if key is _sentinelle:
            key = get_random_id()
        return key, copy
    def empty_copy(self):
        """Return a new empty RecordSet with the same table/key configuration."""
        return RecordSet(
            table=self._table,
            key=self._key,
            one2many=self._one2many
        )
    def append(self, item):
        """
        Add a dict item. The key value may be inside the dict; otherwise a
        random id is generated. Returns the stored item wrapped as a Record.
        """
        key, data = self._split_key_data(item)
        self[key] = data
        return self[key]
    def update(self, record):
        """
        Merge the record's attributes into the stored data (dict.update
        semantics), preserving attributes changed elsewhere in the program
        """
        self._data[record.key].update(record._attributes)
    def save(self, record):
        """
        Replace the stored attributes with the record's attributes.
        No concurrency issue on the record
        """
        self._data[record.key] = record._attributes
    def __bool__(self):
        return bool(self._data)
    def __str__(self):
        return str(list(self._data.keys()))
    def __repr__(self):
        return str(self)
    def get(self, key):
        """
        Same as [] operator but ensure that changing record value won't change value in recordset
        """
        value = self._data.get(key, _sentinelle)
        if value is _sentinelle:
            return Record(self._table, key, {}, one2many=self._one2many)
        return Record(self._table, key, deepcopy(value), one2many=self._one2many)
    def __getitem__(self, key):
        # Missing keys are created on access (defaultdict-like behaviour)
        if not isinstance(key, str):
            raise Exception("Indexation is not supported, use record's key")
        value = self._data.get(key, _sentinelle)
        if value is _sentinelle:
            value = {}
            # Fixed: store the same dict the Record wraps so mutations
            # through the Record are visible here (previously a second,
            # unrelated empty dict was stored)
            self._data[key] = value
        return Record(self._table, key, value, one2many=self._one2many)
    def __setitem__(self, key, item):
        if isinstance(item, Record):
            data = item._attributes
            if key != item.key:
                # Stored under a different key: copy so records do not share state
                data = deepcopy(data)
        else:
            _, data = self._split_key_data(item)
        self._data[key] = data
    def __len__(self):
        return len(self._data)
    def __iter__(self):
        return self.records()
    def records(self):
        """Yield each stored entry wrapped as a Record (shares storage)."""
        for key, value in self._data.items():
            yield Record(self._table, key, value, one2many=self._one2many)
    def keys(self):
        return self._data.keys()
    def extract(self, keys):
        """Return a new RecordSet containing only the given key(s)."""
        recordset = self.empty_copy()
        if isinstance(keys, str):
            keys = [keys]
        for k in keys:
            # Fixed: previously assigned with the undefined name `key`,
            # which raised NameError
            recordset[k] = self[k]._attributes
        return recordset
    def filter(self, condition=None):
        """
        Return a new RecordSet with the records matching `condition`
        (defaults to the truthiness of the record)
        """
        recordset = self.empty_copy()
        if condition is None:
            condition = bool
        for rec in self:
            if condition(rec):
                recordset[rec.key] = rec._attributes
        return recordset
    def __add__(self, recordset):
        if not isinstance(recordset, RecordSet):
            raise Exception("Can only add 2 recordset together")
        res = self.empty_copy()
        # Right-hand operand wins on key collisions
        res._data = {**self._data, **recordset._data}
        return res
    def __iadd__(self, recordset):
        res = self + recordset
        self._data = res._data
        return self
__docformat__ = "restructuredtext en"
def child2dict(el):
    """Build a dict from an element's direct children, mapping each child's
    tag name to its text content

    :Parameters:
        el : ElementTree.Element

    :return: a dictionary of element key(tag name)/value(node text) pairs
    :rtype: dict
    """
    return dict((child.tag, child.text) for child in el)
def attr2dict(el):
    """Return a copy of an element's attributes as a plain dict.

    Uses Element.items() rather than reading Element.attrib directly, as the
    ElementTree docs recommend against using the attrib attribute directly.

    :Parameters:
        el : ElementTree.Element

    :return: a dictionary of element attrib key/value pairs
    :rtype: dict
    """
    # dict() over items() replaces the redundant {k: v for k, v in ...}
    return dict(el.items())
def node2dict(el):
    """Combines both the attr2dict and child2dict functions.

    Child-node entries override attribute entries on name collisions (same
    as the original list-concatenation, where later pairs win).
    """
    combined = attr2dict(el)
    combined.update(child2dict(el))
    return combined
def cull_kwargs(api_keys, kwargs):
    """Pull the API params out of kwargs based on the list of api_keys

    !! modifies kwargs in place: matched keys are removed from kwargs

    :Parameters:
        api_keys : list | set | tuple
            an iterable representing the keys of the key value pairs to pull out of kwargs
        kwargs : dict
            a dictionary of kwargs

    :return: a dictionary of the API params
    :rtype: dict
    """
    culled = {}
    for api_key in api_keys:
        if api_key in kwargs:
            culled[api_key] = kwargs.pop(api_key)
    return culled
def dict2argstring(argString):
    """Converts an argString dict into a string, otherwise returns the
    string unchanged

    :Parameters:
        argString : str | dict
            argument string to pass to job - if str, will be passed as-is
            else if dict will be converted to a compatible string

    :return: an argString
    :rtype: str
    """
    if not isinstance(argString, dict):
        return argString
    # str.format keeps py2/py3 compatibility (no f-strings in this module)
    return ' '.join('-{0} {1}'.format(k, v) for k, v in argString.items())
# Compatibility shim: expose a StringType alias covering the native string
# type on both Python 2 and Python 3.
try:
    # On Python 3 this isinstance check raises NameError (basestring is gone)
    if isinstance('', basestring):
        pass
except NameError:
    # python 3
    StringType = type('')
else:
    # python 2
    StringType = basestring
===============================
runenv
===============================
.. image:: https://img.shields.io/travis/onjin/runenv.svg
:target: https://travis-ci.org/onjin/runenv
.. image:: https://img.shields.io/pypi/v/runenv.svg
:target: https://pypi.python.org/pypi/runenv
.. image:: https://img.shields.io/badge/license-New%20BSD-blue.svg
:target: https://github.com/onjin/runenv/blob/master/LICENSE
.. image:: https://img.shields.io/pypi/dm/runenv.svg
:target: https://pypi.python.org/pypi/runenv
Wrapper to run programs with modified environment variables loaded from given file. You can use *runenv* to manage your
app settings using 12-factor_ principles.
You can use same environment file with **runenv** and with **docker** using `env-file`_ parameter
.. _env-file: https://docs.docker.com/reference/commandline/cli/
.. _12-factor: http://12factor.net/
* Free software: BSD license
* Documentation: https://runenv.readthedocs.org.
--------
Features
--------
CLI:
* command-line tool to load environment variables from given file
Python API:
* load variables from a file (`.env` or passed filename)
* load only variables with given `prefix`
* `prefix` can be stripped during load
* detect whether environment was loaded by `runenv` CLI
* force load even if `runenv` CLI was used
* `search_parent` option which allows to look for `env_file` in parent dirs
------------
Installation
------------
In order to install use `pip`
.. code-block:: console
$ pip install -U runenv
-----
Usage
-----
Run from shell
.. code-block:: console
$ runenv env.development ./manage.py runserver
example `env.development` file
.. code-block:: python
BASE_URL=http://127.0.0.1:8000
DATABASE_URI=postgres://postgres:password@localhost/dbname
SECRET_KEY=y7W8pbRcuPuAmgTHsJtEpKocb7XPcV0u
# email settings
EMAIL_HOST=smtp.mandrillapp.com
EMAIL_PORT=587
EMAIL_HOST_USER=someuser
EMAIL_HOST_PASSWORD=hardpassword
EMAIL_FROM=dev@local.host
EMAIL_USE_TLS=1
----------
Python API
----------
**load_env(env_file='.env', prefix=None, strip_prefix=True, force=False, search_parent=0)**
Loads environment from given ``env_file``` (default `.env`).
Options:
+--------------+---------+--------------------------------------------------------------------------------+
| option | default | description |
+==============+=========+================================================================================+
| env_file | `.env` | relative or absolute path to file with environment variables |
+--------------+---------+--------------------------------------------------------------------------------+
| prefix | `None` | prefix to match variables e.g. `APP_` |
+--------------+---------+--------------------------------------------------------------------------------+
| strip_prefix | `True`  | should the prefix be stripped during load                                      |
+--------------+---------+--------------------------------------------------------------------------------+
| force | `False` | load env_file, even though `runenv` CLI command was used |
+--------------+---------+--------------------------------------------------------------------------------+
| search_parent| `0` | To what level traverse parents in search of file |
+--------------+---------+--------------------------------------------------------------------------------+
If ``prefix`` option is provided only variables starting with it will be loaded to environment, with their keys stripped of that prefix. To preserve prefix, you can set ``strip_prefix`` to ``False``.
Example
.. code-block:: console
$ echo 'APP_SECRET_KEY=bzemAG0xfdMgFrHBT3tJBbiYIoY6EeAj' > .env
.. code-block:: python
$ python
>>> import os
>>> from runenv import load_env
>>> load_env(prefix='APP_')
>>> 'APP_SECRET_KEY' in os.environ
False
>>> 'SECRET_KEY' in os.environ
True
>>> load_env(prefix='APP_', strip_prefix=False)
>>> 'APP_SECRET_KEY' in os.environ
True
**Notice**: Environment will not be loaded if command was fired by `runenv` wrapper, unless you set the **force** parameter to **True**
``load_env`` does not load variables when wrapper ``runenv`` is used. Also ``_RUNENV_WRAPPED`` is set to ``1``
Example
.. code-block:: console
$ echo 'APP_SECRET_KEY=bzemAG0xfdMgFrHBT3tJBbiYIoY6EeAj' > .env
.. code-block:: python
$ python
>>> import os
>>> from runenv import load_env
>>> os.environ['_RUNENV_WRAPPED'] = '1'
>>> load_env()
>>> 'APP_SECRET_KEY' in os.environ
False
>>> load_env(force=True)
>>> 'APP_SECRET_KEY' in os.environ
True
Django/Flask integration
------------------------
To use ``load_env`` with `Django`_ or `Flask`_, put the following in ``manage.py`` and ``wsgi.py``
.. code-block:: python
from runenv import load_env
load_env()
.. _django: http://djangoproject.com/
.. _flask: http://flask.pocoo.org/
Similar projects
----------------
* https://github.com/jezdez/envdir - runs another program with a modified environment according to files in a specified directory
* https://github.com/theskumar/python-dotenv - Reads the key,value pair from .env and adds them to environment variable
| /runenv-1.0.1.tar.gz/runenv-1.0.1/README.rst | 0.762513 | 0.668718 | README.rst | pypi |
from datetime import datetime, timezone
from rs3_api.httpService.http_request import http_get
from .endpoints import GRAND_EXCHANGE_API_ENDPOINTS
from .utils.jagex import abbrv_price_to_num, is_int, is_str, is_valid_category, unwrap_category_dict
class GrandExchange:
    """Client for the RuneScape 3 Grand Exchange API."""

    def get_catalogue(self, categoryId: int) -> dict:
        """Gets the number of items determined by the first letter in category.

        :param int categoryId: id of the category
        :rtype: dict
        :raises Exception: if categoryId is an invalid integer.
        :raises TypeError: if the category argument is not an integer.
        """
        # Argument type validation
        is_int(categoryId)
        is_valid_category(categoryId)
        response = http_get(GRAND_EXCHANGE_API_ENDPOINTS['catalogue'].format(categoryId=categoryId))
        return unwrap_category_dict(response.json())

    def get_runedate(self) -> dict:
        """Return the runedate of when the grand exchange was last updated.

        :rtype: dict
        """
        response = http_get(GRAND_EXCHANGE_API_ENDPOINTS['runedate'])
        return response.json()

    def get_items(self, categoryId: int, searchString: str, page: int = 1) -> dict:
        """Gets twelve items determined by category and first letters of search string.

        :param int categoryId: id of the category
        :param str searchString: search for items that start with this string
        :param int page: which page of twelve to fetch, default = 1
        :rtype: dict
        :raises TypeError: if parameters are of the wrong type
        :raises Exception: if categoryId is an invalid integer
        """
        # Argument validation
        is_int(categoryId, page)
        is_str(searchString)
        is_valid_category(categoryId)
        response = http_get(GRAND_EXCHANGE_API_ENDPOINTS['items']
                            .format(categoryId=categoryId, searchString=searchString.lower(), page=page))
        return response.json()

    def get_item_detail(self, itemId: int) -> dict:
        """Returns current price and price trends information on a tradeable item,
        plus its category, image and examine text.

        :param int itemId: id of the item
        :rtype: dict
        :raises TypeError: if itemId is of the wrong type
        """
        # Argument validation
        is_int(itemId)
        response = http_get(GRAND_EXCHANGE_API_ENDPOINTS['item_detail'].format(itemId=itemId))
        content = response.json()['item']
        # The API abbreviates large prices (the original code runs them through
        # abbrv_price_to_num); keep the originals and add numeric copies under
        # new keys so callers can do arithmetic directly.
        content['current']['price_num'] = abbrv_price_to_num(str(content['current']['price']))
        content['today']['price_num'] = abbrv_price_to_num(str(content['today']['price']))
        return content

    def get_item_graph(self, itemId: int) -> dict:
        """Graph data: the prices each day of a given item for the previous 180 days.
        When no price information is available, a value of zero is returned.

        :param int itemId: id of the item
        :rtype: dict with 'daily' and 'average' lists of {'epoch', 'price'} points
        :raises TypeError: if itemId is of the wrong type
        """
        is_int(itemId)
        response = http_get(GRAND_EXCHANGE_API_ENDPOINTS['graph'].format(itemId=itemId))
        content = response.json()
        # 'daily' holds the day-by-day price points; 'average' holds the trend
        # values the API reports for the same timestamps.
        return {
            'daily': self._convert_price_series(content['daily']),
            'average': self._convert_price_series(content['average']),
        }

    @staticmethod
    def _convert_price_series(series: dict) -> list:
        """Convert an API series of {epoch_millis_str: price} into a list of
        {'epoch': aware UTC datetime, 'price': price} points, preserving order."""
        points = []
        for millis, price in series.items():
            seconds = int(millis) / 1000.0
            points.append({'epoch': datetime.fromtimestamp(seconds, timezone.utc),
                           'price': price})
        return points
# Skill names in hiscore order: the position of each name is the index the
# hiscores ranking API uses for that skill (see Hiscores.get_ranking, which
# does SKILLS[index]).
SKILLS = [
    "overall",
    "attack",
    "defence",
    "strength",
    "constitution",
    "ranged",
    "prayer",
    "magic",
    "cooking",
    "woodcutting",
    "fletching",
    "fishing",
    "firemaking",
    "crafting",
    "smithing",
    "mining",
    "herblore",
    "agility",
    "thieving",
    "slayer",
    "farming",
    "runecrafting",
    "hunter",
    "construction",
    "summoning",
    "dungeoneering",
    "divination",
    "invention",
    "archaeology",
]
# Activity (minigame/boss/clue) names in hiscore order: the position of each
# name is the index the hiscores ranking API uses for that activity.
ACTIVITIES = [
    "bounty hunter",
    "b.h. rogues",
    "dominion tower",
    "the crucible",
    "castle wars games",
    "b.a. attackers",
    "b.a. defenders",
    "b.a. collectors",
    "b.a. healers",
    "duel tournament",
    "mobilising armies",
    "conquest",
    "fist of guthix",
    "gg: athletics",
    "gg: resource race",
    "we2: armadyl lifetime contribution",
    "we2: bandos lifetime contribution",
    "we2: armadyl pvp kills",
    "we2: bandos pvp kills",
    "heist guard level",
    "heist robber level",
    "cfp: 5 game average",
    "af15: cow tipping",
    "af15: rats killed after the miniquest",
    "runescore",
    "clue scrolls easy",
    "clue scrolls medium",
    "clue scrolls hard",
    "clue scrolls elite",
    "clue scrolls master",
]
# Template for a parsed hiscore profile: one zeroed entry per skill and per
# activity.  Generated from SKILLS/ACTIVITIES so the structures cannot drift
# apart; each comprehension iteration builds a fresh inner dict, so entries
# are independent (as in the previous hand-written literal).
PLAYER = {
    "name": "",
    "skills": {
        skill: {"rank": 0, "level": 1, "experience": 0} for skill in SKILLS
    },
    "activities": {
        activity: {"rank": 0, "score": 0} for activity in ACTIVITIES
    },
}
class GECategories:
    """Numeric ids of the Grand Exchange catalogue categories.

    The values mirror the category ids accepted by the Grand Exchange API
    (see GrandExchange.get_catalogue / get_items).
    """
    MISCELLANEOUS = 0
    AMMO = 1
    ARROWS = 2
    BOLTS = 3
    CONSTRUCTION_MATERIALS = 4
    CONSTRUCTION_PRODUCTS = 5
    COOKING_INGREDIENTS = 6
    COSTUMES = 7
    CRAFTING_MATERIALS = 8
    FAMILIARS = 9
    FARMING_PRODUCE = 10
    FLETCHING_MATERIALS = 11
    FOOD_AND_DRINK = 12
    HERBLORE_MATERIALS = 13
    HUNTING_EQUIPMENT = 14
    # Historical misspelling kept as an alias for backward compatibility.
    HUTING_EQUIPMENT = 14
    HUNTING_PRODUCE = 15
    JEWELLERY = 16
    MAGE_ARMOUR = 17
    MAGE_WEAPONS = 18
    MELEE_ARMOUR_LOW_LEVEL = 19
    MELEE_ARMOUR_MID_LEVEL = 20
    MELEE_ARMOUR_HIGH_LEVEL = 21
    MELEE_WEAPONS_LOW_LEVEL = 22
    MELEE_WEAPONS_MID_LEVEL = 23
    MELEE_WEAPONS_HIGH_LEVEL = 24
    MINING_AND_SMITHING = 25
    POTIONS = 26
    PRAYER_ARMOUR = 27
    PRAYER_MATERIALS = 28
    RANGE_ARMOUR = 29
    RANGE_WEAPONS = 30
    RUNECRAFTING = 31
    RUNES_SPELLS_TELEPORTS = 32
    SEEDS = 33
    SUMMONING_SCROLLS = 34
    TOOLS_AND_CONTAINERS = 35
    WOODCUTTING_PRODUCTS = 36
    POCKET_ITEMS = 37
    STONE_SPIRITS = 38
    SALVAGE = 39
    FIREMAKING_PRODUCTS = 40
    ARCHEOLOGY_MATERIALS = 41
from .endpoints import HISCORE_API_ENDPOINTS
from .models import SKILLS, ACTIVITIES
from .utils.jagex import parse_player_to_dict, is_str, is_int
from .httpService.http_request import http_get
class Hiscores:
    """Player Hiscores client."""

    # Game modes that have their own hiscore endpoints.
    _VALID_GAME_MODES = ("normal", "ironman", "hardcore")

    def get_ranking(self, index: int = 0, category: str = "skill", size: int = 25) -> dict:
        """Gets hiscore ranks in a particular skill or activity.

        :param int index: index of the skill or activity to fetch
        :param str category: 'skill' or 'activity'
        :param int size: how many results you want back, default = 25
        :rtype: dict
        :raises Exception: if there is an invalid category argument
        """
        # Argument type validation
        is_int(index)
        # Category must be either 'skill' or 'activity'
        if category not in ("skill", "activity"):
            raise Exception("Invalid category must be: skill | activity")
        category_id = 0 if category == "skill" else 1
        response = http_get(HISCORE_API_ENDPOINTS['ranking'].format(index=index, category=category_id, size=size))
        # Resolve the human-readable name for the requested index.
        skill_or_activity = SKILLS[index] if category_id == 0 else ACTIVITIES[index]
        return {
            "category": skill_or_activity,
            "rankings": response.json()
        }

    def get_index_lite(self, game_mode: str, username: str) -> dict:
        """Gets a player's hiscore profile.

        :param str game_mode: name of the player's game mode
        :param str username: player's username
        :rtype: dict
        :raises Exception: if the API returns a 404 error
        """
        # Argument type validation
        is_str(game_mode, username)
        game_mode = game_mode.lower()
        # Unrecognised game modes fall back to the regular hiscores.
        if not self.__game_mode_validation(game_mode):
            game_mode = "normal"
        response = http_get(HISCORE_API_ENDPOINTS[game_mode].format(username=username))
        # TODO Look into this, probably removable
        # A missing username yields an HTML page containing an error404 marker.
        if "error404" in response.text:
            raise Exception("Runescape API throws error404, most likely username does not exist on hiscore")
        # Parse the raw text from the API into a usable dictionary.
        return parse_player_to_dict(response.text, username)

    # --- Player season rankings ---

    def get_current_seasonal_ranking(self, username: str) -> list:
        """Gets a player's current seasonal stats.

        :param str username: player's username
        :rtype: list
        """
        is_str(username)
        response = http_get(HISCORE_API_ENDPOINTS["season_ranking"].format(username=username))
        return response.json()

    def get_past_seasonal_ranking(self, username: str) -> list:
        """Gets a player's past season stats (archived).

        :param str username: player's username
        :rtype: list
        """
        is_str(username)
        response = http_get(HISCORE_API_ENDPOINTS["past_season_ranking"].format(username=username))
        return response.json()

    # --- Season details ---

    def get_season_details(self) -> list:
        """Gets details about the current season.

        :rtype: list
        """
        response = http_get(HISCORE_API_ENDPOINTS["season_detail"])
        return response.json()

    def get_past_season_details(self) -> list:
        """Gets details about past seasons.

        :rtype: list
        """
        response = http_get(HISCORE_API_ENDPOINTS["past_season_detail"])
        return response.json()

    # --- Clans ---

    def get_clan_ranking(self) -> list:
        """Returns details about the top 3 clans.

        :rtype: list
        """
        response = http_get(HISCORE_API_ENDPOINTS["clan_ranking"])
        return response.json()

    # --- Helper functions ---

    def __game_mode_validation(self, game_mode):
        # True when the mode has a dedicated hiscore endpoint.
        return game_mode in self._VALID_GAME_MODES
__author__ = 'isaiahmayerchak'
#acbart did most of this code, I mostly just changed the template
from docutils import nodes
from docutils.parsers.rst import directives
from runestone.assess import Assessment
from runestone.server.componentdb import addQuestionToDB, addHTMLToDB
from runestone.common.runestonedirective import RunestoneDirective, RunestoneNode
def setup(app):
    # Register the shortanswer directive, its docutils node, the JS assets,
    # and the CSS-class config values read at render time.
    app.add_directive('shortanswer', JournalDirective)
    app.add_node(JournalNode, html=(visit_journal_node, depart_journal_node))
    app.add_js_file('shortanswer.js')
    app.add_js_file('timed_shortanswer.js')
    app.add_config_value('shortanswer_div_class', 'journal alert alert-warning', 'html')
    app.add_config_value('shortanswer_optional_div_class', 'journal alert alert-success', 'html')
# HTML template for one rendered shortanswer question; the %(...)s fields are
# filled from the node options in visit_journal_node.
TEXT = """
<div class="runestone">
<p data-component="shortanswer" class="%(divclass)s" id=%(divid)s %(optional)s>%(qnum)s: %(content)s</p>
</div>
"""
class JournalNode(nodes.General, nodes.Element, RunestoneNode):
    """Docutils node representing a single shortanswer question."""
    def __init__(self, options, **kwargs):
        super(JournalNode, self).__init__(**kwargs)
        # Keep the directive options for rendering in visit_journal_node.
        self.journalnode_components = options
def visit_journal_node(self, node):
    """Render the question HTML, store it in the component DB, and emit it."""
    components = dict(node.journalnode_components)
    div_id = components['divid']
    res = TEXT % components
    addHTMLToDB(div_id, components['basecourse'], res)
    self.body.append(res)
def depart_journal_node(self,node):
    # Nothing to emit; TEXT already contains the closing markup.
    pass
class JournalDirective(Assessment):
    """
    .. shortanswer:: uniqueid
       :optional:

       text of the question goes here

    config values (conf.py):

    - shortanswer_div_class - custom CSS class of the component's outermost div
    """
    required_arguments = 1  # the div id
    optional_arguments = 0
    final_argument_whitespace = True
    has_content = True
    option_spec = Assessment.option_spec.copy()
    option_spec.update({'optional': directives.flag})
    node_class = JournalNode

    def run(self):
        """Build a JournalNode from the directive options and content."""
        super(JournalDirective, self).run()
        addQuestionToDB(self)
        # A question without body text is an authoring error.
        self.assert_has_content()
        if 'optional' in self.options:
            self.options['optional'] = 'data-optional'
        else:
            self.options['optional'] = ''
        self.options['content'] = "<p>".join(self.content)
        self.options['qnum'] = self.getNumber()
        node = JournalNode(self.options, rawsource=self.block_text)
        node.source, node.line = self.state_machine.get_source_and_line(self.lineno)
        env = self.state.document.settings.env
        # Optional questions get their own (usually green) styling.
        if self.options['optional']:
            self.options['divclass'] = env.config.shortanswer_optional_div_class
        else:
            self.options['divclass'] = env.config.shortanswer_div_class
        return [node]
__author__ = 'isaiahmayerchak'
from docutils import nodes
from docutils.parsers.rst import directives
from runestone.common.runestonedirective import RunestoneDirective, RunestoneNode
def setup(app):
    # Register the tabbed/tab directives, their docutils nodes, the JS/CSS
    # assets, and the CSS-class config value read when rendering the outer div.
    app.add_directive('tabbed', TabbedStuffDirective)
    app.add_directive('tab', TabDirective)
    app.add_node(TabNode, html=(visit_tab_node, depart_tab_node))
    app.add_node(TabbedStuffNode, html=(visit_tabbedstuff_node, depart_tabbedstuff_node))
    app.add_js_file('tabbedstuff.js')
    app.add_css_file('tabbedstuff.css')
    app.add_config_value('tabbed_div_class', 'alert alert-warning', 'html')
# Templates to be formatted by node options.
# BEGIN/END wrap the whole tab group; TABDIV_BEGIN/TABDIV_END wrap one pane.
BEGIN = """<div id='%(divid)s' data-component="tabbedStuff" %(inactive)s class='%(divclass)s'>"""
TABDIV_BEGIN = """<div data-component="tab" data-tabname="%(tabname)s" %(active)s>
"""
TABDIV_END = """</div>"""
END = """
</div>
"""
class TabNode(nodes.General, nodes.Element):
    """Docutils node representing one tab pane inside a tabbed group."""
    def __init__(self, content, **kwargs):
        super(TabNode, self).__init__(**kwargs)
        # Keep the directive options for rendering in visit_tab_node.
        self.tabnode_options = content
        self.tabname = content['tabname']
def visit_tab_node(self, node):
    """Emit the opening markup for a single tab pane."""
    # Presence of the 'active' option marks this pane as initially open.
    active_attr = 'data-active' if 'active' in node.tabnode_options else ''
    node.tabnode_options['active'] = active_attr
    markup = TABDIV_BEGIN % {'divid': node.parent.divid,
                             'tabname': node.tabname,
                             'active': active_attr}
    self.body.append(markup)
def depart_tab_node(self,node):
    # Close the tab pane opened in visit_tab_node.
    self.body.append(TABDIV_END)
class TabbedStuffNode(nodes.General, nodes.Element, RunestoneNode):
    '''A TabbedStuffNode contains one or more TabNodes'''
    def __init__(self, content, **kwargs):
        super(TabbedStuffNode,self).__init__(**kwargs)
        # Keep the directive options for rendering in visit_tabbedstuff_node.
        self.tabbed_stuff_options = content
        self.divid = content['divid']
def visit_tabbedstuff_node(self, node):
    """Emit the opening markup for the whole tab group."""
    # Presence of the 'inactive' option means no pane starts open.
    inactive_attr = 'data-inactive' if 'inactive' in node.tabbed_stuff_options else ''
    node.tabbed_stuff_options['inactive'] = inactive_attr
    markup = BEGIN % {'divid': node.divid,
                      'divclass': node.tabbed_stuff_options['divclass'],
                      'inactive': inactive_attr}
    self.body.append(markup)
def depart_tabbedstuff_node(self, node):
    """Close the tab-group div opened in visit_tabbedstuff_node."""
    # END contains no %-placeholders, so the old `res % {'divid': ...}`
    # substitution was a no-op; append the template directly.
    self.body.append(END)
class TabDirective(RunestoneDirective):
    """
    .. tab:: identifier
       :active: Optional flag that specifies this tab to be opened when page is loaded (default is first tab)--overridden by :inactive: flag on tabbedStuff

       Content
       ...

    config values (conf.py):

    - tabbed_div_class - custom CSS class of the component's outermost div
    """
    required_arguments = 1  # the name of the tab
    optional_arguments = 0
    final_argument_whitespace = True
    has_content = True
    option_spec = {'active': directives.flag}
    node_class = TabNode

    def run(self):
        """Build a TabNode for one tab and parse the tab body into it."""
        # A tab without content is an authoring error.
        self.assert_has_content()
        self.options['tabname'] = self.arguments[0]
        node = TabNode(self.options, rawsource=self.block_text)
        # Parse the child nodes (the content of the tab) into the new node.
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
class TabbedStuffDirective(RunestoneDirective):
    """
    .. tabbed:: identifier
       :inactive: Optional flag that calls for no tabs to be open on page load

       Content (put tabs here)
       ...

    config values (conf.py):

    - tabbed_div_class - custom CSS class of the component's outermost div
    """
    required_arguments = 1  # the div to put the tabbed exhibit in
    optional_arguments = 0
    final_argument_whitespace = True
    has_content = True
    option_spec = {'inactive': directives.flag}

    def run(self):
        """Build a TabbedStuffNode and parse the contained tab directives into it."""
        # The group must contain at least one tab directive.
        self.assert_has_content()
        self.options['divid'] = self.arguments[0]
        env = self.state.document.settings.env
        self.options['divclass'] = env.config.tabbed_div_class
        node = TabbedStuffNode(self.options, rawsource=self.block_text)
        node.source, node.line = self.state_machine.get_source_and_line(self.lineno)
        # Parse the directive contents (should be 1 or more tab directives).
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
__author__ = 'tconzett'
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst import Directive
from runestone.server.componentdb import addQuestionToDB, addHTMLToDB
from runestone.common.runestonedirective import RunestoneIdDirective
def setup(app):
    # Register the showeval directive, its JS/CSS assets, and the CSS-class
    # config value read at render time.
    app.add_directive('showeval', ShowEval)
    app.add_js_file('showEval.js')
    app.add_css_file('showEval.css')
    app.add_config_value('showeval_div_class', 'runestone explainer alert alert-warning', 'html')
# HTML skeleton for the component: step/reset buttons plus the container for
# the prerequisite lines and the container the animation writes into.
CODE = """\
<div data-childcomponent="showeval" class="%(divclass)s">
<button class="btn btn-success" id="%(divid)s_nextStep">Next Step</button>
<button class="btn btn-default" id ="%(divid)s_reset">Reset</button>
<div class="evalCont" style="background-color: #FDFDFD;">%(preReqLines)s</div>
<div class="evalCont" id="%(divid)s"></div>
</div>
"""

# Inline script that instantiates the SHOWEVAL.ShowEval animation and wires
# up the two buttons once the page is ready.
SCRIPT = """\
<script>
$(document).ready(function() {
steps = %(steps)s;
%(divid)s_object = new SHOWEVAL.ShowEval($('#%(divid)s'), steps, %(trace_mode)s);
%(divid)s_object.setNextButton('#%(divid)s_nextStep');
%(divid)s_object.setResetButton('#%(divid)s_reset');
});
</script>
"""
class ShowEval(RunestoneIdDirective):
    """
    .. showeval:: unique_id_goes_here
       :trace_mode: boolean <- Required option that enables 'Trace Mode'

       some code
       more code
       ~~~~
       more {{code}}{{what code becomes in step 1}}
       more {{what code becomes in step 1}}{{what code becomes in step2}} ##Optional comment for step 2
       as many steps as you want {{the first double braces}}{{animate into the second}} wherever.

    config values (conf.py):

    - showeval_div_class - custom CSS class of the component's outermost div
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    has_content = True
    option_spec = {'trace_mode': directives.unchanged_required}

    def run(self):
        """
        All prerequisite information that should be displayed above the directive,
        such as variable declaration, is separated from the step strings by "~~~~".
        The step animations follow the "~~~~" and are written one per line. Use
        "{{" and "}}" braces to surround the part of the line that should be replaced,
        followed by the replacement text also in "{{" and "}}". A comment to show
        beside a step is written at the end of that step after "##".

        Example:

        .. showeval:: showEval_0
           :trace_mode: false

           eggs = ['dogs', 'cats', 'moose']
           ~~~~
           ''.join({{eggs}}{{['dogs', 'cats', 'moose']}}).upper().join(eggs)
           {{''.join(['dogs', 'cats', 'moose'])}}{{'dogscatsmoose'}}.upper().join(eggs) ##I want to put a comment here!
        """
        super(ShowEval, self).run()
        addQuestionToDB(self)
        self.options['trace_mode'] = self.options['trace_mode'].lower()
        self.options['preReqLines'] = ''
        self.options['steps'] = []
        env = self.state.document.settings.env
        self.options['divclass'] = env.config.showeval_div_class
        # Lines before the '~~~~' separator are prerequisite code shown above
        # the animation; non-empty lines after it are the animation steps.
        in_steps = False
        for line in self.content:
            if in_steps:
                if line != '':
                    self.options['steps'].append(str(line))
            elif '~~~~' in line:
                in_steps = True
            else:
                self.options['preReqLines'] += line + '<br />\n'
        res = (CODE + SCRIPT) % self.options
        addHTMLToDB(self.options['divid'], self.options['basecourse'], res)
        return [nodes.raw(self.block_text, res, format='html')]
// Model for the simple tree animation: a fixed sequence of graphviz scenes.
SimpleTreeModel = function() //construct the model
{
}

// Return the list of scenes.  Each scene supplies a node list, an edge list
// and default node/edge drawing parameters for the graphviz chart; the
// 'filled' style marks the node highlighted at that step.
SimpleTreeModel.prototype.init = function(ctl)
{
    var model = [
        { nodelist: {C_0: {color: 'blue', style: 'filled'},
                     H_0: {type: 's', shape: 'record', color: 'blue', label: 'foo'},
                     H_1: {type: 's'}, H_2: {type: 's'},
                     C_1: {type: 's'}, H_3: {type: 's'},
                     H_4: {type: 's'}, H_5: {type: 's'}},
          edgelist: {C_0: ['H_0:f1', 'H_1', 'H_2', 'C_1'], C_1: ['H_3', 'H_4', 'H_5']},
          params: {node: {shape: 'circle', color: 'red'}, edge: {color: 'blue'}}},
        { nodelist: {C_0: {},
                     H_0: {type: 's', shape: 'record', color: 'blue', label: 'foo', style: 'filled'},
                     H_1: {type: 's'}, H_2: {type: 's'},
                     C_1: {type: 's'}, H_3: {type: 's'},
                     H_4: {type: 's'}, H_5: {type: 's'}},
          edgelist: {C_0: ['H_0:f1', 'H_1', 'H_2', 'C_1'], C_1: ['H_3', 'H_4', 'H_5']},
          params: {node: {shape: 'circle', color: 'red'}, edge: {color: 'blue'}}},
        { nodelist: {C_0: {},
                     H_0: {type: 's', shape: 'record', label: 'foo'},
                     H_1: {type: 's', style: 'filled', color: 'blue'}, H_2: {type: 's'},
                     C_1: {type: 's'}, H_3: {type: 's'},
                     H_4: {type: 's'}, H_5: {type: 's'}},
          edgelist: {C_0: ['H_0:f1', 'H_1', 'H_2', 'C_1'], C_1: ['H_3', 'H_4', 'H_5']},
          params: {node: {shape: 'circle', color: 'red'}, edge: {color: 'blue'}}},
        { nodelist: {C_0: {},
                     H_0: {type: 's', shape: 'record', label: 'foo'},
                     H_1: {type: 's', style: 'filled', color: 'blue'}, H_2: {type: 's'},
                     C_1: {type: 's'}, H_3: {type: 's'},
                     H_4: {type: 's'}, H_5: {type: 's'}, B_1: {type: 's', color: 'blue', style: 'filled'}},
          edgelist: {C_0: ['H_0:f1', 'H_1', 'H_2', 'C_1'], C_1: ['H_3', 'H_4', 'H_5'], H_1: ['B_1']},
          params: {node: {shape: 'circle', color: 'red'}, edge: {color: 'blue'}}},
    ];
    return model
}
// View for the simple tree animation: renders one scene as a graphviz chart.
TreeViewer = function() //construct the view
{
}

// Remember the rendering context for later use.
TreeViewer.prototype.init = function(context)
{
    this.ctx = context
}

// Draw one scene of the animation into the #ancan_div container.
TreeViewer.prototype.render = function(scene)
{
    var container = $('#ancan_div')
    container.attr('class', 'none')
    container.gchart($.gchart.graphviz(true, scene.nodelist,
        scene.edgelist, scene.params))
}
(function($) { // Hide scope, no $ conflict

// Default option values added for the map and QR-code chart types.
$.extend($.gchart._defaults, {
    // Maps -------------------
    mapLatLong: false, // True to use lat/long coords in mapArea
    mapArea: null, // New maps: (number) pixel border all around or
        // (number[4]) individual pixel borders or lat/long
        // Original maps: the general area to show:
        // world, africa, asia, europe, middle_east, south_america, usa
    mapRegions: [], // List of country/state codes to plot
    mapDefaultColor: 'bebebe', // The colour for non-plotted countries/states
    mapColors: ['blue', 'red'], // The colour range for plotted countries/states
    // QR Code ----------------
    qrECLevel: null, // Error correction level: low, medium, quarter, high
    qrMargin: null // Margin (squares) around QR code, default is 4
});

// New chart types: formula, map, mapOriginal, meter, qrCode, scatter, venn
// (each maps a friendly name to the Google Charts API type code).
$.extend($.gchart._chartTypes, {formula: 'tx', map: 'map', mapOriginal: 't',
    meter: 'gom', qrCode: 'qr', scatter: 's', venn: 'v',
    gom: 'gom', qr: 'qr', s: 's', t: 't', tx: 'tx', v: 'v'});
$.extend($.gchart._typeOptions, {map: 'map', qr: 'qr', t: 'map', tx: 'no'});
/* Latitude and longitude coordinates for the continents. */
mapAfrica: [-35, -20, 40, 55],
mapAsia: [-15, 40, 75, 180],
mapAustralia: [-45, 110, -10, 155],
mapEurope: [33, -25, 73, 50],
mapNorthAmerica: [5, -175, 75, -50],
mapSouthAmerica: [-55, -85, 15, -35],

/* Prepare options for a scatter chart.
   @param values (number[][2/3]) the coordinates of the points: [0] is the x-coord,
          [1] is the y-coord, [2] (optional) is the percentage size
   @param minMax (number[2/4]) any minimum and maximum values for the axes (optional)
   @param labels (string[]) the labels for the groups (optional)
   @param colours (string[]) the colours for the labels (optional)
   @param options (object) additional settings (optional)
   @return (object) the configured options object */
scatter: function(values, minMax, labels, colours, options) {
    // Optional-argument juggling: the order of these checks is significant.
    if (!$.isArray(minMax)) {
        // minMax omitted entirely - what was passed there is options.
        options = minMax;
        colours = null;
        labels = null;
        minMax = null;
    }
    else if (typeof minMax[0] != 'number') {
        // minMax omitted but labels given - shift the later arguments right.
        options = colours;
        colours = labels;
        labels = minMax;
        minMax = null;
    }
    if (labels && !$.isArray(labels)) {
        // labels omitted - what was passed there is options.
        options = labels;
        colours = null;
        labels = null;
    }
    // Split the points into three parallel series: x, y, and point size
    // (size defaults to 100%).
    var series = [[], [], []];
    for (var i = 0; i < values.length; i++) {
        series[0][i] = values[i][0];
        series[1][i] = values[i][1];
        series[2][i] = values[i][2] || 100;
    }
    minMax = minMax || [];
    options = options || {};
    if (labels) {
        options.extension = {chdl: labels.join('|')};
    }
    if (colours) {
        colours = $.map(colours, function(v, i) {
            return $.gchart.color(v);
        });
        $.extend(options.extension, {chco: colours.join('|')});
    }
    // With explicit axis bounds use scaled encoding; otherwise text encoding.
    return $.extend({}, options,
        {type: 'scatter', encoding: (minMax.length >= 2 ? 'scaled' : 'text'), series: [
        (minMax.length >= 2 ? $.gchart.series(series[0], minMax[0], minMax[1]) :
        $.gchart.series(series[0])),
        (minMax.length >= 4 ? $.gchart.series(series[1],
        (minMax[2] != null ? minMax[2] : minMax[0]), (minMax[3] != null ? minMax[3] : minMax[1])) :
        $.gchart.series(series[1])), $.gchart.series(series[2])]});
},
/* Prepare options for a Venn diagram.
@param size1 (number) the relative size of the first circle
@param size2 (number) the relative size of the second circle
@param size3 (number) the relative size of the third circle
@param overlap12 (number) the overlap between circles 1 and 2
@param overlap13 (number) the overlap between circles 1 and 3
@param overlap23 (number) the overlap between circles 2 and 3
@param overlap123 (number) the overlap between all circles
@param options (object) additional settings (optional)
@return (object) the configured options object */
venn: function(size1, size2, size3, overlap12, overlap13, overlap23, overlap123, options) {
return $.extend({}, options || {}, {type: 'venn', series:
[$.gchart.series([size1, size2, size3, overlap12, overlap13, overlap23, overlap123])]});
},
/* Prepare options for a Google meter.
   @param text (string or string[]) the text to show on the arrow (optional)
   @param values (number or number[] or [] of these) the position(s) of the arrow(s)
   @param maxValue (number) the maximum value for the meter (optional, default 100)
   @param colours (string[]) the colours to use for the band (optional)
   @param labels (string[]) labels appearing beneath the meter (optional)
   @param styles (number[][4]) the styles of each series' arrows:
          width, dash, space, arrow size (optional)
   @param options (object) additional settings (optional)
   @return (object) the configured options object */
meter: function(text, values, maxValue, colours, labels, styles, options) {
    // Optional-argument juggling: each check shifts the remaining arguments
    // right when an optional parameter was omitted; order is significant.
    if (typeof text != 'string' && !$.isArray(text)) {
        options = styles;
        styles = labels;
        labels = colours;
        colours = maxValue;
        maxValue = values;
        values = text;
        text = '';
    }
    if (typeof maxValue != 'number') {
        options = styles;
        styles = labels;
        labels = colours;
        colours = maxValue;
        maxValue = null;
    }
    if (!$.isArray(colours)) {
        options = styles;
        styles = labels;
        labels = colours;
        colours = null;
    }
    if (!$.isArray(labels)) {
        options = styles;
        styles = labels;
        labels = null;
    }
    if (!$.isArray(styles)) {
        options = styles;
        styles = null;
    }
    // Normalise values: a nested array means multiple arrow series.
    values = ($.isArray(values) ? values : [values]);
    var multi = false;
    for (var i = 0; i < values.length; i++) {
        multi = multi || $.isArray(values[i]);
    }
    var ss = (multi ? [] : [$.gchart.series(values)]);
    if (multi) {
        for (var i = 0; i < values.length; i++) {
            ss.push($.gchart.series($.isArray(values[i]) ? values[i] : [values[i]]));
        }
    }
    values = ss;
    if (colours) {
        // Band colours are joined into a single comma-separated parameter.
        var cs = '';
        $.each(colours, function(i, v) {
            cs += ',' + $.gchart.color(v);
        });
        colours = cs.substr(1);
    }
    if (styles) {
        // Arrow styles: first part is width/dash/space, second the arrow size.
        var ls = ['', ''];
        $.each(styles, function(i, v) {
            v = ($.isArray(v) ? v : [v]);
            ls[0] += '|' + $.gchart.color(v.slice(0, 3).join(','));
            ls[1] += '|' + (v[3] || 15);
        });
        styles = ls[0].substr(1) + ls[1];
    }
    var axis = (labels && labels.length ? $.gchart.axis('y', labels) : null);
    return $.extend({}, options || {}, {type: 'meter',
        maxValue: maxValue || 100, series: values,
        dataLabels: ($.isArray(text) ? text : [text || ''])},
        (colours ? {extension: {chco: colours}} : {}),
        (axis ? {axes: [axis]} : {}),
        (styles ? {extension: {chls: styles}} : {}));
},
/* Prepare options for a map chart.
@param latLongArea (boolean) true to specify the area via latitude/longitude (optional)
@param mapArea (string) the region of the world to show (original map style) or
(number[4]) the pixel zoom or lat/long coordinates to show or
(number) all around pixel zoom (optional)
@param values (object) the countries/states to plot -
attributes are country/state codes and values
@param defaultColour (string) the colour for regions without values (optional)
@param colour (string or string[]) the starting colour or
gradient colours for rendering values (optional)
@param endColour (string) the ending colour for rendering values (optional)
@param options (object) additional settings (optional)
@return (object) the configured options object */
	map: function(latLongArea, mapArea, values, defaultColour, colour, endColour, options) {
		// Shift arguments left for each omitted optional leading parameter.
		if (typeof latLongArea != 'boolean') { // Optional latLongArea
			options = endColour;
			endColour = colour;
			colour = defaultColour;
			defaultColour = values;
			values = mapArea;
			mapArea = latLongArea;
			latLongArea = false;
		}
		if (typeof mapArea == 'object' && !$.isArray(mapArea)) { // Optional mapArea
			options = endColour;
			endColour = colour;
			colour = defaultColour;
			defaultColour = values;
			values = mapArea;
			mapArea = null;
		}
		if (typeof defaultColour == 'object') { // All colour params omitted
			options = defaultColour;
			endColour = null;
			colour = null;
			defaultColour = null;
		}
		else if (typeof colour == 'object' && !$.isArray(colour)) { // colour/endColour omitted
			options = colour;
			endColour = null;
			colour = null;
		}
		else if (typeof endColour == 'object') { // endColour omitted
			options = endColour;
			endColour = null;
		}
		// Split the values object into parallel region-code and data arrays
		var mapRegions = [];
		var data = [];
		var i = 0;
		for (var name in values) {
			mapRegions[i] = name.replace(/_/g, '-'); // Underscores stand in for hyphens in codes
			data[i] = values[name];
			i++;
		}
		if (typeof mapArea == 'number') { // Uniform pixel zoom on all four sides
			mapArea = [mapArea, mapArea, mapArea, mapArea];
		}
		return $.extend({}, options || {},
			{type: (typeof mapArea == 'string' ? 'mapOriginal' : 'map'),
			mapLatLong: latLongArea, mapArea: mapArea, mapRegions: mapRegions,
			mapDefaultColor: defaultColour || $.gchart._defaults.mapDefaultColor,
			mapColors: ($.isArray(colour) ? colour : [colour || $.gchart._defaults.mapColors[0],
			endColour || $.gchart._defaults.mapColors[1]]),
			series: [$.gchart.series('', data)]});
	},
/* Prepare options for generating a QR Code.
@param text (object) the QR code settings or
(string) the text to encode
@param encoding (string) the encoding scheme (optional)
@param ecLevel (string) the error correction level: l, m, q, h (optional)
@param margin (number) the margin around the code (optional)
@return (object) the configured options object */
qrCode: function(text, encoding, ecLevel, margin) {
var options = {};
if (typeof text == 'object') {
options = text;
}
else { // Individual fields
options = {dataLabels: [text], encoding: encoding,
qrECLevel: ecLevel, qrMargin: margin};
}
options.type = 'qrCode';
if (options.text) {
options.dataLabels = [options.text];
options.text = null;
}
return options;
},
/* Generate standard options for map charts.
@param options (object) the chart settings
@param labels (string) the concatenated labels for the chart
@return (string) the standard map chart options */
	mapOptions: function(options, labels) {
		// Pick the data-encoding implementation, defaulting to text encoding
		var encoding = this['_' + options.encoding + 'Encoding'] || this['_textEncoding'];
		var colours = '';
		for (var i = 0; i < options.mapColors.length; i++) {
			colours += ',' + $.gchart.color(options.mapColors[i]);
		}
		// A string mapArea selects the old-style named region via chtm; otherwise
		// the ':auto='/':fixed=' viewport is appended to the cht value built by the caller
		return (typeof options.mapArea == 'string' ? '&chtm=' + options.mapArea :
			(options.mapArea ? (options.mapLatLong ? ':fixed=' : ':auto=') +
			($.isArray(options.mapArea) ? options.mapArea.join(',') :
			options.mapArea + ',' + options.mapArea + ',' + options.mapArea + ',' + options.mapArea) : '')) +
			'&chd=' + encoding.apply($.gchart, [options]) +
			(options.mapRegions && options.mapRegions.length ?
			'&chld=' + options.mapRegions.join(typeof options.mapArea == 'string' ? '' : '|') : '') +
			'&chco=' + $.gchart.color(options.mapDefaultColor) + colours;
	},
/* Generate standard options for QR Code charts.
@param options (object) the chart settings
@param labels (string) the concatenated labels for the chart
@return (string) the standard QR Code chart options */
qrOptions: function(options, labels) {
return $.gchart._include('&choe=', options.encoding) +
(options.qrECLevel || options.qrMargin ?
'&chld=' + (options.qrECLevel ? options.qrECLevel.charAt(0) : 'l') +
(options.qrMargin != null ? '|' + options.qrMargin : '') : '') +
(labels ? '&chl=' + labels.substr(1) : '');
},
/* Generate standard options for charts that aren't really charts.
@param options (object) the chart settings
@param labels (string) the concatenated labels for the chart
@return (string) the standard non-chart options */
noOptions: function(options, labels) {
return '&chl=' + labels.substr(1);
},
/* Generate the options for chart size, including restriction for maps.
@param type (string) the encoded chart type
@param options (object) the chart settings
@return (string) the chart size options */
addSize: function(type, options) {
var maxSize = (type == 'map' || type == 't' ? 600 : 1000);
options.width = Math.max(10, Math.min(options.width, maxSize));
options.height = Math.max(10, Math.min(options.height, maxSize));
if (options.width * options.height > 300000) {
options.height = Math.floor(300000 / options.width);
}
return 'chs=' + options.width + 'x' + options.height;
}
});
})(jQuery);
$('div selector').gchart({type: 'pie', series: [$.gchart.series([101, 84])]});
*/
(function($) { // Hide scope, no $ conflict
/* Google Charting manager. */
function GChart() {
this._defaults = {
width: 0, // Width of the chart
height: 0, // Height of the chart
format: 'png', // Returned format: png, gif
usePost: false, // True to POST instead of GET - for larger charts with more data
secure: false, // True to access a secure version of Google Charts
margins: null, // The minimum margins (pixels) around the chart:
// all or [left/right, top/bottom] or [left, right, top, bottom]
title: '', // The title of the chart
titleColor: '', // The colour of the title
titleSize: 0, // The font size of the title
opacity: 0, // Make the entire chart semi-transparent (0.0-1.0 or 0-100)
backgroundColor: null, // The background colour for the entire image
chartColor: null, // The background colour for the chart area
legend: '', // The location of the legend: top, topVertical,
// bottom, bottomVertical, left, right, or '' for none
legendOrder: 'normal', // The order of items within a legend: normal, reverse, automatic
legendDims: null, // The minimum size (pixels) of the legend: [width, height]
legendColor: '', // The colour of the legend
legendSize: 0, // The font size of the legend
type: 'pie3D', // Type of chart requested: line, lineXY, sparkline, barHoriz, barVert,
// barHorizGrouped, barVertGrouped, barHorizOverlapped, barVertOverlapped, pie, pie3D (default),
// pieConcentric, venn, scatter, radar, radarCurved, map, mapOriginal, meter, qrCode, formula
encoding: '', // Type of data encoding: text (default), scaled, simple, extended
series: [this.series('Hello World', [60, 40])], // Details about the values to be plotted
visibleSeries: 0, // The number of series that are directly displayed, 0 for all
functions: [], // Functions to apply to be plotted based on data
dataLabels: [], // Labels for the values across all the series
axes: [], // Definitions for the various axes, each entry is either
// a string of the axis name or a GChartAxis object
ranges: [], // Definitions of ranges for the chart, each entry is an object with
// vertical (boolean), color (string), start (number, 0-1),
// and end (number, 0-1) attributes
markers: [], // Definitions of markers for the chart, each entry is an object with
// shape (arrow, circle, cross, diamond, down, flag, horizontal,
// number, plus, sparkfill, sparkline, square, text, vertical),
// color (string), series (number), item (number), size (number),
// priority (number), text (string), positioned (boolean),
// placement (string or string[]), offsets (number[2])
minValue: 0, // The minimum value of the data, $.gchart.calculate to calculate from data
maxValue: 100, // The maximum value of the data, $.gchart.calculate to calculate from data
gridSize: null, // The x and y spacings between grid lines (number or number[2])
gridLine: null, // The line and gap lengths for the grid lines (number or number[2])
gridOffsets: null, // The x and y offsets for the grid lines (number or number[2])
extension: {}, // Any custom extensions to the Google chart parameters
// Bar charts -------------
barWidth: null, // The width of each bar (pixels) or 'a' for automatic or 'r' for relative
barSpacing: null, // The space (pixels) between bars in a group
barGroupSpacing: null, // The space (pixels) between groups of bars
barZeroPoint: null, // The position (0.0 to 1.0) of the zero-line
// Pie charts -------------
pieOrientation: 0, // The angle (degrees) of orientation from the positive x-axis
// Callback
onLoad: null, // Function to call when loaded
provideJSON: false // True to return JSON description of chart with the onLoad callback
};
/* Mapping from chart type to options function: xxxOptions(). */
this._typeOptions = {'': 'standard', p: 'pie', p3: 'pie', pc: 'pie'};
/* List of additional options functions: addXXX(). */
this._chartOptions = ['Margins', 'DataFunctions', 'BarSizings', 'LineStyles', 'Colours',
'Title', 'Axes', 'Backgrounds', 'Grids', 'Markers', 'Legends', 'Extensions'];
/* Mapping from plugin chart types to Google chart types. */
this._chartTypes = {line: 'lc', lineXY: 'lxy', sparkline: 'ls', barHoriz: 'bhs', barVert: 'bvs',
barHorizGrouped: 'bhg', barVertGrouped: 'bvg', barHorizOverlapped: 'bho', barVertOverlapped: 'bvo',
pie: 'p', pie3D: 'p3', pieConcentric: 'pc', radar: 'r', radarCurved: 'rs',
lc: 'lc', lxy: 'lxy', ls: 'ls', bhs: 'bhs', bvs: 'bvs', bhg: 'bhg', bvg: 'bvg',
bho: 'bho', bvo: 'bvo', p: 'p', p3: 'p3', pc: 'pc', r: 'r', rs: 'rs'};
};
/* The name of the data property that holds the instance settings. */
var PROP_NAME = 'gChart';
/* Translations of text colour names into chart values. */
var COLOURS = {aqua: '008080', black: '000000', blue: '0000ff', fuchsia: 'ff00ff', gray: '808080',
green: '008000', grey: '808080', lime: '00ff00', maroon: '800000', navy: '000080',
olive: '808000', orange: 'ffa500', purple: '800080', red: 'ff0000', silver: 'c0c0c0',
teal: '008080', transparent: '00000000', white: 'ffffff', yellow: 'ffff00'};
/* Mapping from plugin shape types to Google chart shapes. */
var SHAPES = {annotation: 'A', arrow: 'a', candlestick: 'F', circle: 'o', cross: 'x',
diamond: 'd', down: 'v', errorbar: 'E', flag: 'f', financial: 'F', horizbar: 'H',
horizontal: 'h', number: 'N', plus: 'c', rectangle: 'C', sparkfill: 'B',
sparkline: 'D', sparkslice: 'b', square: 's', text: 't', vertical: 'V'};
/* Mapping from plugin priority names to chart priority codes. */
var PRIORITIES = {behind: -1, below: -1, normal: 0, above: +1, inFront: +1, '-': -1, '+': +1};
/* Mapping from plugin gradient names to angles. */
var GRADIENTS = {diagonalDown: -45, diagonalUp: 45, horizontal: 0, vertical: 90,
dd: -45, du: 45, h: 0, v: 90};
/* Mapping from plugin alignment names to chart alignment codes. */
var ALIGNMENTS = {left: -1, center: 0, centre: 0, right: +1, l: -1, c: 0, r: +1};
/* Mapping from plugin drawing control names to chart drawing control codes. */
var DRAWING = {line: 'l', ticks: 't', both: 'lt'};
/* Mapping from legend order names to chart drawing control codes. */
var ORDERS = {normal: 'l', reverse: 'r', automatic: 'a', '': '', l: 'l', r: 'r', a: 'a'};
/* Mapping from marker placement names to chart drawing placement codes. */
var PLACEMENTS = {barbase: 's', barcenter: 'c', barcentre: 'c', bartop: 'e', bottom: 'b',
center: 'h', centre: 'h', left: 'l', middle: 'v', right: 'r', top: 't',
b: 'b', c: 'c', e: 'e', h: 'h', l: 'l', r: 'r', s: 's', t: 't', v: 'v'};
/* Characters to use for encoding schemes. */
var SIMPLE_ENCODING = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
var EXTENDED_ENCODING = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-.';
$.extend(GChart.prototype, {
	/* The base function/class. */
	_prototype: GChart,
	/* Class name added to elements to indicate already configured with Google charting. */
	markerClassName: 'hasGChart',
	/* Marker value to indicate min/max calculation from data. */
	calculate: -0.123456, // Sentinel value - unlikely to collide with real data
	/* Possible values for bar width. */
	barWidthAuto: 'a', // Automatic resize to fill
	barWidthRelative: 'r', // Spacings are relative to bars (0.0 - 1.0)
	/* Possible values for number format (see numberFormat()). */
	formatFloat: 'f',
	formatPercent: 'p',
	formatScientific: 'e',
	formatCurrency: 'c',
/* Override the default settings for all Google chart instances.
@param options (object) the new settings to use as defaults */
setDefaults: function(options) {
extendRemove(this._defaults, options || {});
},
/* Create a new data series.
@param label (string, optional) the label for this series
@param data (number[]) the data values for this series
@param colour (string or string[], optional) the colour(s) for this series
@param fillColour (string, optional) the fill colour for this series or
(object, optional) fill slice with attributes color and range ('start:end') or
(object[], optional) array of above
@param minValue (number, optional with maxValue) the minimum value for this series
@param maxValue (number, optional with minValue) the maximum value for this series
@param thickness (number) the thickness (pixels) of the line for this series
@param segments (number[2]) the line and gap lengths (pixels) for this series
@return (object) the new series object */
	series: function(label, data, colour, fillColour, minValue, maxValue, thickness, segments) {
		// The leading parameters are optional - each block below detects an
		// omitted one by type and shifts the remaining arguments left.
		if ($.isArray(label)) { // Optional label
			segments = thickness;
			thickness = maxValue;
			maxValue = minValue;
			minValue = fillColour;
			fillColour = colour;
			colour = data;
			data = label;
			label = '';
		}
		if (typeof colour == 'number') { // Optional colour/fillColour
			segments = maxValue;
			thickness = minValue;
			maxValue = fillColour;
			minValue = colour;
			fillColour = null;
			colour = null;
		}
		if (typeof fillColour == 'number') { // Optional fillColour
			segments = thickness;
			thickness = maxValue;
			maxValue = minValue;
			minValue = fillColour;
			fillColour = null;
		}
		if ($.isArray(maxValue)) { // Optional min/max values
			segments = maxValue;
			thickness = minValue;
			maxValue = null;
			minValue = null;
		}
		return {label: label, data: data || [], color: colour || '',
			fillColor: fillColour, minValue: minValue, maxValue: maxValue,
			lineThickness: thickness, lineSegments: segments};
	},
/* Load series data from CSV.
Include a header row if fields other than data required.
Use these names - label, color, fillColor, minValue, maxValue,
lineThickness, lineSegmentLine, lineSegmentGap - for series attributes.
Data columns should be labelled ynn, where nn is a sequential number.
For X-Y line charts, include xnn columns before corresponding ynn.
@param csv (string or string[]) the series data in CSV format
@return (object[]) the series definitions */
	seriesFromCsv: function(csv) {
		var seriesData = [];
		if (!$.isArray(csv)) { // Accept a single newline-separated string
			csv = csv.split('\n');
		}
		if (!csv.length) {
			return seriesData;
		}
		var xyData = false; // Set when any x-coordinate column is seen
		var sColumns = []; // Positions of series-attribute columns
		var xColumns = []; // Positions of x-coordinate columns
		var fields = ['label', 'color', 'fillColor', 'minValue', 'maxValue',
			'lineThickness', 'lineSegmentLine', 'lineSegmentGap'];
		$.each(csv, function(i, line) {
			var cols = line.split(',');
			if (i == 0 && isNaN(parseFloat(cols[0]))) { // Header row
				$.each(cols, function(i, val) {
					if ($.inArray(val, fields) > -1) { // Note the positions of the columns
						sColumns[i] = val;
					}
					else if (val.match(/^x\d+$/)) { // Column with x-coordinate
						xColumns[i] = val;
					}
				});
			}
			else {
				var series = {};
				var data = [];
				var saveX = null; // Pending x-value awaiting its y partner
				$.each(cols, function(i, val) {
					if (sColumns[i]) { // Non-data value
						var pos = $.inArray(sColumns[i], fields);
						// Fields after fillColor are numeric
						series[sColumns[i]] = (pos > 2 ? $.gchart._numeric(val, 0) : val);
					}
					else if (xColumns[i]) { // X-coordinate
						saveX = (val ? $.gchart._numeric(val, -1) : null);
						xyData = true;
					}
					else { // Y-value - pair it with a pending x if present
						var y = $.gchart._numeric(val, -1);
						data.push(saveX != null ? [saveX, y] : y);
						saveX = null;
					}
				});
				// Combine the two segment columns into the lineSegments pair
				if (series.lineSegmentLine != null && series.lineSegmentGap != null) {
					series.lineSegments = [series.lineSegmentLine, series.lineSegmentGap];
					series.lineSegmentLine = series.lineSegmentGap = null;
				}
				seriesData.push($.extend(series, {data: data}));
			}
		});
		return (xyData ? this.seriesForXYLines(seriesData) : seriesData);
	},
/* Load series data from XML. All attributes are optional except point/@y.
<data>
<series label="" color="" fillColor="" minValue="" maxValue="" lineThickness="" lineSegments="">
<point x="" y=""/>
...
</series>
...
</data>
@param xml (string or Document) the XML containing the series data
@return (object[]) the series definitions */
seriesFromXml: function(xml) {
if ($.browser.msie && typeof xml == 'string') {
var doc = new ActiveXObject('Microsoft.XMLDOM');
doc.validateOnParse = false;
doc.resolveExternals = false;
doc.loadXML(xml);
xml = doc;
}
xml = $(xml);
var seriesData = [];
var xyData = false;
try {
xml.find('series').each(function() {
var series = $(this);
var data = [];
series.find('point').each(function() {
var point = $(this);
var x = point.attr('x');
if (x != null) {
xyData = true;
x = $.gchart._numeric(x, -1);
}
y = $.gchart._numeric(point.attr('y'), -1);
data.push(x ? [x, y] : y);
});
var segments = series.attr('lineSegments');
if (segments) {
segments = segments.split(',');
for (var i = 0; i < segments.length; i++) {
segments[i] = $.gchart._numeric(segments[i], 1);
}
}
seriesData.push({label: series.attr('label'), data: data,
color: series.attr('color'), fillColor: series.attr('fillColor'),
minValue: $.gchart._numeric(series.attr('minValue'), null),
maxValue: $.gchart._numeric(series.attr('maxValue'), null),
lineThickness: $.gchart._numeric(series.attr('lineThickness'), null),
lineSegments: segments});
});
}
catch (e) {
// Ignore
}
return (xyData ? this.seriesForXYLines(seriesData) : seriesData);
},
/* Force a value to be numeric.
@param val (string) the value to convert
@param whenNaN (number) value to use if not numeric
@return (number) the numeric equivalent or whenNaN if not numeric */
_numeric: function(val, whenNaN) {
val = parseFloat(val);
return (isNaN(val) ? whenNaN : val);
},
/* Prepare series for a line XY chart.
@param series (object[]) the details of the points to plot,
each data value may be an array of two points
@return (object[]) the transformed series
@deprecated in favour of seriesForXYLines */
	lineXYSeries: function(series) {
		// Thin delegate retained for backwards compatibility only.
		return this.seriesForXYLines(series);
	},
/* Prepare series for a line XY chart.
@param series (object[]) the details of the points to plot,
each data value may be an array of two points
@return (object[]) the transformed series */
seriesForXYLines: function(series) {
var xySeries = [];
for (var i = 0; i < series.length; i++) {
var xNull = !$.isArray(series[i].data[0]);
var xData = (xNull ? [null] : []);
var yData = [];
for (var j = 0; j < series[i].data.length; j++) {
if (xNull) {
yData.push(series[i].data[j]);
}
else {
xData.push(series[i].data[j][0]);
yData.push(series[i].data[j][1]);
}
}
xySeries.push($.gchart.series(series[i].label, xData, series[i].color,
series[i].fillColor, series[i].minValue, series[i].maxValue,
series[i].lineThickness, series[i].lineSegments));
xySeries.push($.gchart.series('', yData, '',
series[i].fillColor, series[i].minValue, series[i].maxValue,
series[i].lineThickness, series[i].lineSegments));
}
return xySeries;
},
/* Generate a data function definition.
@param series (number) the output series to generate into
@param data (object[]) the function variables list or
(string) the name of a single variable
@param series (number, optional) the input series to use for the variable data or
the start of a generated range (with end/step)
@param end (number, optional) the end of the generated range
@param step (number, optional) the step between values in the generated range
@param fnText (string) the function call, using the variable(s) above,
in muParser function syntax
@return (object) the data function definition */
	fn: function(series, data, start, end, step, fnText) {
		// fnText may arrive earlier when the range parameters are omitted.
		if (typeof end == 'string') { // step omitted
			fnText = end;
			end = null;
			step = null;
		}
		if (typeof start == 'string') { // whole range omitted
			fnText = start;
			start = null;
			end = null;
			step = null;
		}
		if (typeof data == 'string') { // Single variable name - build its definition
			data = this.fnVar(data, start, end, step);
		}
		return {series: series, data: data, fnText: fnText};
	},
/* Generate a function variable definition.
@param name (string) the variable name
@param start (number) the input series to use for the variable data or
(number) the start of a generated range (with end/step)
@param end (number, optional) the end of the generated range
@param step (number, optional) the step between values in the generated range
@return (object) the function variable definition */
fnVar: function(name, start, end, step) {
return {name: name, series: (step ? -1 : start),
start: (step ? start : null), end: end, step: step};
},
/* Generate a Google chart color.
@param r (string) colour name or '#hhhhhh' or
(number) red value (0-255)
@param g (number) green value (0-255) or
(number) alpha value (0-255, optional) if r is name
@param b (number) blue value (0-255)
@param a (number) alpha value (0-255, optional)
@return (string) the translated colour */
color: function(r, g, b, a) {
var checkRange = function(value) {
if (typeof value == 'number' && (value < 0 || value > 255)) {
throw 'Value out of range (0-255) ' + value;
}
};
var twoDigits = function(value) {
return (value.length == 1 ? '0' : '') + value;
};
if (typeof r == 'string') {
checkRange(g);
return (r.match(/^#([A-Fa-f0-9]{2}){3,4}$/) ? r.substring(1) :
(COLOURS[r] || r) + (g ? twoDigits(g.toString(16)) : ''));
}
checkRange(r);
checkRange(g);
checkRange(b);
checkRange(a);
return twoDigits(r.toString(16)) + twoDigits(g.toString(16)) +
twoDigits(b.toString(16)) + (a ? twoDigits(a.toString(16)) : '');
},
/* Create a simple linear gradient definition for a background.
@param angle (string or number) the angle of the gradient from positive x-axis
@param colours (string[]) an array of colours or
(string) the starting colour
@param positions (number[], optional) the positions (0.0 to 1.0) of the gradient colours or
(string, optional) the ending colour
@return (object) the gradient definition */
gradient: function(angle, colours, positions) {
var colourPoints = [];
if ($.isArray(colours)) {
var step = 1 / (colours.length - 1);
for (var i = 0; i < colours.length; i++) {
colourPoints.push([colours[i], (positions ? positions[i] : Math.round(i * step * 100) / 100)]);
}
}
else {
colourPoints = [[colours, 0], [positions, 1]];
}
return {angle: angle, colorPoints: colourPoints};
},
/* Create a colour striping definition for a background.
@param angle (string or number) the angle of the stripes from positive x-axis
@param colours (string[]) the colours to stripe
@param widths (number[], optional) the widths (0.0 to 1.0) of the stripes
@return (object) the stripe definition */
stripe: function(angle, colours, widths) {
var colourPoints = [];
var avgWidth = Math.round(100 / colours.length) / 100;
for (var i = 0; i < colours.length; i++) {
colourPoints.push([colours[i], (widths ? widths[i] : avgWidth)]);
}
return {angle: angle, striped: true, colorPoints: colourPoints};
},
/* Create a range definition.
@param vertical (boolean, optional) true if vertical, false if horizontal
@param colour (string) the marker's colour
@param start (number) the starting point for the range (0.0 to 1.0)
@param end (number, optional) the ending point for the range (0.0 to 1.0)
@return (object) the range definition */
range: function(vertical, colour, start, end) {
if (typeof vertical == 'string') { // Optional vertical
end = start;
start = colour;
colour = vertical;
vertical = false;
}
return {vertical: vertical, color: colour, start: start, end: end};
},
/* Create a marker definition.
@param shape (string) the marker shape
@param colour (string) the marker's colour
@param series (number) the series to which the marker applies
@param item (number or string or number[2 or 3], optional)
the item in the series to which it applies or 'all' or
'everyn' or 'everyn[s:e]' or [start, end, every]
@param size (number, optional) the size (pixels) of the marker or
(string) 'thickness:length' for horizline or vertical
@param priority (string or number, optional) the rendering priority
@param text (string, optional) the display text for a text type marker
@param positioned (boolean, optional) true to absolutely position the marker
@param placement (string or string[], optional) placement locations
@param offsets (number[2], optional) pixel offsets, horizontal and vertical
@return (object) the marker definition */
	marker: function(shape, colour, series, item, size, priority, text,
			positioned, placement, offsets) {
		// The trailing parameters are optional; each block below detects an
		// argument that has arrived 'too early' (wrong type for its slot)
		// and shifts the remainder right accordingly.
		if (typeof size == 'boolean') { // size omitted - positioned arrived early
			offsets = text;
			placement = priority;
			positioned = size;
			text = null;
			priority = null;
			size = null;
		}
		if ($.isArray(size)) { // size omitted - placement (strings) or offsets (numbers) early
			if (typeof size[0] == 'string') {
				offsets = priority;
				placement = size;
			}
			else {
				offsets = size;
				placement = null;
			}
			positioned = null;
			text = null;
			priority = null;
			size = null;
		}
		if (typeof priority == 'boolean') { // priority omitted
			offsets = positioned;
			placement = text;
			positioned = priority;
			text = null;
			priority = null;
		}
		if ($.isArray(priority)) { // priority omitted - placement or offsets early
			if (typeof priority[0] == 'string') {
				offsets = text;
				placement = priority;
			}
			else {
				offsets = priority;
				placement = null;
			}
			positioned = null;
			text = null;
			priority = null;
		}
		if (typeof text == 'boolean') { // text omitted
			offsets = placement;
			placement = positioned;
			positioned = text;
			text = null;
		}
		if ($.isArray(text)) { // text omitted - placement or offsets early
			if (typeof text[0] == 'string') {
				offsets = positioned;
				placement = text;
			}
			else {
				offsets = text;
				placement = null;
			}
			positioned = null;
			text = null;
		}
		if ($.isArray(positioned)) { // positioned omitted - placement or offsets early
			if (typeof positioned[0] == 'string') {
				offsets = placement;
				placement = positioned;
			}
			else {
				offsets = positioned;
				placement = null;
			}
			positioned = null;
		}
		if ($.isArray(placement) && typeof placement[0] != 'string') { // placement omitted
			offsets = placement;
			placement = null;
		}
		return {shape: shape, color: colour, series: series,
			item: (item || item == 0 ? item : -1), size: size || 10, // item -1 means 'all'
			priority: (priority != null ? priority : 0), text: text,
			positioned: positioned, placement: placement, offsets: offsets};
	},
/* Create a number format for a marker.
@param type (object) containing all these settings or
(string) 'f' for floating point, 'p' for percentage,
'e' for scientific notation, 'c<CUR>' for currency (as specified by CUR)
@param prefix (string, optional) text appearing before the number
@param suffix (string, optional - can only be present if prefix is present)
text appearing after the number
@param precision (number, optional) the number of decimal places
@param showX (boolean, optional) true to show the x-value, false for the y-value
@param zeroes (boolean or number, optional - can only be present if showX is present)
true to display trailing zeroes, number for that many trailing zeroes
@param separators (boolean, optional - can only be present if showX and zeroes are present)
true to display group separators
@return (string) the format definition */
numberFormat: function(type, prefix, suffix, precision, showX, zeroes, separators) {
var format = initNumberFormat(type, prefix, suffix, precision, showX, zeroes, separators);
return format.prefix + '*' + format.type + format.precision +
(format.zeroes ? (typeof format.zeroes == 'number' ? 'z' + format.zeroes : 'z') : '') +
(format.separators ? 's' : '') + (format.showX ? 'x' : '') + '*' + format.suffix;
},
/* Create an axis definition.
@param axis (string) the axis position: top, bottom, left, right
@param lineColour (string, optional) the axis lines' colour
@param labels (string[]) the labels for this axis
@param positions (number[], optional) the positions of the labels
@param rangeStart (number, optional with next two) start of range
@param rangeEnd (number, optional with above) end of range
@param rangeInterval (number, optional with above) interval between values in the range
@param colour (string, optional) the labels' colour
@param alignment (string, optional) the labels' alignment
@param size (number, optional) the labels' size
@param format (object, optional) the labels' number format options
@return (object) the axis definition */
	axis: function(axis, lineColour, labels, positions, rangeStart,
			rangeEnd, rangeInterval, colour, alignment, size, format) {
		// Thin factory over the GChartAxis constructor, which performs
		// its own optional-argument shuffling.
		return new GChartAxis(axis, lineColour, labels, positions, rangeStart,
			rangeEnd, rangeInterval, colour, alignment, size, format);
	},
/* Determine the region within a chart.
@param event (MouseEvent) the mouse event contining the cursor position
@param jsonData (object) the JSON description of the chart
@return (object) the current region details (type, series, and point) or null if none */
findRegion: function(event, jsonData) {
if (!jsonData || !jsonData.chartshape) {
return null;
}
var decodeName = function(name) {
var matches = name.match(/([^\d]+)(\d+)(?:_(\d)+)?/);
return {type: matches[1], series: parseInt(matches[2]), point: parseInt(matches[3] || -1)};
};
var offset = $(event.target).offset();
var x = event.pageX - offset.left;
var y = event.pageY - offset.top;
for (var i = 0; i < jsonData.chartshape.length; i++) {
var shape = jsonData.chartshape[i];
switch (shape.type) {
case 'RECT':
if (shape.coords[0] <= x && x <= shape.coords[2] &&
shape.coords[1] <= y && y <= shape.coords[3]) {
return decodeName(shape.name);
}
break;
case 'CIRCLE':
if (Math.abs(x - shape.coords[0]) <= shape.coords[2] &&
Math.abs(y - shape.coords[1]) <= shape.coords[2] &&
Math.sqrt(Math.pow(x - shape.coords[0], 2) +
Math.pow(y - shape.coords[1], 2)) <= shape.coords[2]) {
return decodeName(shape.name);
}
break;
case 'POLY':
if ($.gchart._insidePolygon(shape.coords, x, y)) {
return decodeName(shape.name);
}
break;
}
}
return null;
},
/* Determine whether a point is within a polygon.
Ray casting algorithm adapted from http://ozviz.wasp.uwa.edu.au/~pbourke/geometry/insidepoly/.
@param coords (number[]) the polygon coords as [x1, y1, x2, y2, ...]
@param x (number) the point's x-coord
@param y (number) the point's y-coord
@return (boolean) true if the point is inside, false if not */
_insidePolygon: function(coords, x, y) {
var counter = 0;
var p1 = [coords[0], coords[1]];
for (var i = 2; i <= coords.length; i += 2) {
var p2 = [coords[i % coords.length], coords[i % coords.length + 1]];
if (y > Math.min(p1[1], p2[1]) && y <= Math.max(p1[1], p2[1])) {
if (x <= Math.max(p1[0], p2[0]) && p1[1] != p2[1]) {
var xinters = (y - p1[1]) * (p2[0] - p1[0]) / (p2[1] - p1[1]) + p1[0];
if (p1[0] == p2[0] || x <= xinters) {
counter++;
}
}
}
p1 = p2;
}
return (counter % 2 != 0);
},
/* Attach the Google chart functionality to a div.
@param target (element) the containing division
@param options (object) the settings for this Google chart instance (optional) */
	_attachGChart: function(target, options) {
		target = $(target);
		if (target.is('.' + this.markerClassName)) {
			return; // Already initialised - do nothing
		}
		target.addClass(this.markerClassName);
		options = options || {};
		// Fall back to the division's CSS dimensions when none supplied
		var width = options.width || parseInt(target.css('width'), 10);
		var height = options.height || parseInt(target.css('height'), 10);
		var allOptions = $.extend({}, this._defaults, options,
			{width: width, height: height});
		$.data(target[0], PROP_NAME, allOptions); // Per-element settings store
		this._updateChart(target[0], allOptions);
	},
/* Reconfigure the settings for a Google charting div.
   @param  target  (element) the containing division
   @param  name  (object) the new settings for this Google chart instance or
                 (string) the name of a single option
   @param  value  (any, optional) the option's value */
_changeGChart: function(target, name, value) {
	var options = name || {};
	if (typeof name == 'string') {
		// Single name/value form - normalise into an options object.
		options = {};
		options[name] = value;
	}
	var curOptions = $.data(target, PROP_NAME);
	// extendRemove lets explicit nulls overwrite existing settings.
	// NOTE(review): if the div was never attached, curOptions is undefined and
	// the merged '{}' is discarded below - presumably harmless; verify callers.
	extendRemove(curOptions || {}, options);
	$.data(target, PROP_NAME, curOptions);
	this._updateChart(target, curOptions);
},
/* Remove the Google charting functionality from a div.
@param target (element) the containing division */
_destroyGChart: function(target) {
target = $(target);
if (!target.is('.' + this.markerClassName)) {
return;
}
target.removeClass(this.markerClassName).empty();
$.removeData(target[0], PROP_NAME);
},
/* Generate the Google charting request with the new settings.
   @param  options  (object) the new settings for this Google chart instance
   @return  (string) the Google chart URL */
_generateChart: function(options) {
	// Accept either a raw 'x:y' type string or a friendly name mapped
	// through _chartTypes; default to a 3D pie ('p3').
	var type = (options.type && options.type.match(/.+:.+/) ?
		options.type : this._chartTypes[options.type] || 'p3');
	var labels = '';
	for (var i = 0; i < options.dataLabels.length; i++) {
		labels += '|' + encodeURIComponent(options.dataLabels[i] || '');
	}
	// All-empty labels produce only '|' separators; suppress them entirely.
	labels = (labels.length == options.dataLabels.length ? '' : labels);
	var format = options.format || 'png';
	// Base URL + size + type, then the type-specific options handler
	// (looked up by the type's family prefix), then each generic add* part.
	var img = (options.secure ? 'https://chart.googleapis.com' : 'http://chart.apis.google.com') + '/chart?' +
		this.addSize(type, options) + (format != 'png' ? '&chof=' + format : '') + '&cht=' + type +
		this[(this._typeOptions[type.replace(/:.*/, '')] || this._typeOptions['']) +
		'Options'](options, labels);
	for (var i = 0; i < this._chartOptions.length; i++) {
		img += this['add' + this._chartOptions[i]](type, options);
	}
	return img;
},
/* Optionally include a parameter.
@param name (string) the parameter name
@param value (string) its value
@return (string) name and value, or blank if no value */
_include: function(name, value) {
return (value ? name + value : '');
},
/* Generate standard options for charts.
@param options (object) the chart settings
@param labels (string) the concatenated labels for the chart
@return (string) the standard chart options */
standardOptions: function(options, labels) {
var encoding = this['_' + options.encoding + 'Encoding'] || this['_textEncoding'];
return '&chd=' + encoding.apply($.gchart, [options]) +
(labels ? '&chl=' + labels.substr(1) : '');
},
/* Generate standard options for pie charts.
@param options (object) the chart settings
@param labels (string) the concatenated labels for the chart
@return (string) the standard pie chart options */
pieOptions: function(options, labels) {
return (options.pieOrientation ? '&chp=' + (options.pieOrientation / 180 * Math.PI) : '') +
this.standardOptions(options, labels);
},
/* Generate the options for chart size.
@param type (string) the encoded chart type
@param options (object) the chart settings
@return (string) the chart size options */
addSize: function(type, options) {
var maxSize = 1000;
options.width = Math.max(10, Math.min(options.width, maxSize));
options.height = Math.max(10, Math.min(options.height, maxSize));
if (options.width * options.height > 300000) {
options.height = Math.floor(300000 / options.width);
}
return 'chs=' + options.width + 'x' + options.height;
},
/* Generate the options for chart margins.
@param type (string) the encoded chart type
@param options (object) the chart settings
@return (string) the chart margin options */
addMargins: function(type, options) {
var margins = options.margins;
margins = (margins == null ? null :
(typeof margins == 'number' ? [margins, margins, margins, margins] :
(!$.isArray(margins) ? null :
(margins.length == 4 ? margins :
(margins.length == 2 ? [margins[0], margins[0], margins[1], margins[1]] : null)))));
return (!margins ? '' : '&chma=' + margins.join(',') +
(!options.legendDims || options.legendDims.length != 2 ? '' :
'|' + options.legendDims.join(',')));
},
/* Generate the options for chart data functions (chfd).
   @param  type  (string) the encoded chart type
   @param  options  (object) the chart settings
   @return  (string) the chart function options */
addDataFunctions: function(type, options) {
	var fns = '';
	for (var i = 0; i < options.functions.length; i++) {
		var fn = options.functions[i];
		var data = '';
		// Each function may use one or more variables; normalise to an array.
		fn.data = ($.isArray(fn.data) ? fn.data : [fn.data]);
		for (var j = 0; j < fn.data.length; j++) {
			var fnVar = fn.data[j];
			// A variable either references an existing series or a
			// generated start/end/step range.
			data += ';' + fnVar.name + ',' + (fnVar.series != -1 ? fnVar.series :
				fnVar.start + ',' + fnVar.end + ',' + fnVar.step);
		}
		fns += '|' + fn.series + ',' + data.substr(1) + ',' + encodeURIComponent(fn.fnText);
	}
	return (fns ? '&chfd=' + fns.substr(1) : '');
},
/* Generate the options for bar chart sizings (chbh/chp).
   Only applies to bar chart types (those starting with 'b').
   @param  type  (string) the encoded chart type
   @param  options  (object) the chart settings
   @return  (string) the bar chart size options */
addBarSizings: function(type, options) {
	// When barWidth is the special 'relative' marker, spacings are
	// fractions and get clamped to [0, 1].
	return (type.substr(0, 1) != 'b' ? '' : (options.barWidth == null ? '' :
		'&chbh=' + options.barWidth +
		(options.barSpacing == null ? '' : ',' + (options.barWidth == $.gchart.barWidthRelative ?
		Math.min(Math.max(options.barSpacing, 0.0), 1.0) : options.barSpacing) +
		// Group spacing can only appear when bar spacing is present.
		(options.barGroupSpacing == null ? '' : ',' + (options.barWidth == $.gchart.barWidthRelative ?
		Math.min(Math.max(options.barGroupSpacing, 0.0), 1.0) : options.barGroupSpacing)))) +
		(options.barZeroPoint == null ? '' : '&chp=' + options.barZeroPoint));
},
/* Generate the options for chart line styles.
@param type (string) the encoded chart type
@param options (object) the chart settings
@return (string) the chart line style options */
addLineStyles: function(type, options) {
if (type.charAt(0) != 'l') {
return '';
}
var lines = '';
for (var i = 0; i < options.series.length; i++) {
if (options.series[i].lineThickness && $.isArray(options.series[i].lineSegments)) {
lines += '|' + options.series[i].lineThickness + ',' +
options.series[i].lineSegments.join(',');
}
}
return (lines ? '&chls=' + lines.substr(1) : '');
},
/* Generate the options for chart colours (chco).
   @param  type  (string) the encoded chart type
   @param  options  (object) the chart settings
   @return  (string) the chart colour options */
addColours: function(type, options) {
	var colours = '';
	// NOTE(review): hasColour is never reset between series, so once any
	// series supplies a colour, every later series (even colourless ones)
	// emits its '000000' defaults, while colourless series *before* the
	// first coloured one are dropped - confirm this ordering is intended.
	var hasColour = false;
	for (var i = 0; i < options.series.length; i++) {
		var clrs = '';
		// For lxy charts, series come in x/y pairs - only even ones carry colour.
		if (type != 'lxy' || i % 2 == 0) {
			var sep = ',';
			$.each(($.isArray(options.series[i].color) ? options.series[i].color :
				[options.series[i].color]), function(i, v) {
				var colour = $.gchart.color(v || '');
				if (colour) {
					hasColour = true;
				}
				clrs += sep + (colour || '000000');
				sep = '|'; // Multiple colours within one series are '|'-separated.
			});
		}
		colours += (hasColour ? clrs : '');
	}
	// Emit chco only if something beyond the bare separators accumulated.
	return (colours.length > options.series.length ? '&chco=' + colours.substr(1) : '');
},
/* Generate the options for chart title.
@param type (string) the encoded chart type
@param options (object) the chart settings
@return (string) the chart title options */
addTitle: function(type, options) {
return $.gchart._include('&chtt=', encodeURIComponent(options.title)) +
(options.titleColor || options.titleSize ?
'&chts=' + ($.gchart.color(options.titleColor) || '000000') + ',' +
(options.titleSize || 14) : '');
},
/* Generate the options for chart backgrounds (chf).
   @param  type  (string) the encoded chart type
   @param  options  (object) the chart settings
   @return  (string) the chart background options */
addBackgrounds: function(type, options) {
	// Convert opacity (fraction <= 1 or percentage > 1) to a black RGBA value.
	var opacity = (!options.opacity ? null : '000000' +
		Math.floor(options.opacity / (options.opacity > 1 ? 100 : 1) * 255).toString(16));
	if (opacity && opacity.length < 8) {
		opacity = '0' + opacity; // Pad single-digit alpha to two hex digits.
	}
	// Build one chf clause for a given area ('bg', 'c', 'a', 'b<n>').
	var addBackground = function(area, background) {
		if (background == null) {
			return '';
		}
		if (typeof background == 'string') {
			return area + ',s,' + $.gchart.color(background); // Solid fill.
		}
		// Gradient ('lg') or stripes ('ls') with angle and colour stops.
		var bg = area + ',l' + (background.striped ? 's' : 'g') + ',' +
			(GRADIENTS[background.angle] != null ? GRADIENTS[background.angle] : background.angle);
		for (var i = 0; i < background.colorPoints.length; i++) {
			bg += ',' + $.gchart.color(background.colorPoints[i][0]) +
				',' + background.colorPoints[i][1];
		}
		return bg;
	};
	var backgrounds = addBackground('|a', opacity) + addBackground('|bg', options.backgroundColor) +
		addBackground('|c', options.chartColor);
	// Per-series gradient fills (plain-colour fills are handled in addMarkers).
	for (var i = 0; i < options.series.length; i++) {
		if (options.series[i].fillColor && options.series[i].fillColor.colorPoints) {
			backgrounds += addBackground('|b' + i, options.series[i].fillColor);
		}
	}
	return (backgrounds ? '&chf=' + backgrounds.substr(1) : '');
},
/* Generate the options for chart grids.
@param type (string) the encoded chart type
@param options (object) the chart settings
@return (string) the chart grid options */
addGrids: function(type, options) {
var size = (typeof options.gridSize == 'number' ?
[options.gridSize, options.gridSize] : options.gridSize);
var line = (typeof options.gridLine == 'number' ?
[options.gridLine, options.gridLine] : options.gridLine);
var offsets = (typeof options.gridOffsets == 'number' ?
[options.gridOffsets, options.gridOffsets] : options.gridOffsets);
return (!size ? '' : '&chg=' + size[0] + ',' + size[1] +
(!line ? '' : ',' + line[0] + ',' + line[1] +
(!offsets ? '' : ',' + offsets[0] + ',' + offsets[1])));
},
/* Generate the options for chart legend (chdl/chdlp/chdls).
   @param  type  (string) the encoded chart type
   @param  options  (object) the chart settings
   @return  (string) the chart legend options */
addLegends: function(type, options) {
	var legends = '';
	for (var i = 0; i < options.series.length; i++) {
		// lxy series come in x/y pairs; only even indexes carry a label.
		if (type != 'lxy' || i % 2 == 0) {
			legends += '|' + encodeURIComponent(options.series[i].label || '');
		}
	}
	// legendOrder is either an explicit comma-separated index list or a
	// symbolic name mapped through ORDERS.
	var order = (options.legendOrder && options.legendOrder.match(/^\d+(,\d+)*$/) ?
		options.legendOrder : ORDERS[options.legendOrder]) || '';
	// Suppress the legend entirely when disabled or when all labels are empty
	// (the length checks detect a string of bare '|' separators).
	return (!options.legend ||
		(type != 'lxy' && legends.length <= options.series.length) ||
		(type == 'lxy' && legends.length <= (options.series.length / 2)) ? '' :
		'&chdl=' + legends.substr(1) + $.gchart._include('&chdlp=',
		options.legend.charAt(0) + (options.legend.indexOf('V') > -1 ? 'v' : '') +
		$.gchart._include('|', order)) + (options.legendColor || options.legendSize ? '&chdls=' +
		($.gchart.color(options.legendColor) || '000000') + ',' + (options.legendSize || 11) : ''));
},
/* Generate the options for chart extensions.
@param type (string) the encoded chart type
@param options (object) the chart settings
@return (string) the chart extension options */
addExtensions: function(type, options) {
var params = '';
for (var name in options.extension) {
params += '&' + name + '=' + options.extension[name];
}
return params;
},
/* Generate axes parameters (chxt/chxl/chxp/chxr/chxs/chxtc).
   @param  type  (string) the encoded chart type
   @param  options  (object) the current instance settings
   @return  (string) the axes parameters */
addAxes: function(type, options) {
	var axes = '';
	var axesLabels = '';
	var axesPositions = '';
	var axesRanges = '';
	var axesStyles = '';
	var axesTicks = '';
	for (var i = 0; i < options.axes.length; i++) {
		if (!options.axes[i]) {
			continue;
		}
		// A bare string is shorthand for an axis with that position only.
		var axisDef = (typeof options.axes[i] == 'string' ?
			new GChartAxis(options.axes[i]) : options.axes[i]);
		var axis = axisDef.axis().charAt(0);
		// Map bottom -> 'x' and left -> 'y'; top ('t') and right ('r') pass through.
		axes += ',' + (axis == 'b' ? 'x' : (axis == 'l' ? 'y' : axis));
		if (axisDef.labels()) {
			var labels = '';
			for (var j = 0; j < axisDef.labels().length; j++) {
				labels += '|' + encodeURIComponent(axisDef.labels()[j] || '');
			}
			axesLabels += (labels ? '|' + i + ':' + labels : '');
		}
		if (axisDef.positions()) {
			var positions = '';
			for (var j = 0; j < axisDef.positions().length; j++) {
				positions += ',' + axisDef.positions()[j];
			}
			axesPositions += (positions ? '|' + i + positions : '');
		}
		if (axisDef.range()) {
			var range = axisDef.range(); // [start, end, optional interval]
			axesRanges += '|' + i + ',' + range[0] + ',' + range[1] +
				(range[2] ? ',' + range[2] : '');
		}
		var ticks = axisDef.ticks() || {};
		// A style clause is emitted whenever any styling aspect is present.
		if (axisDef.color() || axisDef.style() || axisDef.drawing() || ticks.color || axisDef.format()) {
			var style = axisDef.style() || {};
			axesStyles += '|' + i +
				(axisDef.format() ? 'N' + this.numberFormat(axisDef.format()) : '') + ',' +
				$.gchart.color(style.color || 'gray') + ',' +
				(style.size || 10) + ',' +
				(ALIGNMENTS[style.alignment] || style.alignment || 0) + ',' +
				(DRAWING[axisDef.drawing()] || axisDef.drawing() || 'lt') +
				// Tick/axis colours are trailing optional fields.
				(!ticks.color && !axisDef.color() ? '' :
				',' + (ticks.color ? $.gchart.color(ticks.color) : '808080') +
				(!axisDef.color() ? '' : ',' + $.gchart.color(axisDef.color())));
		}
		if (ticks.length) {
			axesTicks += '|' + i + ',' + ($.isArray(ticks.length) ? ticks.length.join(',') : ticks.length);
		}
	}
	// No axes at all means no axis parameters whatsoever.
	return (!axes ? '' : '&chxt=' + axes.substr(1) +
		(!axesLabels ? '' : '&chxl=' + axesLabels.substr(1)) +
		(!axesPositions ? '' : '&chxp=' + axesPositions.substr(1)) +
		(!axesRanges ? '' : '&chxr=' + axesRanges.substr(1)) +
		(!axesStyles ? '' : '&chxs=' + axesStyles.substr(1)) +
		(!axesTicks ? '' : '&chxtc=' + axesTicks.substr(1)));
},
/* Generate markers parameters.
@param type (string) the encoded chart type
@param options (object) the current instance settings
@return (string) the markers parameters */
addMarkers: function(type, options) {
var markers = '';
var decodeItem = function(item, positioned) {
if (item == 'all') {
return -1;
}
if (typeof item == 'string') {
var matches = /^every(\d+)(?:\[(\d+):(\d+)\])?$/.exec(item);
if (matches) {
var every = parseInt(matches[1], 10);
return (matches[2] && matches[3] ?
(positioned ? Math.max(0.0, Math.min(1.0, matches[2])) : matches[2]) + ':' +
(positioned ? Math.max(0.0, Math.min(1.0, matches[3])) : matches[3]) + ':' +
every : -every);
}
}
if ($.isArray(item)) {
item = $.map(item, function(v, i) {
return (positioned ? Math.max(0.0, Math.min(1.0, v)) : v);
});
return item.join(':') + (item.length < 2 ? ':' : '');
}
return item;
};
var escapeText = function(value) {
return value.replace(/,/g, '\\,');
};
for (var i = 0; i < options.markers.length; i++) {
var marker = options.markers[i];
var shape = SHAPES[marker.shape] || marker.shape;
var placement = '';
if (marker.placement) {
var placements = $.makeArray(marker.placement);
for (var j = 0; j < placements.length; j++) {
placement += PLACEMENTS[placements[j]] || '';
}
}
markers += '|' + (marker.positioned ? '@' : '') + shape +
('AfNt'.indexOf(shape) > -1 ? escapeText(marker.text || '') : '') + ',' +
$.gchart.color(marker.color) + ',' +
marker.series + ',' + decodeItem(marker.item, marker.positioned) +
',' + marker.size + ',' + (PRIORITIES[marker.priority] != null ?
PRIORITIES[marker.priority] : marker.priority) +
(placement || marker.offsets ? ',' + placement +
':' + (marker.offsets && marker.offsets[0] ? marker.offsets[0] : '') +
':' + (marker.offsets && marker.offsets[1] ? marker.offsets[1] : '') : '');
}
for (var i = 0; i < options.ranges.length; i++) {
markers += '|' + (options.ranges[i].vertical ? 'R' : 'r') + ',' +
$.gchart.color(options.ranges[i].color) + ',0,' +
options.ranges[i].start + ',' +
(options.ranges[i].end || options.ranges[i].start + 0.005);
}
for (var i = 0; i < options.series.length; i++) {
if (options.series[i].fillColor && !options.series[i].fillColor.colorPoints) {
var fills = ($.isArray(options.series[i].fillColor) ?
options.series[i].fillColor : [options.series[i].fillColor]);
for (var j = 0; j < fills.length; j++) {
if (typeof fills[j] == 'string') {
markers += '|b,' + $.gchart.color(options.series[i].fillColor) +
',' + i + ',' + (i + 1) + ',0';
}
else {
var props = ($.isArray(fills[j]) ? fills[j] : [fills[j].color, fills[j].range]);
markers += '|B,' + $.gchart.color(props[0]) +
',' + i + ',' + props[1] + ',0';
}
}
}
}
return (markers ? '&chm=' + markers.substr(1) : '');
},
/* Update the Google charting div with the new settings.
   Either POSTs the request via a hidden iframe form (for very long URLs)
   or loads the chart image in the background and swaps it in when ready.
   @param  target  (element) the containing division
   @param  options  (object) the new settings for this Google chart instance */
_updateChart: function(target, options) {
	options._src = this._generateChart(options);
	if (options.usePost) {
		// Build a form with one hidden input per URL parameter; the random
		// query suffix defeats caching of the POST target.
		var form = '<form action="' +
			(options.secure ? 'https://chart.googleapis.com' : 'http://chart.apis.google.com') +
			'/chart?' + Math.floor(Math.random() * 1e8) + '" method="POST">';
		var pattern = /(\w+)=([^&]*)/g;
		var match = pattern.exec(options._src);
		while (match) {
			// Text-bearing parameters were URL-encoded when built; POST
			// bodies must carry them decoded.
			form += '<input type="hidden" name="' + match[1] + '" value="' +
				($.inArray(match[1], ['chdl', 'chl', 'chtt', 'chxl']) > -1 ?
				decodeURIComponent(match[2]) : match[2]) + '">';
			match = pattern.exec(options._src);
		}
		form += '</form>';
		target = $(target);
		target.empty();
		var ifr = $('<iframe></iframe>').appendTo(target).css({width: '100%', height: '100%'});
		var doc = ifr.contents()[0]; // Write iframe directly
		doc.open();
		doc.write(form);
		doc.close();
		ifr.show().contents().find('form').submit();
	}
	else {
		var img = $(new Image()); // Prepare to load chart image in background
		img.load(function() { // Once loaded...
			$(target).find('img').remove().end().append(this); // Replace
			if (options.onLoad) {
				if (options.provideJSON) { // Retrieve JSON details
					// Fetch the image-map description via JSONP and hand the
					// normalised shapes to the callback.
					$.getJSON(options._src + '&chof=json&callback=?',
						function(data) {
							options.onLoad.apply(target, [$.gchart._normaliseRects(data)]);
						});
				}
				else {
					options.onLoad.apply(target, []);
				}
			}
		});
		$(img).attr('src', options._src); // Start the download.
	}
},
/* Ensure that rectangle coords go from min to max.
@param jsonData (object) the JSON description of the chart
@return (object) the normalised JSON description */
_normaliseRects: function(jsonData) {
if (jsonData && jsonData.chartshape) {
for (var i = 0; i < jsonData.chartshape.length; i++) {
var shape = jsonData.chartshape[i];
if (shape.type == 'RECT') {
if (shape.coords[0] > shape.coords[2]) {
var temp = shape.coords[0];
shape.coords[0] = shape.coords[2];
shape.coords[2] = temp;
}
if (shape.coords[1] > shape.coords[3]) {
var temp = shape.coords[1];
shape.coords[1] = shape.coords[3];
shape.coords[3] = temp;
}
}
}
}
return jsonData;
},
/* Encode all series with text encoding.
@param options (object) the settings for this Google chart instance
@return (string) the encoded series data */
_textEncoding: function(options) {
var minValue = (options.minValue == $.gchart.calculate ?
this._calculateMinValue(options.series) : options.minValue);
var maxValue = (options.maxValue == $.gchart.calculate ?
this._calculateMaxValue(options.series) : options.maxValue);
var data = '';
for (var i = 0; i < options.series.length; i++) {
data += '|' + this._textEncode(options.series[i], minValue, maxValue);
}
return 't' + (options.visibleSeries || '') + ':' + data.substr(1);
},
/* Encode values in text format: numeric 0.0 to 100.0, comma separated, -1 for null
@param series (object) details about the data values to encode
@param minValue (number) the minimum possible data value
@param maxValue (number) the maximum possible data value
@return (string) the encoded data values */
_textEncode: function(series, minValue, maxValue) {
minValue = (series.minValue != null ? series.minValue : minValue);
maxValue = (series.maxValue != null ? series.maxValue : maxValue);
var factor = 100 / (maxValue - minValue);
var data = '';
for (var i = 0; i < series.data.length; i++) {
data += ',' + (series.data[i] == null || isNaN(series.data[i]) ? '-1' :
Math.round(factor * (series.data[i] - minValue) * 100) / 100);
}
return data.substr(1);
},
/* Encode all series with scaled text encoding.
@param options (object) the settings for this Google chart instance
@return (string) the encoded series data */
_scaledEncoding: function(options) {
var minValue = (options.minValue == $.gchart.calculate ?
this._calculateMinValue(options.series) : options.minValue);
var maxValue = (options.maxValue == $.gchart.calculate ?
this._calculateMaxValue(options.series) : options.maxValue);
var data = '';
var minMax = '';
for (var i = 0; i < options.series.length; i++) {
data += '|' + this._scaledEncode(options.series[i], minValue);
minMax += ',' + (options.series[i].minValue != null ?
options.series[i].minValue : minValue) +
',' + (options.series[i].maxValue != null ?
options.series[i].maxValue : maxValue);
}
return 't' + (options.visibleSeries || '') + ':' + data.substr(1) +
'&chds=' + minMax.substr(1);
},
/* Encode values in text format: numeric min to max, comma separated, min - 1 for null
@param series (object) details about the data values to encode
@param minValue (number) the minimum possible data value
@return (string) the encoded data values */
_scaledEncode: function(series, minValue) {
minValue = (series.minValue != null ? series.minValue : minValue);
var data = '';
for (var i = 0; i < series.data.length; i++) {
data += ',' + (series.data[i] == null || isNaN(series.data[i]) ?
(minValue - 1) : series.data[i]);
}
return data.substr(1);
},
/* Encode all series with simple encoding.
@param options (object) the settings for this Google chart instance
@return (string) the encoded series data */
_simpleEncoding: function(options) {
var minValue = (options.minValue == $.gchart.calculate ?
this._calculateMinValue(options.series) : options.minValue);
var maxValue = (options.maxValue == $.gchart.calculate ?
this._calculateMaxValue(options.series) : options.maxValue);
var data = '';
for (var i = 0; i < options.series.length; i++) {
data += ',' + this._simpleEncode(options.series[i], minValue, maxValue);
}
return 's' + (options.visibleSeries || '') + ':' + data.substr(1);
},
/* Encode values in simple format: single character,
banded-62 as A-Za-z0-9, _ for null.
@param series (object) details about the data values to encode
@param minValue (number) the minimum possible data value
@param maxValue (number) the maximum possible data value
@return (string) the encoded data values */
_simpleEncode: function(series, minValue, maxValue) {
minValue = (series.minValue != null ? series.minValue : minValue);
maxValue = (series.maxValue != null ? series.maxValue : maxValue);
var factor = 61 / (maxValue - minValue);
var data = '';
for (var i = 0; i < series.data.length; i++) {
data += (series.data[i] == null || isNaN(series.data[i]) ? '_' :
SIMPLE_ENCODING.charAt(Math.round(factor * (series.data[i] - minValue))));
}
return data;
},
/* Encode all series with extended encoding.
@param options (object) the settings for this Google chart instance
@return (string) the encoded series data */
_extendedEncoding: function(options) {
var minValue = (options.minValue == $.gchart.calculate ?
this._calculateMinValue(options.series) : options.minValue);
var maxValue = (options.maxValue == $.gchart.calculate ?
this._calculateMaxValue(options.series) : options.maxValue);
var data = '';
for (var i = 0; i < options.series.length; i++) {
data += ',' + this._extendedEncode(options.series[i], minValue, maxValue);
}
return 'e' + (options.visibleSeries || '') + ':' + data.substr(1);
},
/* Encode values in extended format: double character,
banded-4096 as A-Za-z0-9-., __ for null.
@param series (object) details about the data values to encode
@param minValue (number) the minimum possible data value
@param maxValue (number) the maximum possible data value
@return (string) the encoded data values */
_extendedEncode: function(series, minValue, maxValue) {
minValue = (series.minValue != null ? series.minValue : minValue);
maxValue = (series.maxValue != null ? series.maxValue : maxValue);
var factor = 4095 / (maxValue - minValue);
var encode = function(value) {
return EXTENDED_ENCODING.charAt(value / 64) +
EXTENDED_ENCODING.charAt(value % 64);
};
var data = '';
for (var i = 0; i < series.data.length; i++) {
data += (series.data[i] == null || isNaN(series.data[i]) ? '__' :
encode(Math.round(factor * (series.data[i] - minValue))));
}
return data;
},
/* Determine the minimum value amongst the data values.
@param series (object[]) the series to examine
@return (number) the minimum value therein */
_calculateMinValue: function(series) {
var minValue = 99999999;
for (var i = 0; i < series.length; i++) {
var data = series[i].data;
for (var j = 0; j < data.length; j++) {
minValue = Math.min(minValue, (data[j] == null ? 99999999 : data[j]));
}
}
return minValue;
},
/* Determine the maximum value amongst the data values.
@param series (object[]) the series to examine
@return (number) the maximum value therein */
_calculateMaxValue: function(series) {
var maxValue = -99999999;
for (var i = 0; i < series.length; i++) {
var data = series[i].data;
for (var j = 0; j < data.length; j++) {
maxValue = Math.max(maxValue, (data[j] == null ? -99999999 : data[j]));
}
}
return maxValue;
}
});
/* The definition of a chart axis.
   All parameters after 'axis' are optional; each block below detects a
   missing optional parameter by type and shifts the remaining arguments
   one slot to the left to compensate.
   @param  axis  (string) the axis position: top, bottom, left, right
   @param  lineColour  (string, optional) the axis lines' colour
   @param  labels  (string[]) the labels for this axis
   @param  positions  (number[], optional) the positions of the labels
   @param  rangeStart  (number, optional with next two) start of range
   @param  rangeEnd  (number, optional with above) end of range
   @param  rangeInterval  (number, optional with above) interval between values in the range
   @param  colour  (string, optional) the labels' colour
   @param  alignment  (string, optional) the labels' alignment
   @param  size  (number, optional) the labels' size
   @param  format  (object, optional) the labels' number format options */
function GChartAxis(axis, lineColour, labels, positions, rangeStart, rangeEnd, rangeInterval,
		colour, alignment, size, format) {
	if (typeof lineColour != 'string') { // Optional lineColour omitted - shift left.
		format = size;
		size = alignment;
		alignment = colour;
		colour = rangeInterval;
		rangeInterval = rangeEnd;
		rangeEnd = rangeStart;
		rangeStart = positions;
		positions = labels;
		labels = lineColour;
		lineColour = null;
	}
	if (typeof labels == 'number') { // Range instead of labels/positions.
		format = alignment;
		size = colour;
		alignment = rangeInterval;
		colour = rangeEnd;
		rangeInterval = rangeStart;
		rangeEnd = positions;
		rangeStart = labels;
		positions = null;
		labels = null;
	}
	else if (!$.isArray(positions)) { // Optional positions omitted - shift left.
		format = size;
		size = alignment;
		alignment = colour;
		colour = rangeInterval;
		rangeInterval = rangeEnd;
		rangeEnd = rangeStart;
		rangeStart = positions;
		positions = null;
	}
	if (typeof rangeStart == 'string') { // Optional rangeStart/rangeEnd/rangeInterval omitted.
		format = colour;
		size = rangeInterval;
		alignment = rangeEnd;
		colour = rangeStart;
		rangeInterval = null;
		rangeEnd = null;
		rangeStart = null;
	}
	if (typeof rangeInterval == 'string') { // Optional rangeInterval omitted.
		format = size;
		size = alignment;
		alignment = colour;
		colour = rangeInterval;
		rangeInterval = null;
	}
	if (typeof alignment == 'number') { // Optional alignment omitted.
		format = size;
		size = alignment;
		alignment = null;
	}
	// Internal state read back through the accessor methods on the prototype.
	this._axis = axis;
	this._lineColor = lineColour;
	this._labels = labels;
	this._positions = positions;
	this._range = (rangeStart != null ? [rangeStart, rangeEnd, rangeInterval || null] : null);
	this._color = colour;
	this._alignment = alignment;
	this._size = size;
	this._drawing = null;
	this._tickColor = null;
	this._tickLength = null;
	this._format = format;
}
$.extend(GChartAxis.prototype, {
	/* Get/set the axis position.
	   @param  axis  (string) the axis position: top, bottom, left, right
	   @return  (GChartAxis) the axis object or
	            (string) the axis position (if no parameters specified) */
	axis: function(axis) {
		if (arguments.length) {
			this._axis = axis;
			return this;
		}
		return this._axis;
	},
	/* Get/set the axis colour.
	   @param  lineColour  (string) the axis line colour
	   @return  (GChartAxis) the axis object or
	            (string) the axis line colour (if no parameters specified) */
	color: function(lineColour) {
		if (arguments.length) {
			this._lineColor = lineColour;
			return this;
		}
		return this._lineColor;
	},
	/* Get/set the axis labels.
	   @param  labels  (string[]) the labels for this axis
	   @return  (GChartAxis) the axis object or
	            (string[]) the axis labels (if no parameters specified) */
	labels: function(labels) {
		if (arguments.length) {
			this._labels = labels;
			return this;
		}
		return this._labels;
	},
	/* Get/set the axis label positions.
	   @param  positions  (number[]) the positions of the labels
	   @return  (GChartAxis) the axis object or
	            (number[]) the axis label positions (if no parameters specified) */
	positions: function(positions) {
		if (arguments.length) {
			this._positions = positions;
			return this;
		}
		return this._positions;
	},
	/* Get/set the axis range.
	   @param  start  (number) start of range
	   @param  end  (number) end of range
	   @param  interval  (number, optional) interval between values in the range
	   @return  (GChartAxis) the axis object or
	            (number[3]) the axis range start, end, and interval
	            (if no parameters specified) */
	range: function(start, end, interval) {
		if (arguments.length) {
			this._range = [start, end, interval || null];
			return this;
		}
		return this._range;
	},
	/* Get/set the axis labels' style.
	   @param  colour  (string) the labels' colour
	   @param  alignment  (string, optional) the labels' alignment
	   @param  size  (number, optional) the labels' size
	   @return  (GChartAxis) the axis object or
	            (object) the axis style with attributes color, alignment, and size
	            (if no parameters specified) */
	style: function(colour, alignment, size) {
		if (arguments.length) {
			this._color = colour;
			this._alignment = alignment;
			this._size = size;
			return this;
		}
		if (!this._color && !this._alignment && !this._size) {
			return null; // No styling at all.
		}
		return {color: this._color, alignment: this._alignment, size: this._size};
	},
	/* Get/set the axis drawing control.
	   @param  drawing  (string) the drawing control: line, ticks, both
	   @return  (GChartAxis) the axis object or
	            (string) the axis drawing control (if no parameters specified) */
	drawing: function(drawing) {
		if (arguments.length) {
			this._drawing = drawing;
			return this;
		}
		return this._drawing;
	},
	/* Get/set the axis tick style.
	   @param  colour  (string) the colour of the tick marks
	   @param  length  (number, optional) the length of the tick marks,
	           negative values draw inside the chart or
	           (string, optional) list of lengths, comma-separated
	   @return  (GChartAxis) the axis object or
	            (object) the axis tick style with attributes color and length
	            (if no parameters specified) */
	ticks: function(colour, length) {
		if (arguments.length) {
			this._tickColor = colour;
			this._tickLength = length;
			return this;
		}
		if (!this._tickColor && !this._tickLength) {
			return null; // No tick styling at all.
		}
		return {color: this._tickColor, length: this._tickLength};
	},
	/* Get/set the number format for the axis.
	   @param  type  (object) containing all these settings or
	           (string) 'f' for floating point, 'p' for percentage,
	           'e' for scientific notation, 'c<CUR>' for currency (as specified by CUR)
	   @param  prefix  (string, optional) text appearing before the number
	   @param  suffix  (string, optional - requires prefix) text appearing after the number
	   @param  precision  (number, optional) the number of decimal places
	   @param  showX  (boolean, optional) true to show the x-value, false for the y-value
	   @param  zeroes  (boolean or number, optional - requires showX)
	           true to display trailing zeroes, number for that many trailing zeroes
	   @param  separators  (boolean, optional - requires showX and zeroes)
	           true to display group separators
	   @return  (GChartAxis) the axis object or
	            (object) the axis format (if no parameters specified) */
	format: function(type, prefix, suffix, precision, showX, zeroes, separators) {
		if (arguments.length) {
			this._format = initNumberFormat(type, prefix, suffix, precision, showX, zeroes, separators);
			return this;
		}
		return this._format;
	}
});
/* Initialise a number format specification.
   Accepts a pre-built options object or positional arguments where
   prefix/suffix/precision may each be omitted; each block detects a
   skipped parameter by the type that landed in its slot and shifts the
   remaining arguments to the left.
   @return  (object) format spec with type, prefix, suffix, precision,
            showX, zeroes, and separators attributes */
function initNumberFormat(type, prefix, suffix, precision, showX, zeroes, separators) {
	if (typeof type == 'object') {
		return type; // Already a full specification.
	}
	if (typeof prefix == 'number') { // prefix and suffix omitted.
		separators = showX;
		zeroes = precision;
		showX = suffix;
		precision = prefix;
		suffix = '';
		prefix = '';
	}
	if (typeof prefix == 'boolean') { // prefix, suffix, and precision omitted.
		separators = precision;
		zeroes = suffix;
		showX = prefix;
		precision = 0;
		suffix = '';
		prefix = '';
	}
	if (typeof suffix == 'number') { // suffix omitted.
		separators = zeroes;
		zeroes = showX;
		showX = precision;
		precision = suffix;
		suffix = '';
	}
	if (typeof suffix == 'boolean') { // suffix and precision omitted.
		separators = showX;
		zeroes = precision;
		showX = suffix;
		precision = 0;
		suffix = '';
	}
	if (typeof precision == 'boolean') { // precision omitted.
		separators = zeroes;
		zeroes = showX;
		showX = precision;
		precision = 0;
	}
	// Normalise missing values to their defaults.
	return {type: type, prefix: prefix || '', suffix: suffix || '', precision: precision || '',
		showX: showX || false, zeroes: zeroes || false, separators: separators || false};
}
/* jQuery extend now ignores nulls!
@param target (object) the object to extend
@param props (object) the new attributes to add
@return (object) the updated object */
function extendRemove(target, props) {
	$.extend(target, props); // copies the non-null properties
	// $.extend skips null values, so explicitly carry over any property
	// the caller deliberately set to null (or undefined, via ==).
	for (var name in props) {
		if (props[name] == null) {
			target[name] = null;
		}
	}
	return target;
}
/* Attach the Google chart functionality to a jQuery selection.
@param command (string) the command to run (optional, default 'attach')
@param options (object) the new settings to use for these Google chart instances
@return (jQuery object) for chaining further calls */
$.fn.gchart = function(options) {
	// Any arguments after the command name are forwarded to the handler.
	var otherArgs = Array.prototype.slice.call(arguments, 1);
	// 'current' is a getter: it returns a value for the first matched
	// element instead of chaining over the whole selection.
	if (options == 'current') {
		return $.gchart['_' + options + 'GChart'].
			apply($.gchart, [this[0]].concat(otherArgs));
	}
	return this.each(function() {
		if (typeof options == 'string') {
			// Command form: dispatch to the matching _<command>GChart method.
			$.gchart['_' + options + 'GChart'].
				apply($.gchart, [this].concat(otherArgs));
		}
		else {
			// Options-object form (or no arguments): attach a new chart.
			$.gchart._attachGChart(this, options);
		}
	});
};
/* Initialise the Google chart functionality. */
$.gchart = new GChart(); // singleton instance
})(jQuery); | /runestone-petljadoc-3.1.2.5.tar.gz/runestone-petljadoc-3.1.2.5/runestone/animation/jqchart/jquery.gchart.js | 0.578686 | 0.697032 | jquery.gchart.js | pypi |
__author__ = 'isaiahmayerchak'
from docutils import nodes
from docutils.parsers.rst import directives
from runestone.common.runestonedirective import RunestoneIdDirective, RunestoneNode
#add directives/javascript/css
def setup(app):
    """Register the reveal directive, its JavaScript asset, and the HTML
    visit/depart renderers for RevealNode with the Sphinx application."""
    app.add_directive('reveal', RevealDirective)
    app.add_js_file('reveal.js')
    app.add_node(RevealNode, html=(visit_reveal_node, depart_reveal_node))
class RevealNode(nodes.General, nodes.Element, RunestoneNode):
    """Docutils node for a reveal (show/hide) block.

    The directive's option dict is stashed on the node so the HTML
    visit/depart functions can substitute it into the templates.
    """
    def __init__(self,content, **kwargs):
        super(RevealNode,self).__init__(**kwargs)
        # `content` is the directive's options dict, not body text.
        self.reveal_options = content
def visit_reveal_node(self, node):
    """Emit the opening HTML for a reveal block.

    Rewrites the raw directive options into the ``data-*`` attribute
    strings that TEMPLATE_START expects, then appends the rendered
    opening tag to the HTML body.
    """
    # 'modal' is a flag option: presence becomes the bare data-modal attribute.
    if 'modal' in node.reveal_options:
        node.reveal_options['modal'] = 'data-modal'
    else:
        node.reveal_options['modal'] = ''
    # 'modaltitle' carries text, so it becomes data-title="...".
    if 'modaltitle' in node.reveal_options:
        node.reveal_options['modaltitle'] = 'data-title="{}"'.format(
            node.reveal_options['modaltitle'])
    else:
        node.reveal_options['modaltitle'] = ''
    res = TEMPLATE_START % node.reveal_options
    self.body.append(res)
def depart_reveal_node(self,node):
    """Emit the closing HTML for a reveal block."""
    res = TEMPLATE_END % node.reveal_options
    self.body.append(res)
# Templates filled with the node's option dict by the visit/depart
# functions above; every %(...)s slot comes from RevealDirective's options.
TEMPLATE_START = '''
<div data-component="reveal" id="%(divid)s" %(modal)s %(modaltitle)s %(showtitle)s %(hidetitle)s>
'''
TEMPLATE_END = '''
</div>
'''
class RevealDirective(RunestoneIdDirective):
    """
    .. reveal:: identifier
       :showtitle: Text on the 'show' button--default is "Show"
       :hidetitle: Text on the 'hide' button--default is "Hide"
       :modal: Boolean--if included, revealed display will be a modal
       :modaltitle: Title of modal dialog window--default is "Message from the author"

       Content everything here will be hidden until revealed
       Content It can be a lot...
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    has_content = True
    option_spec = RunestoneIdDirective.option_spec.copy()
    option_spec.update({"showtitle": directives.unchanged,
                        "hidetitle": directives.unchanged,
                        "modal": directives.flag,
                        "modaltitle": directives.unchanged})

    def run(self):
        """Process the reveal directive and generate html for output.

        Rewrites the show/hide title options into the data-* attribute
        strings the HTML templates expect (falling back to the default
        button labels), then parses the directive body into a RevealNode.
        """
        super(RevealDirective, self).run()
        self.assert_has_content()  # make sure reveal has something in it
        if 'showtitle' not in self.options:
            self.options['showtitle'] = 'data-showtitle="Show"'
        else:
            self.options['showtitle'] = 'data-showtitle="{}"'.format(self.options['showtitle'])
        if 'hidetitle' not in self.options:
            self.options['hidetitle'] = 'data-hidetitle="Hide"'
        else:
            self.options['hidetitle'] = 'data-hidetitle="{}"'.format(self.options['hidetitle'])
        reveal_node = RevealNode(self.options, rawsource=self.block_text)
        reveal_node.source, reveal_node.line = self.state_machine.get_source_and_line(self.lineno)
        self.state.nested_parse(self.content, self.content_offset, reveal_node)
        return [reveal_node]
__author__ = 'isaiahmayerchak'
from docutils import nodes
from docutils.parsers.rst import directives
from runestone.common.runestonedirective import RunestoneIdDirective, RunestoneNode
#add directives/javascript/css
class TimedNode(nodes.General, nodes.Element, RunestoneNode):
    """Docutils node for a timed assessment.

    The directive's option dict is stashed on the node so the HTML
    visit/depart functions can substitute it into the templates.
    """
    def __init__(self, content, **kwargs):
        super(TimedNode,self).__init__(**kwargs)
        # `content` is the directive's options dict, not body text.
        self.timed_options = content
def visit_timed_node(self, node):
    """Emit the opening HTML for a timed assessment.

    Rewrites the raw directive options into the ``data-*`` attribute
    strings that TEMPLATE_START expects, then appends the rendered
    opening tag to the HTML body.
    """
    opts = node.timed_options
    # 'timelimit' carries a number of minutes; absence means no limit.
    if 'timelimit' not in opts:
        opts['timelimit'] = ''
    else:
        opts['timelimit'] = 'data-time=' + str(opts['timelimit'])
    # Each boolean flag option becomes a bare HTML data attribute when
    # present, or an empty placeholder so the template still formats.
    flag_attrs = {
        'noresult': 'data-no-result',
        'nofeedback': 'data-no-feedback',
        'notimer': 'data-no-timer',
        'fullwidth': 'data-fullwidth',
    }
    for opt, attr in flag_attrs.items():
        opts[opt] = attr if opt in opts else ''
    res = TEMPLATE_START % opts
    self.body.append(res)
def depart_timed_node(self,node):
    """Emit the closing HTML for a timed assessment."""
    res = TEMPLATE_END % node.timed_options
    self.body.append(res)
# Templates filled with the node's option dict by visit/depart_timed_node.
TEMPLATE_START = '''
<ul data-component="timedAssessment" %(timelimit)s id="%(divid)s" %(noresult)s %(nofeedback)s %(notimer)s %(fullwidth)s>
'''
TEMPLATE_END = '''</ul>
'''
class TimedDirective(RunestoneIdDirective):
    """
    .. timed:: identifier
        :timelimit: Number of minutes student has to take the timed assessment--if not provided, no time limit
        :noresult: Boolean, doesn't display score
        :nofeedback: Boolean, doesn't display feedback
        :notimer: Boolean, doesn't show timer
        :fullwidth: Boolean, allows the items in the timed assessment to take the full width of the screen...
    """
    # One required argument: the unique id (divid) for the assessment.
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    has_content = True
    # Option names map one-to-one onto the data-* attributes emitted by
    # visit_timed_node.
    option_spec = {"timelimit":directives.positive_int,
                   "noresult":directives.flag,
                   "nofeedback":directives.flag,
                   "fullwidth":directives.flag,
                   "notimer":directives.flag}
    def run(self):
        """
        process the timed directive and generate html for output.
        :param self:
        :return: a single TimedNode whose children are the parsed
            question directives contained in the body.
        .. timed:: identifier
            :timelimit: Number of minutes student has to take the timed assessment--if not provided, no time limit
            :noresult: Boolean, doesn't display score
            :nofeedback: Boolean, doesn't display feedback
            :notimer: Boolean, doesn't show timer
            :fullwidth: Boolean, allows the items in the timed assessment to take the full width of the screen
            ...
        """
        super(TimedDirective, self).run()
        self.assert_has_content()  # make sure timed has something in it
        timed_node = TimedNode(self.options, rawsource=self.block_text)
        # Record the source location for error reporting, then parse the
        # nested directives into children of this node.
        timed_node.source, timed_node.line = self.state_machine.get_source_and_line(self.lineno)
        self.state.nested_parse(self.content, self.content_offset, timed_node)
        return [timed_node]
<h2>Multiple Choice</h2>
<ul data-component="multiplechoice" data-multipleanswers="true" data-random id="question-1">
The Question can go right here.
<li data-component="answer" id="123" >Answer One</li>
<li data-component="feedback" for="123">Feedback for One</li>
<li data-component="answer" id="456">Answer Two</li>
<li data-component="feedback" for="456">Feedback for Two</li>
<li data-component="answer" id="789" data-correct>Answer Three</li>
<li data-component="feedback" for="789">Feedback for Three</li>
<li data-component="answer" id="1011" data-correct>Answer Four</li>
<li data-component="feedback" for="1011">Feedback for Four</li>
<li data-component="answer" id="1112" data-correct>Answer Five</li>
<li data-component="feedback" for="1112">Feedback for Five</li>
</ul>
Here the <code>ul</code> tag represents the entire component to be rendered.
Each pair of <code>li</code> elements — an answer and its feedback — represents a possible answer to the question and the feedback to be provided if that answer is selected.
<ul>
<li><code>data-component</code> identifies this as a multiple choice component</li>
<li><code>class</code> Standard CSS class options </li>
<li><code>id</code> must be unique in the document</li>
<li><code>data-multipleanswers</code> REQUIRED Attribute. Possible values are true and false. Determines whether the question can take one or more answers on submission (radio vs checkbox).</li>
<li><code>data-random</code> Randomizes the order that the possible answers are displayed on the page</li>
<br />
<p>Attributes of the question tags</p>
<br />
<li><code>id</code> must be unique per MC component</li>
<li><code>for</code> must match the id of the option that the feedback is for</li>
<li><code>data-correct</code> indicates that this option is a correct answer. If <code>data-multipleanswers</code> is true, all answers with the attribute will be considered correct, otherwise the first answer with the <code>data-correct</code> attribute will be considered correct.</li>
</ul>
<h2>Fill In the Blank</h2>
<p data-component="fillintheblank" data-casei="false" id="fill1412" >
<span data-blank>Without using the activecode infixToPostfix function, convert the following expression to postfix <code>10 + 3 * 5 / (16 - 4)</code>
<span data-answer id="blank2_answer">\\b10\\s+3\\s+5\\s*\\*\\s*16\\s+4\\s*-\\s*/\\s*\\+</span>
<span data-feedback="regex" id="feedback1">10.*3.*5.*16.*4</span>
<span data-feedback="text" for="feedback1">The numbers appear to be in the correct order check your operators</span>
<span data-feedback="regex" id="feedback2">.*</span>
<span data-feedback="text" for="feedback2">Remember the numbers will be in the same order as the original equation</span>
</span>
</p>
Here the <code>p</code> tag represents the entire component.
The <code>data-blank</code> <code>span</code> holds the question text.
The <code>data-answer</code> <code>span</code> holds the correct regular expression.
Each regex/text <code>span</code> pair represents a point of feedback for incorrect answers.
Multiple blanks can also be put into the same FITB question as shown here.
<p data-component="fillintheblank" data-casei="false" id="fill1412" >
<span data-blank>Give me a string that has an 'e' in it. Now.<span data-answer id="blank2_answer">e</span>
<span data-feedback="regex" id="feedback1">f</span>
<span data-feedback="text" for="feedback1">Oops</span>
<span data-feedback="regex" id="feedback2">.*</span>
<span data-feedback="text" for="feedback2">There's no e there!</span>
</span>
<span data-blank>Gimme an f. Please.<span data-answer id="blank12_answer">f</span>
<span data-feedback="regex" id="feedback3">e</span>
<span data-feedback="text" for="feedback3">Wrong.</span>
<span data-feedback="regex" id="feedback4">.*</span>
<span data-feedback="text" for="feedback4">There's no f in that string!</span>
</span>
<span data-blank>Show me 44!<span data-answer id="blank3_answer">44</span>
<span data-feedback="regex" id="feedback5">1</span>
<span data-feedback="text" for="feedback5">nope</span>
<span data-feedback="regex" id="feedback6">4</span>
<span data-feedback="text" for="feedback6">close</span>
<span data-feedback="regex" id="feedback7">.*</span>
<span data-feedback="text" for="feedback7">Sorry bro</span>
</span>
</p>
<ul>
<li><code>data-casei</code> Determines if the answer is case insensitive</li>
<li><code>id</code> Must be unique in the document</li>
</ul>
<h2>Timed</h2>
<ul data-component="timedAssessment" data-time id="timed_1">
<ul data-component="multiplechoice" data-multipleanswers="true" data-random id="question_1">
The Question can go right here.
<li data-component="answer" id="123" >Answer One</li>
<li data-component="feedback" for="123">Feedback for One</li>
<li data-component="answer" id="456">Answer Two</li>
<li data-component="feedback" for="456">Feedback for Two</li>
<li data-component="answer" id="789" data-correct>Answer Three</li>
<li data-component="feedback" for="789">Feedback for Three</li>
</ul>
<ul data-component="multiplechoice" id="question_2">
The Question can go right here.
<li data-component="answer" id="123" >Answer One</li>
<li data-component="feedback" for="123">Feedback for One</li>
<li data-component="answer" id="456">Answer Two</li>
<li data-component="feedback" for="456">Feedback for Two</li>
<li data-component="answer" id="789" data-correct>Answer Three</li>
<li data-component="feedback" for="789">Feedback for Three</li>
</ul>
<p data-component="fillintheblank" data-casei="false" id="fill1412" >
<span data-blank>Give me a string that has an 'e' in it. Now.<span data-answer id="blank2_answer">e</span>
<span data-feedback="regex" id="feedback1">f</span>
<span data-feedback="text" for="feedback1">Oops</span>
<span data-feedback="regex" id="feedback2">.*</span>
<span data-feedback="text" for="feedback2">There's no e there!</span>
</span>
</p>
</ul>
Here the outermost <code>ul</code> tag marks the timed element, and the tags inside represent the questions in the timed assessment.
Currently only 1 timed assessment is allowed per page.
<ul>
<li><code>data-time</code> Can either be set equal to the time limit for the assessment in minutes or left blank, in which case the assessment will keep track of how long it takes to complete the assessment.</li>
<li><code>id</code> Must be unique in the document</li>
<li><code>data-no-result</code> If present, it won't display the score to the user after the assessment is finished</li>
<li><code>data-no-feedback</code> If present, feedback won't be displayed.</li>
</ul>
| /runestone-petljadoc-3.1.2.5.tar.gz/runestone-petljadoc-3.1.2.5/runestone/assess/README.md | 0.521715 | 0.700152 | README.md | pypi |
from docutils import nodes
from docutils.parsers.rst import directives
from runestone.common.runestonedirective import RunestoneIdDirective, RunestoneNode
__author__ = 'bmiller'
def setup(app):
    """Register the question directive and the HTML visit/depart renderers
    for QuestionNode with the Sphinx application."""
    app.add_directive('question', QuestionDirective)
    app.add_node(QuestionNode, html=(visit_question_node, depart_question_node))
class QuestionNode(nodes.General, nodes.Element, RunestoneNode):
    """Docutils node for a numbered question block.

    The directive's option dict is stashed on the node so the HTML
    visit/depart functions can substitute it into the templates.
    """
    def __init__(self, content, **kwargs):
        super(QuestionNode, self).__init__(**kwargs)
        # `content` is the directive's options dict, not body text.
        self.question_options = content
def visit_question_node(self, node):
    """Emit the opening HTML for a question, assigning its number.

    A running counter stored on the Sphinx build environment numbers
    questions sequentially; an explicit :number: option resets the
    counter to that value.
    """
    env = node.document.settings.env
    if not hasattr(env, 'questioncounter'):
        env.questioncounter = 0
    if 'number' in node.question_options:
        # Explicit number: restart the running counter from here.
        env.questioncounter = int(node.question_options['number'])
    else:
        env.questioncounter += 1
    # The template's <ol> uses start=N so the rendered list shows the number.
    node.question_options['number'] = 'start={}'.format(env.questioncounter)
    res = TEMPLATE_START % node.question_options
    self.body.append(res)
def depart_question_node(self, node):
    """Emit the closing HTML for a question block."""
    res = TEMPLATE_END % node.question_options
    self.body.append(res)
# Templates filled with the node's option dict by visit/depart_question_node.
TEMPLATE_START = '''
<div data-component="question" class="full-width container question" id="%(divid)s" >
<ol %(number)s class=arabic><li class="alert alert-warning">
'''
TEMPLATE_END = '''
</li></ol>
</div>
'''
class QuestionDirective(RunestoneIdDirective):
    """
    .. question:: identifier
       :number: Force a number for this question

       Content everything here is part of the question
       Content It can be a lot...
    """
    # One required argument: the unique id (divid) for the question.
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    has_content = True
    option_spec = RunestoneIdDirective.option_spec.copy()
    option_spec.update({'number': directives.positive_int})
    def run(self):
        """Build a QuestionNode from the directive and parse its content
        as the node's children."""
        super(QuestionDirective, self).run()
        self.assert_has_content()  # make sure question has something in it
        self.options['name'] = self.arguments[0].strip()
        question_node = QuestionNode(self.options, rawsource=self.block_text)
        # Record the source location for error reporting.
        question_node.source, question_node.line = self.state_machine.get_source_and_line(self.lineno)
        self.add_name(question_node)
        self.state.nested_parse(self.content, self.content_offset, question_node)
        return [question_node]
dftTableAttr = 'cellspacing="0" cellpadding="10"'

class Matrix:
    """Sparse 2-D grid of values that can render itself as an HTML table.

    Cells live in a dict keyed by (row, col); unset cells read back as
    None.  Assigning outside the current bounds grows the matrix.  An
    expanded matrix (the default) also carries two parallel non-expanded
    matrices, ``format`` and ``style``, holding per-cell format strings or
    callables and CSS styles consumed by renderHtml().
    """

    def __init__(self, nrows=1, ncols=1, data=None,
                 dftFormat="", dftStyle="", title="",
                 tableAttr=dftTableAttr, tableHeaders=None,
                 Expand=True):
        """Create a matrix, optionally filled from *data* (a list of rows,
        or a dict converted via dictToLol and sorted by key)."""
        self.nrows = nrows
        self.ncols = ncols
        self.values = {}  # (row, col) -> value
        self.expanded = Expand
        if Expand:
            # Presentation attributes are kept only on the main matrix;
            # the format/style grids are plain (Expand=False) so the
            # constructor does not recurse forever.
            self.dftFormat = dftFormat
            self.dftStyle = dftStyle
            self.title = title
            self.tableAttr = tableAttr
            self.tableHeaders = tableHeaders
            self.format = Matrix(nrows, ncols, Expand=False)
            self.style = Matrix(nrows, ncols, Expand=False)
        if data:
            if isinstance(data, dict):
                data = dictToLol(data)
            if isinstance(data, list):
                self.populate(data)

    def __getitem__(self, coords):
        """Return the value at (row, col); None when the cell is unset."""
        row, col = coords
        return self.values.get((row, col))

    def __setitem__(self, coords, value):
        """Assign a cell, growing the bounds (and the format/style grids)."""
        row, col = coords
        self.values[(row, col)] = value
        self.nrows = max(self.nrows, row + 1)
        self.ncols = max(self.ncols, col + 1)
        if self.expanded:
            # Keep the parallel grids the same size as the data grid.
            self.format.nrows = self.nrows
            self.format.ncols = self.ncols
            self.style.nrows = self.nrows
            self.style.ncols = self.ncols
        return value

    # ---- row helpers -------------------------------------------------

    def setrowVals(self, row, values):
        """Set consecutive cells of *row* from the sequence *values*."""
        for col, val in enumerate(values):
            self[row, col] = val

    def setrowVal(self, row, value):
        """Set every existing column of *row* to the same *value*."""
        for col in range(self.ncols):
            self[row, col] = value

    def getrow(self, row):
        """Return *row* as a list, one entry per column."""
        return [self[row, col] for col in range(self.ncols)]

    # ---- column helpers ----------------------------------------------

    def setcolVals(self, col, values):
        """Set consecutive cells of *col* from the sequence *values*."""
        for row, val in enumerate(values):
            self[row, col] = val

    def setcolVal(self, col, value):
        """Set every existing row of *col* to the same *value*."""
        for row in range(self.nrows):
            self[row, col] = value

    def getcol(self, col):
        """Return *col* as a list, one entry per row."""
        return [self[row, col] for row in range(self.nrows)]

    # ---- bulk fill and rendering -------------------------------------

    def populate(self, lists):
        """Fill the matrix from a list of rows; a scalar row becomes a
        single-column row."""
        for row, vals in enumerate(lists):
            if not isinstance(vals, list):
                vals = [vals]
            self.setrowVals(row, vals)

    def renderHtml(self, wrap=None):
        """Render the matrix as an HTML <table>.

        wrap, when given, starts a new <tr> every *wrap* cells within a
        row.  Per-cell entries in self.format (a '%' template string or a
        callable) and self.style (CSS text) override the matrix-wide
        defaults dftFormat/dftStyle.
        """
        lins = ["", "<table %s>" % self.tableAttr]
        if self.title:
            lins[0] = "<div>%s</div>" % self.title
        headers = self.tableHeaders
        if headers:
            lins.append("<tr><th>" + "</th><th>".join(map(str, headers)) +
                        "</th></tr>")
        for row in range(self.nrows):
            rowLin = [" <tr>"]
            vals = self.getrow(row)
            formats = self.format.getrow(row) if self.format else [''] * self.ncols
            styles = self.style.getrow(row) if self.style else [''] * self.ncols
            for c in range(self.ncols):
                val = vals[c]
                if val is None:
                    val = ""
                fmt = formats[c] or self.dftFormat
                if fmt:
                    # A string format is applied with %; anything else is
                    # assumed to be a callable.
                    val = fmt % val if isinstance(fmt, str) else fmt(val)
                cellStyle = styles[c] or self.dftStyle
                if cellStyle:
                    cell = '<td style="%s">%s</td>' % (cellStyle, val)
                else:
                    cell = '<td>%s</td>' % val
                if wrap and c > 0 and c % wrap == 0:
                    cell = "</tr><tr>" + cell  # fold long rows
                rowLin.append(cell)
            rowLin.append("</tr>")
            lins.append("".join(rowLin))
        lins.append("</table>")
        return "\n".join(lins)

    def __str__(self):
        return "Matrix-%dx%d" % (self.nrows, self.ncols)
#===========================================
typeSeq = (type([]), type((1,2)))

def dictToLol(dic):
    """Convert a dict to a list of lists, sorted by key.

    Each entry becomes ``[key, *values]``; a scalar value is wrapped so
    every output row has the key followed by its value(s).
    """
    lists = []
    for key in sorted(dic):
        val = dic[key]
        if not isinstance(val, (list, tuple)):
            val = [val]  # promote a scalar to a one-element sequence
        lists.append([key] + list(val))
    return lists
from enum import Enum
from re import S
class _PropAttr(Enum):
    """Dictionary keys used for the internal property records."""
    key = 'key'
    value = 'value'

class Properties:
    """Minimal reader/writer for ``key=value`` properties files.

    Blank lines and lines starting with ``#`` are ignored.  A value
    wrapped in double quotes is unquoted; a line without a separator is
    stored as a key with value ``None``.
    """

    def __init__(self, equals_character:str = '='):
        self.equals_character = equals_character
        # Per-instance store: a class-level list would be shared by every
        # Properties instance and leak entries between them.
        self._props = []

    def _is_a_valid_line(self, line:str) -> bool:
        """True for lines that carry data (non-blank, not a comment)."""
        return bool(line) and line.strip() != '' and not line.strip().startswith('#')

    def _get_equals_character_index(self, line:str) -> int:
        """Index of the key/value separator, or None when absent."""
        try:
            return line.index(self.equals_character)
        except ValueError:
            return None

    def _parse_property_value(self, original_value:str) -> str:
        """Strip whitespace and one pair of surrounding double quotes."""
        value:str = original_value.strip()
        if value.startswith('"') and value.endswith('"'):
            value = value[1:-1]
        return value

    def _create_property(self, key:str, value:str):
        """Build the internal record for one property."""
        return {
            _PropAttr.key.value: key,
            _PropAttr.value.value: value
        }

    def _add_line_to_properties(self, line:str):
        """Parse one file line and append it to the property list."""
        if not self._is_a_valid_line(line):
            return
        index = self._get_equals_character_index(line)
        value:str = None
        if index is not None:
            value = self._parse_property_value(line[index + 1:])
        else:
            index = len(line)  # no separator: the whole line is the key
        key:str = line[:index].strip()
        self._props.append(self._create_property(key, value))

    def load(self, properties_file_path:str):
        """Read every line of the given file into this instance."""
        with open(properties_file_path, 'r') as file:
            for line in file:
                self._add_line_to_properties(line)

    def get(self, key:str) -> str:
        """Return the value stored under *key*, or None when missing."""
        for prop in self._props:
            if prop[_PropAttr.key.value] == key:
                return prop[_PropAttr.value.value]
        return None

    def put(self, key:str, value:str):
        """Insert *key*, or replace the value of every existing entry.

        Existence is decided by the key (not by a non-None value), so a
        key loaded without a value is updated instead of duplicated.
        """
        replaced = False
        for i, prop in enumerate(self._props):
            if prop[_PropAttr.key.value] == key:
                self._props[i] = self._create_property(key, value)
                replaced = True
        if not replaced:
            self._props.append(self._create_property(key, value))

    def _prop_to_line(self, prop) -> str:
        """Serialise one record as a ``key=value`` file line."""
        if prop is None:
            return ''
        value = prop[_PropAttr.value.value]
        return prop[_PropAttr.key.value] + '=' + (value if value is not None else '') + '\n'

    def dump(self, output_file_path:str):
        """Write the properties out, one ``key=value`` line per entry,
        in a form that load() can read back."""
        with open(output_file_path, 'w') as file:
            for prop in self._props:
                file.write(self._prop_to_line(prop))

    def _prop_to_string(self, prop) -> str:
        """Debug representation of one record: ``[key=value]``."""
        if prop is None:
            return ''
        return '[{key}={value}]'.format(key = prop[_PropAttr.key.value], value = prop[_PropAttr.value.value])

    def to_string(self) -> str:
        """Debug representation of all records: ``{[k=v],[k2=v2]}``."""
        if not self._props:
            return '{}'
        return '{' + ','.join(self._prop_to_string(p) for p in self._props) + '}'
# Runfile
Runfiles are generic task files defined in Markdown.
For an example, look at [this project’s
Runfile](https://github.com/awkspace/runfile/blob/master/Runfile.md).
## Installation
Install from [PyPI](https://pypi.org/project/runfile/):
```sh
pip install runfile
```
Then add `source <(run --bash-completion)` to your `~/.bashrc` or `~/.zshrc` to
enable tab completion.
## Usage
```sh-session
$ run --help
usage: run [-h] [-f FILENAME] [-u] [--containers] [-l] [--bash-completion] [target]
positional arguments:
target
optional arguments:
-h, --help show this help message and exit
-f FILENAME, --file FILENAME
Path to Runfile, defaults to Runfile.md
-u, --update Update includes
--containers Allow steps to run in containers where applicable
-l, --list-targets List targets and exit
--bash-completion Print bash completion script
```
## Format
Runfiles must include a first-level header at the top of the file.
Each target is defined as a second-level header. Target names can consist of
alphanumeric characters and underscores.
The first paragraph of a target describes the target and will be printed when
`run` is called with no arguments.
Code blocks in targets are executed in the order they are defined. The syntax
highlighting of the code block determines the executable that will run it. For
example, a code block beginning with `python` will execute the code using
`/usr/bin/env python`.
A `yaml` code block represents Runfile or target configuration.
A `dockerfile` code block defines the container that will be used to execute the
given target if `run` is called with `--containers`. To use an existing Docker
image, create a `dockerfile` block that contains only `FROM your_image_name`.
Code blocks directly underneath the top-level header are considered part of a
“global target.” The contents of this global target are executed before any
other target. This is useful for setting variables or performing checks.
## Runfile Configuration
Runfiles are configured by an optional `yaml` block under the top level header.
```yaml
# List of Runfiles to include. Included Runfiles are automatically appended to
# the bottom of the current Runfile. Once a Runfile has been retrieved, it will
# not be updated until run is invoked with --update.
#
# Each element in the list has a key and a value. The value is the local or
# remote path to the other Runfile; the key is the Runfile alias and can be used
# in other configuration to explicitly reference included targets, e.g.
# my_included_runfile/some_target.
#
# Use .netrc to fetch Runfiles behind authentication.
includes:
- example_one: https://example.com/1.md
- example_two: https://example.com/2.md
```
## Target Configuration
Targets are configured by an optional `yaml` block under their headers.
```yaml
# If the last run was successful, do not rerun this target until this much time
# has passed. Defaults to 0 (no caching). -1 or null caches this target
# indefinitely until invalidated by another target or by a rebuild of a required
# (upstream) target.
expires: 5m
# A list of other targets that should be completed before this target is run.
# Glob matches are supported.
requires:
- foo
- bar
# A list of other targets that should be immediately expired after a successful
# run of this target. Glob matches are supported. For example, a "clean" target
# may invalidate '*'.
invalidates:
- baz
```
## Examples
### A “Hello World” Runfile
````markdown
# Hello world
A Runfile that says hello!
## hello
Say hello.
```sh
echo "Hello world!"
```
````
Run it:
```sh-session
$ run
📜 Hello world
A Runfile that says hello!
🎯 Targets
hello: Say hello.
$ run hello
🎯 hello
⏳ Running hello...
Hello world!
✔️ Completed hello. (0.02s)
SUCCESS in 0.04s
---
✔️ Completed hello. (0.02s)
```
### Persistent Values
Use `run_set <key> <value>` to set persistent values. These values are stored
between runs and can be referenced in other code blocks than the one they were
set in. They are also set as environment variables in subsequent code blocks.
Values can be retrieved by `run_get <key>` or by accessing the environment
variable of the same name.
````markdown
# Hello world with two languages
A Runfile that says hello using two languages!
## write_message
Writes a message to the Runfile cache using shell.
```sh
run_set "message" "Hello world!"
```
## print_message
Prints a stored message from the Runfile cache using Python.
```python
import os
print(os.environ["message"])
```
## delete_message
Deletes the stored message using shell.
```sh
run_del "message"
```
````
Run it:
```sh-session
$ run write_message
🎯 write_message
⏳ Running write_message...
✔️ Completed write_message. (0.24s)
SUCCESS in 0.27s
---
✔️ Completed write_message. (0.24s)
$ run print_message
🎯 print_message
⏳ Running print_message...
Hello world!
✔️ Completed print_message. (0.03s)
SUCCESS in 0.06s
---
✔️ Completed print_message. (0.03s)
```
This works, but now there’s a problem: running `print_message` before
`write_message` results in an error.
```sh-session
$ run delete_message
🎯 delete_message
⏳ Running delete_message...
1
✔️ Completed delete_message. (0.25s)
SUCCESS in 0.28s
---
✔️ Completed delete_message. (0.25s)
$ run print_message
🎯 print_message
⏳ Running print_message...
Traceback (most recent call last):
File "/tmp/tmp5d8zepzl/run", line 2, in <module>
print(os.environ["message"])
File "os.py", line 679, in __getitem__
raise KeyError(key) from None
KeyError: 'message'
❌ Failed print_message. (0.05s)
FAILURE in 0.05s
---
❌ Failed print_message. (0.05s)
```
### Dependencies
To fix this, use the `requires` directive in the `print_message` target
configuration.
````markdown
# Hello world with dependencies
A Runfile that says hello, but less broken this time.
## write_message
Writes a message to the Runfile cache using shell.
```sh
run_set "message" "Hello world!"
```
## print_message
Prints a stored message from the Runfile cache using Python.
```yaml
requires:
- write_message
```
```python
import os
print(os.environ["message"])
```
## delete_message
Deletes the stored message using shell.
```sh
run_del "message"
```
````
Now the message will be written every time `print_message` is run.
```sh-session
$ run delete_message
🎯 delete_message
⏳ Running delete_message...
1
✔️ Completed delete_message. (0.26s)
SUCCESS in 0.29s
---
✔️ Completed delete_message. (0.26s)
$ run print_message
🎯 print_message
⏳ Running write_message...
✔️ Completed write_message. (0.26s)
⏳ Running print_message...
Hello world!
✔️ Completed print_message. (0.04s)
SUCCESS in 0.37s
---
✔️ Completed write_message. (0.26s)
✔️ Completed print_message. (0.04s)
```
### Caching
However, constantly rerunning a dependency with a cacheable value is not always
an ideal situation. Some targets may take a while to run.
We can simulate this by adding a `sleep` command to `write_message`, then
configuring its `expires` directive such that it runs at most once per day.
````markdown
# Hello world with dependencies and caching
A Runfile that says hello, but it takes a while.
## write_message
```yaml
expires: 24h
```
Writes a message to the Runfile cache using shell.
```sh
sleep 5 # Simulate a long operation
run_set "message" "Hello world!"
```
## print_message
Prints a stored message from the Runfile cache using Python.
```yaml
requires:
- write_message
```
```python
import os
print(os.environ["message"])
```
## delete_message
Deletes the stored message using shell.
```sh
run_del "message"
```
````
Executing `run print_message` takes a while the first time:
```sh-session
$ run print_message
🎯 print_message
⏳ Running write_message...
✔️ Completed write_message. (5.25s)
⏳ Running print_message...
Hello world!
✔️ Completed print_message. (0.03s)
SUCCESS in 5.34s
---
✔️ Completed write_message. (5.25s)
✔️ Completed print_message. (0.03s)
```
But running it again will skip the `write_message` target:
```sh-session
$ run print_message
🎯 print_message
💾 Used cache for write_message.
⏳ Running print_message...
Hello world!
✔️ Completed print_message. (0.07s)
SUCCESS in 0.13s
---
💾 Used cache for write_message.
✔️ Completed print_message. (0.07s)
```
This saves time, but it also introduces a new problem. Running `delete_message`
will delete the message, but the Runfile doesn’t know that `write_message` needs
to be rerun before the next `print_message`!
### Cache Invalidation
The `invalidates` directive can be used to indicate that running a specific
target will result in the cached values of other targets being invalid.
````markdown
# Hello world with dependencies and caching
A Runfile that says hello, but it takes a while.
## write_message
```yaml
expires: 24h
```
Writes a message to the Runfile cache using shell.
```sh
sleep 5 # Simulate a long operation
run_set "message" "Hello world!"
```
## print_message
Prints a stored message from the Runfile cache using Python.
```yaml
requires:
- write_message
```
```python
import os
print(os.environ["message"])
```
## delete_message
Deletes the stored message using shell.
```yaml
invalidates:
- write_message
```
```sh
run_del "message"
```
````
Now running `delete_message` invalidates the `write_message` cache.
```sh-session
$ run delete_message
🎯 delete_message
⏳ Running delete_message...
1
✔️ Completed delete_message. (0.28s)
SUCCESS in 0.31s
---
✔️ Completed delete_message. (0.28s)
$ run print_message
🎯 print_message
⏳ Running write_message...
✔️ Completed write_message. (5.24s)
⏳ Running print_message...
Hello world!
✔️ Completed print_message. (0.06s)
SUCCESS in 5.37s
---
✔️ Completed write_message. (5.24s)
✔️ Completed print_message. (0.06s)
```
Changing the content of a code block will also invalidate the cache of a target.
And, like `make`, a target with a dependency run more recently than the target
itself will invalidate that target’s cache.
## Running Targets Inside Containers
Add a [Dockerfile](https://docs.docker.com/engine/reference/builder/) block at
the top of a target and Runfile will build a container accordingly, then mount
the current directory into the Docker container before running commands.
By default, Runfiles will not execute these code blocks. Most of the time,
Runfiles are expected to be run on a user’s system, with dependencies manually
installed. However, containers are useful for executing Runfiles on CI systems
where dependencies are not controlled by the user.
To execute a Runfile target in containerized mode, use the `--containers` flag.
Container images will be cached until the content of a `dockerfile` block
changes.
## Including Other Runfiles
Other Runfiles can be included via an `includes` directive in the Runfile
configuration block.
Included Runfiles will automatically append themselves to the main Runfile. This
promotes consolidation of common tasks to shared files without sacrificing the
portability and stability of any given Runfile.
To refresh included Runfiles, execute `run` with the `--update` flag.
## Configuration Options
If you hate fun, set `RUNFILE_NO_EMOJI=1` to disable icons from the output.
| /runfile-1.0.7.tar.gz/runfile-1.0.7/README.md | 0.870542 | 0.800926 | README.md | pypi |
from collections import defaultdict
class Data:
    """
    Container for either ground-truth annotations or model predictions.

    Bounding boxes are stored as [x, y, width, height] with (x, y) the
    top-left corner; masks are either COCO polygon lists or pycocotools RLEs.
    Keep ground truth and predictions in separate Data instances.

    `max_dets` is the maximum number of detections the model is allowed to
    output for a single image.
    """

    def __init__(self, name: str, max_dets: int = 100):
        self.name = name
        self.max_dets = max_dets
        # class id -> human-readable class name
        self.classes = {}
        # flat list of annotation dicts; an annotation's "_id" is its index
        self.annotations = []
        # image id -> {"name": display name, "anns": [annotation ids]}
        self.images = defaultdict(lambda: {"name": None, "anns": []})

    def _get_ignored_classes(self, image_id: int) -> set:
        """Classes ignored image-wide (ignore region with a class but no
        box/mask), minus classes that also have a real annotation here."""
        whole_image_ignores = set()
        present_classes = set()
        for ann in self.get(image_id):
            if ann["ignore"]:
                is_classwide = (
                    ann["class"] is not None
                    and ann["bbox"] is None
                    and ann["mask"] is None
                )
                if is_classwide:
                    whole_image_ignores.add(ann["class"])
            else:
                present_classes.add(ann["class"])
        return whole_image_ignores.difference(present_classes)

    def _make_default_class(self, id: int):
        """(Internal) Ensure `id` has a class name, generating one if needed."""
        self.classes.setdefault(id, "Class " + str(id))

    def _make_default_image(self, id: int):
        """(Internal) Ensure `id` has an image name, generating one if needed."""
        if self.images[id]["name"] is None:
            self.images[id]["name"] = "Image " + str(id)

    def _prepare_box(self, box: object):
        # Hook for subclasses; boxes are stored as-is.
        return box

    def _prepare_mask(self, mask: object):
        # Hook for subclasses; masks are stored as-is.
        return mask

    def _add(
        self,
        image_id: int,
        class_id: int,
        box: object = None,
        mask: object = None,
        score: float = 1,
        ignore: bool = False,
    ):
        """(Internal) Register one annotation; use the public add_* methods."""
        self._make_default_class(class_id)
        self._make_default_image(image_id)
        ann_id = len(self.annotations)
        self.annotations.append(
            {
                "_id": ann_id,
                "score": score,
                "image": image_id,
                "class": class_id,
                "bbox": self._prepare_box(box),
                "mask": self._prepare_mask(mask),
                "ignore": ignore,
            }
        )
        self.images[image_id]["anns"].append(ann_id)

    def add_ground_truth(
        self, image_id: int, class_id: int, box: object = None, mask: object = None
    ):
        """Add a ground truth. If box or mask is None, this GT is skipped for that mode."""
        self._add(image_id, class_id, box, mask)

    def add_detection(
        self,
        image_id: int,
        class_id: int,
        score: int,
        box: object = None,
        mask: object = None,
    ):
        """Add a predicted detection. If box or mask is None, it is skipped for that mode."""
        self._add(image_id, class_id, box, mask, score=score)

    def add_ignore_region(
        self,
        image_id: int,
        class_id: int = None,
        box: object = None,
        mask: object = None,
    ):
        """
        Mark a region whose background detections should not be penalized,
        e.g. a deliberately unannotated crowd. If class_id is -1 the region
        matches any class; if box/mask is None it covers the whole image.
        """
        self._add(image_id, class_id, box, mask, ignore=True)

    def add_class(self, id: int, name: str):
        """Register a human-readable name for a class ID."""
        self.classes[id] = name

    def add_image(self, id: int, name: str):
        """Register an image name/path with an image ID."""
        self.images[id]["name"] = name

    def get(self, image_id: int):
        """Return the full annotation dicts attached to `image_id`."""
        ann_ids = self.images.get(image_id, {}).get("anns", [])
        return [self.annotations[i] for i in ann_ids]
import json
from collections import defaultdict
from copy import copy
from typing import List, Tuple
from tidecv.data import Data
from tidecv.errors.error import Error
def json_to_Data(json_path: str) -> Tuple[Data, Data]:
    """
    Read a json file obtained from a vaex dataframe df_box via .to_records()
    and build the ground-truth and prediction `Data` structures that TIDE
    expects.
    """
    with open(json_path) as jfile:
        records = json.load(jfile)

    def _build(label_key: str, flag_key: str, name: str) -> Data:
        """Reshape the records flagged by `flag_key` (in place) into a Data."""
        out = Data(name=name)
        anns_by_image = defaultdict(list)
        kept = [rec for rec in records if rec[flag_key]]
        for new_id, rec in enumerate(kept):
            rec["_id"] = new_id
            rec["mask"] = None
            rec["ignore"] = False
            rec["class"] = rec[label_key]
            if label_key == "pred":
                # Predictions additionally carry a confidence score.
                rec["score"] = rec["confidence"]
            rec["bbox"] = rec["bbox_xywh"]  # boxes already have the required format
            anns_by_image[rec["image_id"]].append(new_id)
        out.annotations = kept
        # Internal metadata for TIDE: it needs every class id registered.
        for cls_id in sorted({rec["class"] for rec in kept}):
            out.classes[cls_id] = f"Class {cls_id}"
        # TIDE also needs, per image id, the list of annotation ids it owns.
        for img_id, ann_ids in anns_by_image.items():
            out.images[img_id]["name"] = f"Image {img_id}"
            out.images[img_id]["anns"] = ann_ids
        return out

    gts = _build("gold", "is_gold", "test_gt")
    preds = _build("pred", "is_pred", "test_pred")
    return gts, preds
def create_filtered_Data(
    data: Data,
    ids_keep: set,
    data_name: str = "filtered_data",
    reamapping_new_to_old_ids: dict = None,
) -> Tuple[Data, dict]:
    """
    Create a filtered ``Data`` object containing only the annotations whose
    ids are in ``ids_keep``.

    TIDE requires annotation ids to form ``range(len(annotations))``, so the
    kept annotations are re-indexed; the new id -> old id mapping is returned
    so callers can translate back.

    Parameters
    ----------
    data: the Data instance to filter.
    ids_keep: set of original annotation ids to retain.
    data_name: name for the new Data instance.
    reamapping_new_to_old_ids: (sic) optional new->old GT id mapping produced
        by a previous call on the ground truths; used to rewrite each kept
        prediction's ``info["matched_with"]`` link into the new GT id space.

    Returns
    -------
    (filtered Data, dict mapping new annotation id -> old annotation id)

    Fixes vs. the previous version: the return annotation said ``-> Data``
    although a tuple is returned, and the shallow ``copy(ann)`` shared the
    nested ``info`` dict with the source, so the ``matched_with`` rewrite
    below leaked into the original ``data``.
    """
    # Create GTs Data
    data_filtered = Data(name=data_name)

    # Re-index the kept annotations (copies, so the originals are untouched).
    new_id_to_old_id = {}
    annotations = []
    for i, ann in enumerate(
        [ann for ann in data.annotations if ann["_id"] in ids_keep]
    ):
        new_id_to_old_id[i] = ann["_id"]
        new_ann = copy(ann)
        if "info" in new_ann:
            # Copy the nested info dict too: the matched_with rewrite below
            # must not mutate the source Data's annotations.
            new_ann["info"] = copy(new_ann["info"])
        new_ann["_id"] = i
        annotations.append(new_ann)

    # Rewrite pred -> gt links into the new GT id space, if a mapping is given.
    if reamapping_new_to_old_ids is not None:
        reamapping_old_to_new_ids = {
            old: new for new, old in reamapping_new_to_old_ids.items()
        }
        for ann in annotations:
            if "info" in ann and ann["info"].get("matched_with"):
                # The id could be missing from the dict if we don't have
                # all links and the gts is not kept; .get() maps it to None.
                ann["info"]["matched_with"] = reamapping_old_to_new_ids.get(
                    ann["info"]["matched_with"]
                )
    data_filtered.annotations = annotations

    # Restrict the class table to classes that still occur.
    for i in sorted({ann["class"] for ann in annotations}):
        data_filtered.classes[i] = f"Class {i}"

    # Rebuild the per-image annotation index.
    image_ids = defaultdict(list)
    for ann in annotations:
        image_ids[ann["image_id"]].append(ann["_id"])
    for i, anns in image_ids.items():
        data_filtered.images[i]["name"] = f"Image {i}"
        data_filtered.images[i]["anns"] = anns
    return data_filtered, new_id_to_old_id
def enlarge_dataset_to_respect_TIDE(
    gts: Data, preds: Data, gts_keep: set, preds_keep: set, errors: List[Error]
) -> Tuple[Data, Data, dict, dict]:
    """
    Grow a filtered dataset until it is closed under TIDE's pred <-> gt
    links, so that TIDE computed on the filtered dataset equals TIDE computed
    on the full dataset restricted to the filtered ids.

    Adding only the direct pred -> gt links is not enough. For example, if
    the filtered dataset contains a single Dupe, pulling in its GT but not
    the matching TP prediction would turn that Dupe into a TP.

    input:
        - gts, preds: the Data instances; preds also carries the TP links
          (``info["matched_with"]``)
        - gts_keep, preds_keep: annotation ids requested for the filtered set
        - errors: TIDE errors, used for the pred -> gt links of non-TP preds
    return:
        - a tuple (gts_enlarged, preds_enlarged, gts_new_id_to_old_id,
          preds_new_id_to_old_id)
    """
    # pred id -> linked gt id; first from the error objects (non-TP preds)...
    pred_to_gt = {}
    for error in errors:
        if hasattr(error, "pred") and hasattr(error, "gt"):
            pred_to_gt[error.pred["_id"]] = error.gt["_id"]
    # ...then from the TP matches recorded on the predictions themselves
    # (these take precedence on collision, as before).
    for pred in preds.annotations:
        if "matched_with" in pred.get("info", {}):
            pred_to_gt[pred["_id"]] = pred["info"]["matched_with"]

    # Close over the links: GTs reachable from the kept preds...
    linked_gts = {pred_to_gt[pred_id] for pred_id in preds_keep}
    filtered_gts = set(gts_keep) | linked_gts
    # ...and preds whose linked GT ends up in the enlarged GT set.
    linked_preds = {p_id for p_id, g_id in pred_to_gt.items() if g_id in filtered_gts}
    filtered_preds = linked_preds | set(preds_keep)

    gts_enlarged, gts_new_id_to_old_id = create_filtered_Data(gts, filtered_gts)
    preds_enlarged, preds_new_id_to_old_id = create_filtered_Data(
        preds, filtered_preds, reamapping_new_to_old_ids=gts_new_id_to_old_id
    )
    return gts_enlarged, preds_enlarged, gts_new_id_to_old_id, preds_new_id_to_old_id
def filter_dataset_to_label(gts: Data, preds: Data, cls_id: int) -> Tuple[Data, Data]:
    """
    Restrict a dataset (gts and preds) to the annotations of a single class.

    input:
        - gts, preds: the Data instances for ground truths and predictions
        - cls_id: class to filter by
    return:
        - a tuple (gts_filtered, preds_filtered) of re-indexed Data instances,
          with prediction links rewritten into the new GT id space
    """
    keep_gt = {ann["_id"] for ann in gts.annotations if ann["class"] == cls_id}
    keep_pred = {ann["_id"] for ann in preds.annotations if ann["class"] == cls_id}

    gts_filtered, gt_id_map = create_filtered_Data(gts, keep_gt)
    preds_filtered, _ = create_filtered_Data(
        preds, keep_pred, reamapping_new_to_old_ids=gt_id_map
    )
    return gts_filtered, preds_filtered
import os
import sys
from typing import Optional

import numpy as np
def mean(arr: list):
    """Arithmetic mean of `arr`, defined as 0 for an empty sequence."""
    if not arr:
        return 0
    return sum(arr) / len(arr)
def find_first(arr: np.ndarray) -> Optional[int]:
    """Return the index of the first True in a boolean vector, or None if
    the vector is empty or contains no True.

    (Annotations fixed: ``np.array`` is a factory function, not a type, and
    the function can return ``None``, so the old ``-> int`` was wrong.)
    """
    if len(arr) == 0:
        return None
    idx = arr.argmax()
    # argmax returns 0 both for "first True at index 0" and "no True at all",
    # so disambiguate by checking the element itself.
    if idx == 0 and not arr[0]:
        return None
    return idx
def isiterable(x):
    """Return True if x can be iterated over (i.e. iter(x) succeeds).

    (The bare ``except:`` was narrowed to ``TypeError``: iter() raises
    TypeError for non-iterables, and anything else — e.g. KeyboardInterrupt —
    should propagate instead of being swallowed.)
    """
    try:
        iter(x)
        return True
    except TypeError:
        return False
def recursive_sum(x):
    """Sum every scalar in an arbitrarily nested structure of dicts and
    iterables; a non-iterable value is returned as-is.

    NOTE: strings are iterables of strings, so passing one recurses forever —
    same as the previous implementation; intended for numeric structures.
    """
    if isinstance(x, dict):
        children = x.values()
    else:
        try:
            children = iter(x)
        except TypeError:
            # Not iterable: treat as a leaf scalar.
            return x
    total = 0
    for child in children:
        total += recursive_sum(child)
    return total
def apply_messy(x: list, func):
    """Map `func` over a one-level-ragged list: iterable elements get `func`
    applied per item (yielding a list), scalars get it applied directly."""
    out = []
    for elem in x:
        try:
            items = iter(elem)
        except TypeError:
            out.append(func(elem))
        else:
            out.append([func(item) for item in items])
    return out
def apply_messy2(x: list, y: list, func):
    """Zip two one-level-ragged lists and apply a binary `func` pairwise:
    iterable elements are zipped and mapped item-by-item, scalars directly."""
    result = []
    for a, b in zip(x, y):
        try:
            iter(a)
        except TypeError:
            result.append(func(a, b))
        else:
            result.append([func(i, j) for i, j in zip(a, b)])
    return result
def multi_len(x):
    """Return len(x) for sized objects; a bare scalar counts as length 1."""
    try:
        return len(x)
    except TypeError:
        # x has no length (e.g. an int) -> treat it as a single element.
        return 1
def unzip(l):
    """Transpose an iterable of tuples into an iterator of lists (the
    inverse of zip), e.g. [(1, 'a'), (2, 'b')] -> [1, 2], ['a', 'b']."""
    return (list(group) for group in zip(*l))
def points(bbox):
    """Convert an [x, y, w, h] box into its integer (top-left, bottom-right)
    corner points."""
    x, y, w, h = (int(v) for v in bbox)
    return (x, y), (x + w, y + h)
def nonepack(t):
    """Unpack a 2-tuple, substituting (None, None) when `t` itself is None."""
    return (None, None) if t is None else t
class HiddenPrints:
    """Context manager that silences print() by pointing sys.stdout at
    os.devnull for the duration of the block.

    Adapted from https://stackoverflow.com/questions/8391411/suppress-calls-to-print-python
    """

    def __enter__(self):
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, "w")

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the real stdout, then close the devnull handle we opened.
        devnull = sys.stdout
        sys.stdout = self._original_stdout
        devnull.close()
def toRLE(mask: object, w: int, h: int):
    """
    Borrowed from Pycocotools:
    Convert an annotation that can be polygons or uncompressed RLE to
    (compressed) RLE.

    :param mask: polygon list-of-lists, uncompressed RLE dict, or RLE dict
    :param w: image width in pixels
    :param h: image height in pixels
    :return: pycocotools RLE dict (the old docstring claiming "binary mask
        (numpy 2D array)" did not match the code)
    """
    import pycocotools.mask as maskUtils

    if isinstance(mask, list):
        # polygon -- a single object might consist of multiple parts;
        # merge all per-part RLEs into one mask RLE.
        rles = maskUtils.frPyObjects(mask, h, w)
        return maskUtils.merge(rles)
    elif isinstance(mask["counts"], list):
        # uncompressed RLE -> compressed RLE
        return maskUtils.frPyObjects(mask, h, w)
    else:
        # already compressed RLE
        return mask
def polyToBox(poly: list):
    """Convert a polygon in COCO list-of-lists format into its tight
    bounding box [x, y, w, h]."""
    xmin = ymin = 1e10
    xmax = ymax = -1e10
    for component in poly:
        # component is flat [x0, y0, x1, y1, ...]; zip pairs coordinates up
        # and drops a dangling value, matching the old len//2 iteration.
        for x, y in zip(component[0::2], component[1::2]):
            xmin = min(xmin, x)
            xmax = max(xmax, x)
            ymin = min(ymin, y)
            ymax = max(ymax, y)
    return [xmin, ymin, (xmax - xmin), (ymax - ymin)]
from collections import defaultdict
from typing import Dict
import numpy as np
from pycocotools import mask as mask_utils
from .data import Data
class APDataObject:
    """
    Stores all the information necessary to calculate the AP for one IoU
    threshold and one class.

    Predictions live in ``data_points`` (pred id -> (score, is_true, info));
    unmatched ground truths are tracked in ``false_negatives``.
    """

    def __init__(self):
        self.data_points = {}  # dict with all preds (id -> data)
        self.false_negatives = set()  # set of FN ids (i.e., not TPs, i.e., FN + Missed)
        self.num_gt_positives = 0  # total number of GTs
        self.curve = None  # cached PR curve, filled lazily by get_ap()

    def apply_qualifier_no_check(self, kept_preds: set, kept_gts: set) -> object:
        """
        Makes a new data object where we only keep some of the ids in the pred
        and gt lists.

        We make no checks and assume that the given ids are exactly what we want.
        As an example we could check that given a Pred TP, we also have the
        associated GT making it a TP, otherwise that Pred could turn into an FP
        in the filtered data object.
        We do not make such checks in order to avoid unexpected effects.
        """
        obj = APDataObject()
        if not isinstance(kept_preds, set):
            kept_preds = set(kept_preds)
        # Restrict the Preds
        obj.data_points = {
            pred_id: pred_vals
            for pred_id, pred_vals in self.data_points.items()
            if pred_id in kept_preds
        }
        # Propagate the Gts: every kept GT counts toward the denominator.
        obj.false_negatives = self.false_negatives.intersection(kept_gts)
        obj.num_gt_positives = len(kept_gts)
        return obj

    def apply_qualifier(self, kept_preds: set, kept_gts: set) -> object:
        """
        Makes a new data object keeping only the given pred/gt ids, dropping
        true positives whose matched ground truth was removed and adjusting
        the GT-positive count accordingly.
        """
        obj = APDataObject()
        num_gt_removed = 0
        if not isinstance(kept_preds, set):
            kept_preds = set(kept_preds)
        for pred_id in self.data_points:
            score, is_true, info = self.data_points[pred_id]
            # If the data point we kept was a true positive, there's a
            # corresponding ground truth.
            # If so, we should only add that positive if the corresponding
            # ground truth has been kept
            if is_true and info["matched_with"] not in kept_gts:
                num_gt_removed += 1
                continue
            if pred_id in kept_preds:
                obj.data_points[pred_id] = self.data_points[pred_id]
        # Propagate the gt
        obj.false_negatives = self.false_negatives.intersection(kept_gts)
        num_gt_removed += len(self.false_negatives) - len(obj.false_negatives)
        obj.num_gt_positives = self.num_gt_positives - num_gt_removed
        return obj

    def push(self, id: int, score: float, is_true: bool, info: dict = None):
        """Record one prediction.

        `info` defaults to a fresh empty dict per call (the old shared
        mutable ``{}`` default could be aliased across annotations).
        """
        self.data_points[id] = (score, is_true, {} if info is None else info)

    def push_false_negative(self, id: int):
        """Record a ground truth that no prediction matched."""
        self.false_negatives.add(id)

    def add_gt_positives(self, num_positives: int):
        """Call this once per image."""
        self.num_gt_positives += num_positives

    def is_empty(self) -> bool:
        """True when there is nothing (no preds, no GTs) to evaluate."""
        return len(self.data_points) == 0 and self.num_gt_positives == 0

    def get_pr_curve(self) -> tuple:
        """Return the (recall samples, precision samples) curve, computing
        it via get_ap() if necessary."""
        if self.curve is None:
            self.get_ap()
        return self.curve

    def get_ap(self) -> float:
        """Compute the COCO-style AP in [0, 100]. Warning: result not cached."""
        if self.num_gt_positives == 0:
            return 0

        # Sort descending by score
        data_points = list(self.data_points.values())
        data_points.sort(key=lambda x: -x[0])

        precisions = []
        recalls = []
        num_true = 0
        num_false = 0

        # Compute the precision-recall curve. The x axis is recalls and the y axis precisions.
        for datum in data_points:
            # datum[1] is whether the detection a true or false positive
            if datum[1]:
                num_true += 1
            else:
                num_false += 1
            precision = num_true / (num_true + num_false)
            recall = num_true / self.num_gt_positives
            precisions.append(precision)
            recalls.append(recall)

        # Smooth the curve by computing [max(precisions[i:]) for i in range(len(precisions))]
        # Basically, remove any temporary dips from the curve (same as COCOEval).
        for i in range(len(precisions) - 1, 0, -1):
            if precisions[i] > precisions[i - 1]:
                precisions[i - 1] = precisions[i]

        # Compute the integral of precision(recall) d_recall from recall=0->1
        # using fixed-length riemann summation with 101 bars.
        resolution = 100  # Standard COCO Resoluton
        y_range = [0] * (
            resolution + 1
        )  # idx 0 is recall == 0.0 and idx 100 is recall == 1.00
        x_range = np.array([x / resolution for x in range(resolution + 1)])
        recalls = np.array(recalls)

        # For each sampled recall x, take the precision at the nearest
        # observed recall at-or-after x (this is how COCOEval approximates
        # the integral).
        indices = np.searchsorted(recalls, x_range, side="left")
        for bar_idx, precision_idx in enumerate(indices):
            if precision_idx < len(precisions):
                y_range[bar_idx] = precisions[precision_idx]
        self.curve = (x_range, y_range)

        # Finally compute the riemann sum to get our integral.
        # avg([precision(x) for x in 0:0.01:1])
        return sum(y_range) / len(y_range) * 100
class ClassedAPDataObject:
    """Stores an APDataObject for each class in the dataset."""

    def __init__(self):
        # The class itself is a valid default factory; no lambda wrapper
        # (was ``defaultdict(lambda: APDataObject())``).
        self.objs = defaultdict(APDataObject)

    def apply_qualifier(
        self, pred_dict: dict, gt_dict: dict, check: bool = False
    ) -> object:
        """Filter every per-class APDataObject down to the given ids.

        pred_dict / gt_dict map class id -> set of ids to keep; with
        check=True the consistency-checking filter is used, otherwise the
        unchecked (trusting) one.
        """
        ret = ClassedAPDataObject()
        for _class, obj in self.objs.items():
            pred_set = pred_dict.get(_class, set())
            gt_set = gt_dict.get(_class, set())
            if check:
                ret.objs[_class] = obj.apply_qualifier(pred_set, gt_set)
            else:
                ret.objs[_class] = obj.apply_qualifier_no_check(pred_set, gt_set)
        return ret

    def push(self, class_: int, id: int, score: float, is_true: bool, info: dict = None):
        """Record one prediction for a class.

        A fresh empty dict is substituted when `info` is omitted (avoids the
        shared-mutable-default pitfall of the previous ``info: dict = {}``).
        """
        self.objs[class_].push(id, score, is_true, {} if info is None else info)

    def push_false_negative(self, class_: int, id: int):
        """Record an unmatched ground truth for a class."""
        self.objs[class_].push_false_negative(id)

    def add_gt_positives(self, class_: int, num_positives: int):
        """Add to the ground-truth positive count of a class."""
        self.objs[class_].add_gt_positives(num_positives)

    def get_mAP(self) -> float:
        """Mean AP over all non-empty classes."""
        aps = [x.get_ap() for x in self.objs.values() if not x.is_empty()]
        # If there are no objects (no golds, no preds), then it's a perfect run
        if not aps:
            return 100.0
        return sum(aps) / len(aps)

    def get_APs(self) -> Dict[int, float]:
        """Per-class AP for every non-empty class."""
        return {
            cls_id: x.get_ap() for cls_id, x in self.objs.items() if not x.is_empty()
        }

    def get_gt_positives(self) -> dict:
        """Map class id -> number of ground-truth positives."""
        return {k: v.num_gt_positives for k, v in self.objs.items()}

    def get_pr_curve(self, cat_id: int = None) -> tuple:
        """PR curve for one class, or the pointwise average over all
        classes when cat_id is None."""
        if cat_id is None:
            # Average out the curves when using all categories
            curves = [x.get_pr_curve() for x in list(self.objs.values())]
            x_range = curves[0][0]
            y_range = [0] * len(curves[0][1])
            for x, y in curves:
                for i in range(len(y)):
                    y_range[i] += y[i]
            for i in range(len(y_range)):
                y_range[i] /= len(curves)
        else:
            x_range, y_range = self.objs[cat_id].get_pr_curve()
        return x_range, y_range
# Note: Unused.
class APEval:
    """
    A class that computes the AP of some dataset.
    Note that TIDE doesn't use this internally.
    This is here so you can get a look at how the AP calculation process works.
    """

    def __init__(self):
        self.iou_thresholds = [x / 100 for x in range(50, 100, 5)]
        self.ap_data = defaultdict(lambda: defaultdict(lambda: APDataObject()))
        # APDataObject.data_points is a dict keyed by prediction id, so
        # every push() needs a unique id. (The previous code called
        # push(score, is_true), which misused the score as the id and let
        # equal scores overwrite each other.)
        self._next_push_id = 0

    def _push(self, ap_obj: APDataObject, score: float, is_true: bool):
        """(Internal) Push one prediction with a globally unique id."""
        ap_obj.push(self._next_push_id, score, is_true)
        self._next_push_id += 1

    def _eval_image(self, preds: list, gt: list, type_str: str = "box"):
        """Accumulate AP data for one image of COCO-style dicts; type_str
        selects 'box' or 'mask' evaluation."""
        data_type = "segmentation" if type_str == "mask" else "bbox"
        preds_data = [x[data_type] for x in preds]

        # Split gt and crowd annotations
        gt_new = []
        gt_crowd = []
        for x in gt:
            if x["iscrowd"]:
                gt_crowd.append(x)
            else:
                gt_new.append(x)
        gt = gt_new

        # Some setup
        num_pred = len(preds)
        num_gt = len(gt)
        num_crowd = len(gt_crowd)

        iou_cache = mask_utils.iou(
            preds_data, [x[data_type] for x in gt], [False] * num_gt
        )
        if num_crowd > 0:
            crowd_iou_cache = mask_utils.iou(
                preds_data, [x[data_type] for x in gt_crowd], [True] * num_crowd
            )

        # Make sure we're evaluating sorted by score
        indices = list(range(num_pred))
        indices.sort(key=lambda i: -preds[i]["score"])

        classes = [x["category_id"] for x in preds]
        gt_classes = [x["category_id"] for x in gt]
        crowd_classes = [x["category_id"] for x in gt_crowd]

        for _class in set(classes + gt_classes):
            num_gt_for_class = sum([1 for x in gt_classes if x == _class])

            for iouIdx in range(len(self.iou_thresholds)):
                iou_threshold = self.iou_thresholds[iouIdx]
                gt_used = [False] * len(gt_classes)
                ap_obj = self.ap_data[iouIdx][_class]
                ap_obj.add_gt_positives(num_gt_for_class)

                for i in indices:
                    if classes[i] != _class:
                        continue

                    # Greedily match against the best unused GT of this class.
                    max_iou_found = iou_threshold
                    max_match_idx = -1
                    for j in range(num_gt):
                        if gt_used[j] or gt_classes[j] != _class:
                            continue
                        iou = iou_cache[i][j]
                        if iou > max_iou_found:
                            max_iou_found = iou
                            max_match_idx = j

                    if max_match_idx >= 0:
                        gt_used[max_match_idx] = True
                        self._push(ap_obj, preds[i]["score"], True)
                    else:
                        # If the detection matches a crowd, we can just ignore it
                        matched_crowd = False
                        if num_crowd > 0:
                            for j in range(len(crowd_classes)):
                                if crowd_classes[j] != _class:
                                    continue
                                iou = crowd_iou_cache[i][j]
                                if iou > iou_threshold:
                                    matched_crowd = True
                                    break
                        # All this crowd code so that we can make sure that our eval code gives the
                        # same result as COCOEval. There aren't even that many crowd annotations to
                        # begin with, but accuracy is of the utmost importance.
                        if not matched_crowd:
                            self._push(ap_obj, preds[i]["score"], False)

    def evaluate(self, preds: Data, gt: Data, type_str: str = "box"):
        """Evaluate every image registered in the ground truth."""
        # NOTE(review): this fork's Data has no `.ids` attribute (the old
        # `gt.ids` raised AttributeError); iterating `gt.images` yields the
        # registered image ids — confirm against upstream intent.
        for id in gt.images:
            x = preds.get(id)
            y = gt.get(id)
            self._eval_image(x, y, type_str)

    def compute_mAP(self):
        """Average the per-threshold, per-class APs into a single mAP.

        APDataObject.get_ap() already returns percentages (0-100), so the
        old extra ``* 100`` double-scaled the result; it has been removed.
        """
        num_threshs = len(self.ap_data)
        thresh_APs = []
        for thresh, classes in self.ap_data.items():
            num_classes = len([x for x in classes.values() if not x.is_empty()])
            ap = 0
            if num_classes > 0:
                class_APs = [x.get_ap() for x in classes.values() if not x.is_empty()]
                ap = sum(class_APs) / num_classes
            thresh_APs.append(ap)
        return round(sum(thresh_APs) / num_threshs, 2)
import json
import os
import shutil
import urllib.request
import zipfile
from collections import defaultdict
from pathlib import Path
from appdirs import user_data_dir
from . import functions as f
from .data import Data
def default_name(path: str) -> str:
    """File name of `path` without its directory or extension."""
    base = os.path.basename(path)
    stem, _ext = os.path.splitext(base)
    return stem
def get_tide_path():
    """Directory where TIDE caches downloaded annotations.

    The TIDE_PATH environment variable overrides the per-user data dir;
    the directory is created on first use.
    """
    tide_path = os.environ.get("TIDE_PATH")
    if tide_path is None:
        tide_path = user_data_dir("tidecv", appauthor=False)
    if not os.path.exists(tide_path):
        os.makedirs(tide_path)
    return tide_path
def download_annotations(name: str, url: str, force_download: bool = False) -> str:
    """Download and unzip an annotation archive into the TIDE cache.

    The zip at `url` is extracted into <tide_path>/<name>/, and an empty
    `_finished` marker file records that the download completed, so later
    calls can reuse the cache. With force_download=True, or when the marker
    is missing (e.g. an interrupted previous run), the directory is wiped
    and fetched again.

    Returns the path of the extracted annotation directory.
    """
    tide_path = get_tide_path()
    candidate_path = os.path.join(tide_path, name)
    finished_file_path = os.path.join(candidate_path, "_finished")
    zip_file_path = os.path.join(candidate_path, "_tmp.zip")

    # Check if the file has already been downloaded
    # If there isn't a file called _finished, that means we didn't finish downloading last time, so try again
    already_downloaded = os.path.exists(candidate_path) and os.path.exists(
        finished_file_path
    )

    if not force_download and already_downloaded:
        return candidate_path
    else:
        print("{} annotations not found. Downloading...".format(name))

        # Start from a clean slate so remnants of a partial download can't
        # mix with the fresh archive.
        if os.path.exists(candidate_path):
            shutil.rmtree(candidate_path)
        os.makedirs(candidate_path)

        urllib.request.urlretrieve(url, zip_file_path)

        with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
            zip_ref.extractall(candidate_path)

        os.remove(zip_file_path)

        open(
            finished_file_path, "a"
        ).close()  # Make an empty _finished file to mark that we were successful

        print('Successfully downloaded {} to "{}"'.format(name, candidate_path))
    return candidate_path
def COCO(
    path: str = None,
    name: str = None,
    year: int = 2017,
    ann_set: str = "val",
    force_download: bool = False,
) -> Data:
    """
    Loads ground truth from a COCO-style annotation file.

    If path is not specified, this will download the COCO annotations for the year and ann_set specified.
    Valid years are 2014, 2017 and valid ann_sets are 'val' and 'train'.
    """
    if path is None:
        path = download_annotations(
            "COCO{}".format(year),
            "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(
                year
            ),
            force_download,
        )
        path = os.path.join(
            path, "annotations", "instances_{}{}.json".format(ann_set, year)
        )

    if name is None:
        name = default_name(path)

    with open(path, "r") as json_file:
        cocojson = json.load(json_file)

    images = cocojson["images"]
    anns = cocojson["annotations"]
    cats = cocojson["categories"] if "categories" in cocojson else None

    # Add everything from the coco json into our data structure
    data = Data(name, max_dets=100)

    # image id -> full image record; needed below for width/height when
    # converting segmentations to RLE. (Dropped an unused enumerate index.)
    image_lookup = {}
    for image in images:
        image_lookup[image["id"]] = image
        data.add_image(image["id"], image["file_name"])

    if cats is not None:
        for cat in cats:
            data.add_class(cat["id"], cat["name"])

    for ann in anns:
        image = ann["image_id"]
        _class = ann["category_id"]
        box = ann["bbox"]
        mask = f.toRLE(
            ann["segmentation"],
            image_lookup[image]["width"],
            image_lookup[image]["height"],
        )

        # COCO "crowd" annotations become ignore regions, matching COCOEval.
        if ann["iscrowd"]:
            data.add_ignore_region(image, _class, box, mask)
        else:
            data.add_ground_truth(image, _class, box, mask)
    return data
def COCOResult(path: str, name: str = None) -> Data:
    """Loads predictions from a COCO-style results file."""
    if name is None:
        name = default_name(path)

    with open(path, "r") as json_file:
        dets = json.load(json_file)

    data = Data(name)
    for det in dets:
        # bbox / segmentation are optional; a missing one becomes None so
        # that mode is skipped for this detection.
        data.add_detection(
            det["image_id"],
            det["category_id"],
            det["score"],
            det.get("bbox"),
            det.get("segmentation"),
        )
    return data
def LVIS(
    path: str = None,
    name: str = None,
    version_str: str = "v1",
    force_download: bool = False,
) -> Data:
    """
    Load an LVIS-style dataset.
    The version string is used for downloading the dataset and should be one of the versions of LVIS (e.g., v0.5, v1).

    Note that LVIS evaluation is special, but we can emulate it by adding ignore regions.
    The detector isn't punished for predicted class that LVIS annotators haven't guarenteed are in
    the image (i.e., the sum of GT annotated classes in the image and those marked explicitly not
    in the image.) In order to emulate this behavior, add ignore region labels for every class not
    found to be in the image. This is not that inefficient because ignore regions are separate out
    during mAP calculation and error processing, so adding a bunch of them doesn't hurt.

    The LVIS AP numbers are slightly lower than what the LVIS API reports because of these workarounds.
    """
    if path is None:
        path = download_annotations(
            "LVIS{}".format(version_str),
            "https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_{}_val.json.zip".format(
                version_str
            ),
            force_download,
        )
        path = os.path.join(path, "lvis_{}_val.json".format(version_str))

    if name is None:
        name = default_name(path)

    with open(path, "r") as json_file:
        lvisjson = json.load(json_file)

    images = lvisjson["images"]
    anns = lvisjson["annotations"]
    cats = lvisjson["categories"] if "categories" in lvisjson else None

    data = Data(name, max_dets=300)

    image_lookup = {}
    # image id -> classes the annotators made a statement about
    # (`set` is a valid default factory directly; was `lambda: set()`).
    classes_in_img = defaultdict(set)

    for image in images:
        image_lookup[image["id"]] = image
        data.add_image(
            image["id"], image["coco_url"]
        )  # LVIS has no image names, only coco urls

        # Negative categories are guarenteed by the annotators to not be in the image.
        # Thus we should care about them during evaluation.
        for cat_id in image["neg_category_ids"]:
            classes_in_img[image["id"]].add(cat_id)

    if cats is not None:
        for cat in cats:
            data.add_class(cat["id"], cat["synset"])

    for ann in anns:
        image = ann["image_id"]
        _class = ann["category_id"]
        box = ann["bbox"]
        mask = f.toRLE(
            ann["segmentation"],
            image_lookup[image]["width"],
            image_lookup[image]["height"],
        )

        data.add_ground_truth(image, _class, box, mask)

        # There's an annotation for this class, so we should consider the class for evaluation.
        classes_in_img[image].add(_class)

    all_classes = set(data.classes.keys())

    # LVIS doesn't penalize the detector for detecting classes that the annotators haven't guarenteed to be in/out of
    # the image. Here we simulate that property by adding ignore regions for all such classes.
    for image in images:
        ignored_classes = all_classes.difference(classes_in_img[image["id"]])

        # LVIS doesn't penalize the detector for mistakes made on classes explicitly marked as not exhaustively annoted
        # We can emulate this by adding ignore regions for every category listed, so add them to the ignored classes.
        ignored_classes.update(set(image["not_exhaustive_category_ids"]))

        for _cls in ignored_classes:
            data.add_ignore_region(image["id"], _cls)
    return data
def LVISResult(path: str, name: str = None) -> Data:
    """Loads predictions from an LVIS-style results file.

    The format is identical to a COCO-style results file, so this simply
    delegates to COCOResult.
    """
    return COCOResult(path, name)
def Pascal(
    path: str = None,
    name: str = None,
    year: int = 2007,
    ann_set: str = "val",
    force_download: bool = False,
) -> Data:
    """
    Loads the Pascal VOC 2007 or 2012 ground truth from its COCO-format json.
    Valid years are 2007 and 2012; valid ann_sets are 'train' and 'val'.
    When no path is given, the annotations are downloaded (and cached) first.
    """
    if path is None:
        archive_dir = download_annotations(
            "Pascal",
            "https://s3.amazonaws.com/images.cocodataset.org/external/external_PASCAL_VOC.zip",
            force_download,
        )
        json_name = "pascal_{}{}.json".format(ann_set, year)
        path = os.path.join(archive_dir, "PASCAL_VOC", json_name)
    return COCO(path, name)
def Cityscapes(path: str, name: str = None):
    """
    Loads the fine cityscapes annotations as instance segmentation masks, and also generates bounding boxes for them.
    Note that we can't automatically download Cityscapes because it requires registration and an agreement to the ToS.
    You can get cityscapes here: https://www.cityscapes-dataset.com/
    Path should be to gtFine/<ann_set>. E.g., <path_to_cityscapes>/gtFine/val.
    """
    if name is None:
        name = default_name(path)
    data = Data(name)

    # Cityscapes label name -> numeric class id for the instance-level classes.
    label_to_id = {
        "person": 24,
        "rider": 25,
        "car": 26,
        "truck": 27,
        "train": 31,
        "motorcycle": 32,
        "bicycle": 33,
        "bus": 28,
        "caravan": 29,
        "trailer": 30,
    }
    # Caravan and trailer exist in the label set but are excluded from evaluation.
    excluded_ids = {29, 30}

    for label, class_id in label_to_id.items():
        data.add_class(class_id, label)

    for ann_path in Path(path).glob("*/*.json"):
        with open(ann_path) as json_file:
            ann_json = json.load(json_file)

        # Note: a string for an image ID is okay.
        image_id = os.path.basename(ann_path).replace("_gtFine_polygons.json", "")

        # The id in this case is just the name of the image.
        data.add_image(image_id, image_id)

        # Caravan and Trailer should be ignored from all evaluation.
        for excluded_id in excluded_ids:
            data.add_ignore_region(image_id, excluded_id)

        for obj in ann_json["objects"]:
            label = obj["label"]

            # Cityscapes labelers can label objects without defined boundaries as
            # '<label>group'. In COCO-land this would be a crowd annotation, so
            # treat it as an ignore region instead of a ground truth.
            is_crowd = label.endswith("group")
            if is_crowd:
                label = label[:-5]  # Strip the 'group' suffix

            # We are only considering instance classes.
            if label not in label_to_id:
                continue
            class_id = label_to_id[label]
            # If the class is not used in evaluation, don't include it.
            if class_id in excluded_ids:
                continue

            # Flatten the list of [x, y] points into a single coordinate list,
            # where every 2 ints represents a point.
            poly = [sum(obj["polygon"], [])]
            box = f.polyToBox(poly)

            if is_crowd:
                data.add_ignore_region(image_id, class_id, box, poly)
            else:
                data.add_ground_truth(image_id, class_id, box, poly)
    return data
from typing import Union
class Error:
    """A base class for all error types.

    Subclasses attach a ``pred`` (prediction) dict and/or a ``gt``
    (ground truth) dict; the helpers below inspect whichever is present.
    """

    def fix(self) -> Union[tuple, None]:
        """
        Returns a fixed version of the AP data point for this error or
        None if this error should be suppressed.

        Return type is:
                class:int, (score:float, is_positive:bool, info:dict)
        """
        raise NotImplementedError

    def unfix(self) -> Union[tuple, None]:
        """Returns the original version of this data point, or None if it has no entry."""
        if not hasattr(self, "pred"):
            return None
        # If an ignored instance is an error, it's not in the data point list,
        # so there's no "unfixed" entry.
        if self.pred["used"] is None:
            return None
        return self.pred["class"], (
            self.pred["score"],
            False,
            self.pred["info"],
        )

    def is_pred(self) -> bool:
        """Whether this error is attached to a prediction."""
        return hasattr(self, "pred")

    def is_gt(self) -> bool:
        """Whether this error is attached to a ground truth."""
        return hasattr(self, "gt")

    def is_contained_in(self, pred_ids: set, gt_ids: set) -> bool:
        """Whether this error's underlying pred/gt id appears in the given ids.

        Fix: the old version copied *both* input collections into sets on
        every call (even the one it never consulted) just to run a single
        membership test; ``in`` works on any container directly.
        """
        ids = pred_ids if self.is_pred() else gt_ids
        return self.get_id() in ids

    def get_id(self) -> int:
        """The id of the attached pred (preferred) or gt, or -1 if neither exists."""
        if hasattr(self, "pred"):
            return self.pred["_id"]
        elif hasattr(self, "gt"):
            return self.gt["_id"]
        else:
            return -1

    def get_info(self, dataset):
        """Collect a debugging info dict for this error from ``dataset``."""
        info = {}
        info["type"] = self.short_name
        if hasattr(self, "gt"):
            info["gt"] = self.gt
        if hasattr(self, "pred"):
            info["pred"] = self.pred
        # Prefer the prediction's image when both are present.
        img_id = (self.pred if hasattr(self, "pred") else self.gt)["image_id"]
        info["all_gt"] = dataset.get(img_id)
        info["img"] = dataset.get_img(img_id)
        return info
class BestGTMatch:
    """
    Some errors are fixed by changing false positives to true positives.
    The issue with fixing these errors naively is that you might have
    multiple errors attempting to fix the same GT. Since only one error
    can be fixed per GT, this class tracks the highest-scoring prediction
    for each GT, fixes that one, and suppresses every other error caused
    by the same GT.
    """

    def __init__(self, pred, gt):
        self.pred = pred
        self.gt = gt

        # A GT that is already claimed by a real match can never be re-used.
        self.suppress = bool(self.gt["used"])
        if not self.suppress:
            self.gt["usable"] = True

        # Track the best (highest-score) candidate seen for this GT so far.
        self.gt.setdefault("best_score", -1)
        score = self.pred["score"]
        if score > self.gt["best_score"]:
            self.gt["best_score"] = score
            self.gt["best_id"] = self.pred["_id"]

    def fix(self):
        """Return the fixed data point, or None if this match is suppressed."""
        if self.suppress:
            return None
        if self.gt["best_id"] != self.pred["_id"]:
            # A higher-scoring prediction claimed this GT.
            return None
        return (self.pred["score"], True, self.pred["info"])
from .error import BestGTMatch, Error
class ClassError(Error):
    """A detection that would have been a true positive with the right class label."""

    short_name = "Cls"
    description = (
        "Error caused when a prediction would have been marked positive "
        "if it had the correct class."
    )

    def __init__(self, pred: dict, gt: dict):
        self.pred = pred
        self.gt = gt
        # Only one prediction may claim a GT; suppress if it is already used.
        self.match = None if gt["used"] else BestGTMatch(pred, gt)

    def fix(self):
        """Fix by relabelling the detection with the GT's class, if not suppressed."""
        if self.match is None:
            return None
        return self.gt["class"], self.match.fix()
class BoxError(Error):
    """A detection that would have matched its GT if localized better."""

    short_name = "Loc"
    description = "Error caused when a prediction would have been marked positive if it was localized better."

    def __init__(self, pred: dict, gt: dict):
        self.pred = pred
        self.gt = gt
        # Only one prediction may claim a GT; suppress if it is already used.
        self.match = None if gt["used"] else BestGTMatch(pred, gt)

    def fix(self):
        """Fix by treating the detection as correctly localized, if not suppressed."""
        if self.match is None:
            return None
        return self.pred["class"], self.match.fix()
class DuplicateError(Error):
    """A detection whose GT was already claimed by another (suppressing) detection."""

    short_name = "Dupe"
    description = (
        "Error caused when a prediction would have been marked positive "
        "if the GT wasn't already in use by another detection."
    )

    def __init__(self, pred: dict, gt: dict, suppressor: dict):
        self.pred = pred
        self.gt = gt
        # The detection that already claimed this GT.
        self.suppressor = suppressor

    def fix(self):
        """Duplicates are never promoted to true positives."""
        return None
class BackgroundError(Error):
    """A detection with essentially no GT overlap (IoU < 0.1)."""

    short_name = "Bkg"
    description = "Error caused when this detection should have been classified as background (IoU < 0.1)."

    def __init__(self, pred: dict):
        self.pred = pred

    def fix(self):
        """Background errors cannot be fixed; they remain false positives."""
        return None
class ClassBoxError(Error):
    """A detection needing both the right class and better localization to match."""

    short_name = "ClsLoc"
    description = (
        "Error caused when a prediction would have been marked positive "
        "if it had the correct class and was localized better."
    )

    def __init__(self, pred: dict, gt: dict):
        self.pred = pred
        self.gt = gt

    def fix(self):
        """Requires two simultaneous corrections, so it is never auto-fixed."""
        return None
class MissedError(Error):
    """A ground truth the model failed to detect entirely."""

    short_name = "Miss"
    description = "Represents GT missed by the model. Doesn't include GT corrected elsewhere in the model."

    def __init__(self, gt: dict):
        self.gt = gt

    def fix(self):
        """Fix by adding a detection for the missed GT's class."""
        return self.gt["class"], -1
# These are special errors, so no inheritance
class FalsePositiveError:
    """Special error: the AP headroom from achieving perfect precision.

    Not an ``Error`` subclass; it rescales existing data points rather than
    attaching to a single pred/gt.
    """

    short_name = "FalsePos"
    description = (
        "Represents the potential AP gained by having perfect precision"
        + " (e.g., by scoring all false positives as conf=0) without affecting recall."
    )

    @staticmethod
    def fix(score: float, correct: bool, info: dict) -> tuple:
        """Re-score a data point: correct detections get conf 1, incorrect get conf 0."""
        return (1, True, info) if correct else (0, False, info)
class FalseNegativeError:
    """Special error: the AP headroom from achieving perfect recall.

    Not an ``Error`` subclass; like FalsePositiveError it is a summary
    category rather than a per-instance error.
    """

    short_name = "FalseNeg"
    # Fix: the description read "the potentially AP gained" — a typo for
    # "potential" (matching FalsePositiveError's wording).
    description = (
        "Represents the potential AP gained by having perfect recall"
        + " without affecting precision."
    )
# rungutan-cli
## What is Rungutan?
[Rungutan](https://rungutan.com) is the first API Load Testing SaaS platform worldwide, 100% Serverless, which helps you simulate workflows to emulate user experience, so it's easier to design workflow oriented strategies.
## Where do I sign up?
Although we love our [landing page](https://rungutan.com) and we definitely think it's worth checking out, you can sign up directly by going on [https://app.rungutan.com/signup](https://app.rungutan.com/signup)
## Do you have any ACTUAL documentation?
D'oh.
You can find it on our [Docs](https://docs.rungutan.com) page.
## Why use the CLI?
This CLI has been designed to:
1) perform load testing directly within a CI/CD process
2) run any command that you would otherwise run on the web platform, directly in your terminal
## How to install the CLI?
```shell script
pip install rungutan
```
## How to run the CLI?
* Set up your authentication mechanism using the Rungutan API key
```shell script
rungutan configure (--profile some-profile-name)
```
* Check the overall help menu
```shell script
$ rungutan help
usage: rungutan <command> [<args>]
To see help text, you can run:
rungutan help
rungutan version
rungutan configure --help
rungutan domains --help
rungutan team --help
rungutan results --help
rungutan raw_results --help
rungutan tests --help
rungutan templates --help
rungutan crons --help
rungutan notifications --help
rungutan vault --help
rungutan csv --help
rungutan certificate --help
rungutan file --help
Rungutan CLI utility for interacting with https://rungutan.com
positional arguments:
command Command to run
optional arguments:
-h, --help show this help message and exit
```
* Check the help menu for a specific command
```shell script
$ rungutan domains --help
usage: rungutan [-h] [--domain_name DOMAIN_NAME] [-p PROFILE]
[{list,remove,add}]
Domain command system
positional arguments:
{list,remove,add}
optional arguments:
-h, --help show this help message and exit
--domain_name DOMAIN_NAME
Required parameter for subcommand ["remove", "add"]
-p PROFILE, --profile PROFILE
The profile you'll be using.
If not specified, the "default" profile will be used.
If no profiles are defined, the following env variables will be checked:
* RUNGUTAN_TEAM_ID
* RUNGUTAN_API_KEY
```
* Actually run a command
```shell script
$ rungutan domains list
{
"Domains": [
{
"domain_name": "rungutan.com",
"submitted_date": "2020-01-22T09:43:08Z",
"member_email": "owner@team.com"
}
]
}
```
## Run it as a docker container
* Using local volume
```shell script
$ docker run \
-v ${HOME}/.rungutan:/root/.rungutan \
rungutancommunity/rungutan-cli:latest rungutan tests --help
usage: rungutan [-h] [--test_id TEST_ID] [--test_file TEST_FILE]
[--test_public {public,private}] [--test_name TEST_NAME]
[--wait_to_finish] [-p PROFILE]
[{list,add,cancel,remove,get,preview-credits,set-sharing}]
Tests command system
positional arguments:
{list,add,cancel,remove,get,preview-credits,set-sharing}
optional arguments:
-h, --help show this help message and exit
--test_id TEST_ID Required parameter for subcommand ["cancel", "get", "set-sharing", "remove"].
Optional parameter for subcommand ["list"]
--test_file TEST_FILE
Required parameter for subcommand ["add", "preview-credits"].
You can specify --test_file or --template_id, but not both!
--template_id TEMPLATE_ID
Required parameter for subcommand ["add", "preview-credits"].
You can specify --test_file or --template_id, but not both!
--test_public {public,private}
Required parameter for subcommand ["set-sharing"]
--test_name TEST_NAME
Optional parameter for subcommand ["add", "preview-credits"].
Use it to override the value for "test_name" in your test_file or to simply specify a name for the test
--wait_to_finish Optional parameter for subcommand ["add"]
Use it to set the CLI to wait for the test to finish before exiting.
-p PROFILE, --profile PROFILE
The profile you'll be using.
If not specified, the "default" profile will be used.
If no profiles are defined, the following env variables will be checked:
* RUNGUTAN_TEAM_ID
* RUNGUTAN_API_KEY
```
* Or using environment variables
```shell script
$ docker run \
-e RUNGUTAN_TEAM_ID=my_team \
-e RUNGUTAN_API_KEY=my_api_key \
rungutancommunity/rungutan-cli:latest rungutan domains --help
usage: rungutan [-h] [--domain_name DOMAIN_NAME] [-p PROFILE]
[{list,remove,add}]
Domain command system
positional arguments:
{list,remove,add}
optional arguments:
-h, --help show this help message and exit
--domain_name DOMAIN_NAME
Required parameter for subcommand ["remove", "add"]
-p PROFILE, --profile PROFILE
The profile you'll be using.
If not specified, the "default" profile will be used.
If no profiles are defined, the following env variables will be checked:
* RUNGUTAN_TEAM_ID
* RUNGUTAN_API_KEY
```
| /rungutan-1.9.0.tar.gz/rungutan-1.9.0/README.md | 0.455441 | 0.731922 | README.md | pypi |
from odbms import DBMS, Model
class Project(Model):
    """Database model for a user's project, persisted in the 'projects' table."""

    TABLE_NAME = 'projects'

    def __init__(self, user_id, name, version="0.0.1", description="", homepage="",
                 language="", runtime="", start_file="", private=False, author=None, created_at=None, updated_at=None, id=None):
        super().__init__(created_at, updated_at, id)
        self.name = name
        self.user_id = user_id
        self.version = version
        self.description = description
        self.homepage = homepage
        self.language = language
        self.runtime = runtime
        self.private = private
        self.start_file = start_file
        # Fix: the default was a mutable ``author={}`` shared by every
        # instance; use None as a sentinel and create a fresh dict instead.
        self.author = {} if author is None else author

    def save(self):
        '''
        Instance Method for saving Project instance to database

        @params None
        @return None
        '''

        data = {
            "name": self.name,
            "user_id": self.user_id,
            "version": self.version,
            "description": self.description,
            "homepage": self.homepage,
            "language": self.language,
            "runtime": self.runtime,
            "private": self.private,
            "start_file": self.start_file,
            "author": self.author
        }

        if DBMS.Database.dbms == 'mongodb':
            # Fix: these lines previously used ``data[...]: value`` — annotation
            # statements that silently did nothing — so the timestamps were
            # never persisted. Use assignment instead.
            data["created_at"] = self.created_at
            data["updated_at"] = self.updated_at

        return DBMS.Database.insert(Project.TABLE_NAME, Model.normalise(data, 'params'))

    def user(self):#-> User:
        '''
        Instance Method for retrieving User of Project instance

        @params None
        @return User Instance
        '''

        return Model.normalise(DBMS.Database.find_one('users', Model.normalise({'id': self.user_id}, 'params')))

    def functions(self)-> list:
        '''
        Instance Method for retrieving Functions of Project Instance

        @params None
        @return List of Function Instances
        '''

        return DBMS.Database.find('functions', Model.normalise({'project_id': self.id}, 'params'))

    def count_functions(self)-> int:
        '''
        Instance Method for counting functions in Project

        @params None
        @return Count of functions
        '''

        return DBMS.Database.count('functions', Model.normalise({'project_id': self.id}, 'params'))

    def json(self)-> dict:
        '''
        Instance Method for converting instance to Dict

        @params None
        @return Dict() format of Project instance
        '''

        return {
            "id": str(self.id),
            "name": self.name,
            "user_id": str(self.user_id),
            "version": self.version,
            "description": self.description,
            "homepage": self.homepage,
            "language": self.language,
            "runtime": self.runtime,
            "private": self.private,
            "start_file": self.start_file,
            "author": self.author,
            "functions": self.count_functions(),
            "created_at": self.created_at,
            "updated_at": self.updated_at
        }

    @classmethod
    def get_by_user(cls, user_id: str)-> list:
        '''
        Class Method for retrieving projects by a user

        @param user_id:str _id of the user
        @return List of Project instances
        '''

        projects = DBMS.Database.find(Project.TABLE_NAME, Model.normalise({'user_id': user_id}, 'params'))
        return [cls(**Model.normalise(elem)) for elem in projects]
import inspect
from typing import Dict, Optional
from uuid import UUID
from pydantic import BaseModel
from sdk import globals
from sdk.constants.enums import Mode
from sdk.primitive import Primitive
class ContextKey(BaseModel):
    """Symbolic reference to a single entry in a task's runtime context."""

    # The context dictionary key this object refers to.
    key: str
class Context:
    """Placeholder handed to real-time task functions during tracing.

    Indexing it does not return a value; it yields a symbolic ContextKey
    recording which context entry was requested.
    """

    def __getitem__(self, name):
        return ContextKey(key=name)
class Task(BaseModel):
    """A named collection of primitives executed either in batch or real time."""

    name: str
    mode: Mode
    # Registered primitives, keyed by their UUID. (Pydantic deep-copies this
    # default, so the shared-mutable-default pitfall does not apply.)
    primitives: Dict[UUID, Primitive] = {}
    # UUID of the primitive whose output is the task's result (real-time only).
    result: Optional[UUID] = None

    def add_primitive(self, primitive):
        """Register ``primitive`` with this task, keyed by its id."""
        self.primitives[primitive.id] = primitive

    # TODO (LLM-616): We need to issue a request to the backend to actually run the task.
    def __call__(self, context: Optional[Dict] = None) -> Dict:
        """Validate ``context`` against the task's mode and return the payload dict.

        Raises ValueError if a real-time context is missing required keys.
        """
        if self.mode == Mode.BATCH:
            assert context is None, "Batch task must not have a context"
        elif self.mode == Mode.REAL_TIME:
            assert context is not None, "Real-time task must have a context"
            assert isinstance(context, Dict), "Context must be a dictionary"

            # Every key any primitive reads must be supplied by the caller.
            required_keys = {
                key for primitive in self.primitives.values() for key in primitive.context_keys
            }

            # Fix: compute the missing keys once instead of scanning the
            # context twice (an all() pass followed by a second comprehension).
            missing_keys = [key for key in required_keys if key not in context]
            if missing_keys:
                raise ValueError(f"Context is missing the following required keys: {missing_keys}")

        data = self.dict(exclude_none=True)
        if context:
            data["context"] = context
        return data
def task(name: str, mode: str):
    """Decorator factory that builds a Task by tracing the decorated function.

    ``mode`` is the string value of a ``Mode`` member ('batch' or real-time).
    Real-time task functions must accept exactly one argument, ``context``,
    and return a Primitive; batch task functions take no arguments and
    return nothing. The decorator returns the constructed Task instance.
    """
    # Fix: Mode(mode) already raises ValueError for unknown values, so the
    # old redundant `if mode not in Mode` check after it was dead code.
    mode = Mode(mode)

    def decorator(func):
        # Validate the traced function's signature against the mode.
        sig = inspect.signature(func)
        if mode == Mode.REAL_TIME:
            if len(sig.parameters) != 1 or "context" not in sig.parameters:
                raise TypeError("Real-time task function must accept a single argument 'context'")
        elif mode == Mode.BATCH:
            if len(sig.parameters) != 0:
                raise TypeError("Batch task function must accept no arguments")

        try:
            # Tracing works via the module-global current_task, which the
            # primitives register themselves against while func runs.
            globals.current_task = Task(name=name, mode=mode)
            if mode == Mode.REAL_TIME:
                context = Context()
                result = func(context)
                assert isinstance(result, Primitive), "Real-time task must return a Primitive"
                globals.current_task.result = result.id
            else:
                result = func()
                assert result is None, "Batch task must not return anything"
            task_instance = globals.current_task
        finally:
            # Always set current_task to None, even if an exception was thrown
            globals.current_task = None
        return task_instance

    return decorator
Usage
=====
Runmanager is the primary means of defining and managing the set of experiment parameters
(global variables - see §5.1.3) used in the labscript experiment logic. Runmanager also
handles the creation of each hdf5 shot file, and the invocation of labscript via the execution
of a user specified labscript experiment logic file.
The graphical interface
-----------------------
We believe that the manipulation of parameters, along with controls
for producing shots, is best implemented in a graphical interface. Critical information
on the current runmanager configuration, along with controls for generating new shots, is
located in an always-visible toolbar at the top of the runmanager interface. These comprise
(as labelled in :numref:`fig-overview`):
.. _fig-overview:
.. figure:: img/runmanager_overview.png
The runmanager graphical interface with the main controls labelled as per the
below text. The ‘output’ tab is currently shown.
#. The engage button: This begins production of the appropriate number of shot files.
The number of shot files that will be produced is displayed prominently in the button
text so that any mistakes made when defining the parameter space scan can be quickly
corrected prior to beginning shot generation. This button can also be ‘clicked’ via the
F5 key on a keyboard.
#. The abort button: This stops the production of shot files prematurely.
#. The restart subprocess button: Primarily for debugging and for use during labscript
development, this button restarts the subprocess that manages the execution of the
labscript experiment logic file, which in turn generates and stores hardware instructions
inside the hdf5 file (see :ref:`usage:Shot creation`).
#. The shuffle checkbox: This checkbox controls the global setting for whether parameter
space scans are shuffled or not. This is a tri-state checkbox (all-some-none) displaying
the current shuffle state on the axes tab. Clicking the checkbox will overwrite the
state of each entry on the axes tab with the new state of the global checkbox. For
more details, see :ref:`usage:Parameter space scans`.
#. The run shots checkbox: If ticked prior to clicking the engage button, shot files will
be sent immediately to the BLACS queue once the hardware instructions have been
generated by labscript.
#. The view shots checkbox: If ticked prior to clicking the engage button, shots will be
sent to runviewer once the hardware instructions have been generated by labscript.
Runviewer is assumed to be running locally, and will be launched if it is not already
running once the first hdf5 file has been generated.
#. The labscript file: The Python file containing the experiment logic to be compiled
into hardware instructions (see :doc:`labscript <labscript:index>`).
#. The shot output folder: The location to store the hdf5 shot files. By default, the
location in specified by the combination of a value in the laboratory PC configuration
file (see :doc:`labconfig <labscript-suite:labconfig>`), the name of the experiment logic Python file and
the current date. The location automatically updates, at midnight, to a new folder
for the day provided the folder location is left as the default.
#. The BLACS hostname: The network hostname of the PC the hdf5 shot files are to
be sent to if the ‘run shots’ checkbox is ticked. It is expected that BLACS is running
on the specified PC, and that network access (including firewalls and other network
access controls) is configured appropriately.
#. The open in editor button: This button open the specified labscript experiment logic
file in the text editor specified in the laboratory PC configuration file (see labconfig
in the glossary).
#. The reset shot output folder button: This button resets the shot output folder to
the default. This will re-enable the auto incrementation of the folder (based on the
current date), which is disabled for custom locations.
These controls provide rapid access to the key functionality of runmanager (creating and
distributing shot files) at all times, making for an efficient workflow.
The rest of the runmanager interface exists within a set of tabs. The first 3 tabs contain
further runmanager specific controls:
12. The output tab: This tab contains the terminal output of the shot creation process
including the terminal output produced during the execution of the labscript experiment
logic file. For example, Python `print` statements included in
the experiment logic code will appear here during shot creation. This makes it easy
to debug the experiment logic code using simple methods common to general purpose
programming. Warnings and error messages generated by the labscript API also appear
here in red text, so that any issues are immediately noticed and can be actioned.
As this output is useful for debugging purposes, we allow the tab to be ‘popped out’
into a separate window so it can be visible at the same time as another tab (to avoid
the need to frequently switch between the output and the tab containing the global
variable(s) you are currently modifying).
13. The axes tab: This tab allows the user to control the iteration order of the parameters
in the defined parameter space (see :numref:`fig-axes`). The length of each axis of the
parameter space is displayed, as is a shuffle checkbox for determining whether the
points along that axis should be shuffled before the parameter space is expanded into
the set of shots to be created. The global shuffle control (see item 4 in this list) is
linked to the state of the shuffle checkboxes on the axes tab. This feature, along
with the many benefits, is detailed further in :ref:`usage:Parameter space scans` (see feature 3 and the paragraphs
following).
14. The groups tab: This tab manages the hdf5 files that store the globals (see :numref:`fig-groups`).
Further details on managing global variables will be discussed in :ref:`usage:Managing global variables`.
These tabs are then followed by an arbitrary number of tabs containing sets of global
variables, which will be discussed further in :ref:`usage:Managing global variables`.
.. _fig-axes:
.. figure:: img/runmanager_axes.png
The ‘Axes’ tab of runmanager. This tab displays a list of all global variables
(indicated by the blue outer product icon) or groups of global variables (indicated by the
icon with red parallel bars) that form axes of the parameter space that will be scanned over
(see item 13 and :ref:`usage:Parameter space scans` for further details). The order of the axes
can be changed using the controls to the left of the list, which sets the order in which the
outer product of the axes is performed (when generating the shot files).
In addition to this, runmanager can save and restore the entire GUI state via the
relevant menu items in the ‘File’ menu. This allows rapid switching between different types
of experiment logic and/or globals files. [2]_ This is particularly useful for shared experiment
apparatuses, where different users want to run different experiments, and for the cases
where a user wishes to rapidly switch between one of more diagnostic configurations they
have previously saved.
Managing global variables
-------------------------
Runmanager provides a simple interface for grouping and modifying global variables. As
mentioned previously, the ‘groups’ tab in runmanager handles creating and opening the
hdf5 files that store the global variables. There are two levels of organisation for global
variables:
* at the file level (globals can be stored across multiple files, the union of which is used
to generate shots), and
* groups within each file.
.. _fig-groups:
.. figure:: img/runmanager_groups.png
The ‘Groups’ tab of runmanager. This tab displays the groups of global
variables (stored in hdf5 files) that have been loaded into runmanager. From this tab,
users can enable/disable the use of these globals when compiling shots (using the ‘active’
checkboxes) and open/close an editing tab for each group. The editing tabs, when open, are
displayed as additional tabs on the left most edge of the runmanager interface. See :ref:`usage:Managing global variables`
for further details on managing globals.
Globals groups are created from the ‘groups’ tab in runmanager and can have arbitrary
names (including spaces and special symbols). The only requirement is that a group name
is unique within its file (it can however have the same name as a group in a different file).
Globals within a group are then only used in the creation of shots if the ‘active’ checkbox
for the group is checked on the groups tab (see :numref:`fig-groups`). This provides a simple way
of switching between different groups of globals, allowing labs to maintain a common set
of parameters for their experiments as well as individual parameter sets for specific users
and/or experiments. For example, rather than modifying a set of globals in a group, a user
could instead deactivate the group containing those globals, and instead ask runmanager to
pull those globals from a separate file.
Each group of globals can be opened for editing in a new tab. We provide columns
for the global name, value and units. The global name must be a :doc:`valid Python variable
name <python:reference/lexical_analysis>`, and must not conflict with any member of the pylab library, python keywords,
or existing items in the Python `__builtin__` module. This ensures that it can be injected
into the labscript experiment logic (see :doc:`labscript <labscript:index>`) without conflicting with existing Python
functionality. The global name must also be unique across all active groups, as global
groups are joined into a single set before passing the globals into the labscript experiment
logic.
The value of a global can be any Python expression (including the use of functions from
the numpy module), that evaluates to a datatype supported by hdf5, such as, but not
limited to:
* a number: `1234` or `780e-9`,
* a string: `'N_atoms'`,
* a list or numpy array (which will be treated as an axis of a parameter space to scan,
where the global variable will contain only one of the elements of the list or array in
each shot): `[1, 2, 3]` or `array([1, 2, 3])`,
* a tuple (which despite being list like, will not be treated as an axis of a parameter
space to scan and will instead be passed into labscript as the tuple specified): `(1, 2, 3)`,
* a Boolean: `True` or `False`
* an equation: `1+2`
* a Python inbuilt, or numpy, function call that returns a valid value: `linspace(0, 10, 10)`,
* an expression that references another defined global variable by name (the value
of this global variable is used in its place): `2*other_global` or `linspace(0, 10, other_global)`,
* a Boolean expression: `(other_global1 and other_global2)` or `(other_global3 == 7)`
or `(other_global4)`, or
* any of the above plus a Python comment: `780e-9 #This was previously 781e-9`.
As these expressions can become quite complex (see :numref:`fig-complex-globals`), the tooltip for the value
cells displays the evaluated result of the Python expression. The value cell is also colour
coded to the successful evaluation of the expression, so that mistakes can be easily identified
(see :numref:`fig-evaluation-error`).
.. _fig-complex-globals:
.. figure:: img/runmanager_complex_globals.png
An example of complex global variables that utilise Python expressions to define their value.
Note, for example the `drop_time` global variable, whose full expression is shown below. The
`drop_time` used is always drawn from one of three global variables, but the global variable selected
is determined by a separate global variable (a Boolean) and may contain a list of drop times if
the user wishes to image multiple species. In the case of the expression generating a list, this
global becomes an axis of a parameter space, running two shots for every other data point in the
parameter space (one shot to image each of the two species our experiment supports). Such an
expression could not be defined within experiment logic as parameter spaces must be defined within
runmanager, not labscript. In order to simplify the view of globals with complex expressions, the
tooltip (shown for the `central_image_rb` global) shows the value(s) the global will take in the next
compiled shot(s).
.. code-block:: python
:caption: Full expression for `drop_time` global
[ drop_time_rb if x else drop_time_k for x in central_image_order ] if
s11__imaging_both_species else ( drop_time_k if central_image_k == True
else ( drop_time_rb if central_image_rb == True else drop_time_general))
The units of the global are not currently passed into the labscript experiment logic
code, but are a way to provide context to the user within runmanager. For example, if the
labscript experiment logic multiplied a global variable for a frequency by `1e6` everywhere
it was used (or the keyword argument `units="MHz"` was used everywhere), then you could
type ‘MHz’ into the units column of runmanager so that a later user would know that the
global was expected to be of that magnitude and would not accidentally enter it in kHz
or Hz. In addition to this, globals whose values are explicitly specified as either `True` or
`False` have their units automatically set to ‘Bool’, a checkbox is placed in the units column
for easy toggling, and the units cell is colour coded to this checkbox for easy observation
of the state. We frequently use this functionality to enable/disable various stages of our
experiment logic file (see :numref:`fig-bools`).
.. _fig-evaluation-error:
.. figure:: img/runmanager_eval_error.png
An example of an evaluation error in a global variable. The user is notified of
the error in two places: an icon appears next to the tab name and the global in question is
highlighted in red. The tooltip displays the cause of the error, in this case a Python syntax
error.
While we recommend storing globals in a dedicated set of files, the storage format for
the globals is identical to that in any shot, which allows a user to easily load in globals from
existing shots (even ones that have been executed and analysed). However, once pointed
at an existing shot file, any modification to globals will modify that shot file, thus partially
destroying the complete record of the experiment. [3]_ Thus, we encourage this feature to only
be used for the cases where you wish to look at the globals from an old shot or where you
wish to use the globals, without modification, to compile new shots.
.. _fig-bools:
.. figure:: img/runmanager_bools.png
An example of how a labscript experiment can be parameterised by a series
of Boolean global variables. Here we split up the production of a BEC into several stages.
We name each global with a prefix that increments in order to keep the globals in an
appropriate sort order. Runmanager detects the Boolean type of the global, and provides a
simple checkbox toggle in the units column. By using these global variables in our labscript
experiment logic file, as the Boolean expression for an if statement, we can quickly turn
on/off various stages of the BEC production process (which is very useful when debugging
or optimising the BEC production process).
Parameter space scans
---------------------
One of the key features of runmanager (and critical goals of our scientific control system)
is the ability to easily automate the traversal of a large parameter space, an increasingly
important requirement for performing modern ultracold atom experiments. Runmanager
provides four features for managing parameter space scans:
#. The automatic detection of global variables that are defined as a list. [4]_ Such globals
are labelled ‘outer’ in the expansions column as all such globals will be combined, via
an outer product, into the parameter space to be scanned. The number of shots to be
generated, which is simply the product of the lengths of all ‘outer’ product globals, is
displayed next to the engage button.
#. The ability to define what we term ‘zip groups’ after the Python function `zip`. Two (or
more) globals (specified as lists) can be grouped together so that they iterate through
values in lockstep. In this instance, the zip group is used as a single axis of the outer
product rather than one axis for each global.
#. The third feature is the ability to define the order in which the axes of the parameter
space are iterated over when producing individual shots (see the ‘Axes’ tab discussed
previously in :ref:`usage:The graphical interface`).
#. The ability to randomly shuffle the order of values within each global (or zip group)
defined as a list. This can be done on a per global basis or on the entire set of shots
that spans the defined parameter space.
These features provide a powerful basis for performing complex experiments.
Consider the following example. Many of the early stages of BEC production (for
instance the MOT or magnetic trap stages) should be optimised for best phase-space density.
Phase space density is calculated from several parameters; the most important being atom
number and atom cloud temperature. While atom number can be easily measured from
an absorption image from a single shot, temperature is most commonly determined from
analysing the result of multiple shots. In this case, the drop time (the time between releasing
the atoms from the trap and taking the absorption image) is varied for each shot and the
temperature determined by fitting to the linearised relationship between atom cloud size
and drop time. Already, it can be seen that measuring the phase space density for a single
set of parameters requires several shots, which can be easily automated via the feature 1
described above.
Now consider the optimisation of MOT or magnetic trap parameters. Many of these are
coupled and can not be independently optimised. As such, it is preferable to optimise two
or three variables at once, measuring the phase-space density at each point to determine the
optimal set of parameters. Such a parameter space typically takes several hours to complete
due to the large number of shots that must be run. A BEC apparatus is likely to undergo
systematic drifts during this time, which may invalidate the results. However, with careful
thought, features 3 and 4 can be used to counteract this. For example, systematic drift will
affect the linearity of the data when determining temperature, especially if the acquisition
of each data point is separated by a significant period of time. However, by defining the
drop time to be the innermost item of the outer product, you ensure that all shots needed
to determine the phase-space density for a single set of MOT parameters are executed as
close together in time as possible. Shuffling the order of the drop time then eliminates short
term systematic drift, as does separately shuffling the order of the values in each remaining
axis of the outer product (the MOT parameters). If long-term systematic drifts need to
be quantified, then an additional axis can be added to the outer product at the outermost
layer in order to repeat each of the shots a prescribed number of times (by defining an
additional ‘dummy’ global variable as `range(N)` where `N` is the number of times to repeat
each shot).
While the above example may seem complicated, runmanager makes it trivial to implement.
A user simply defines the list of values to scan over for each parameter, sets the order
in which the outer product should use each axis, and specifies whether the values for each
axis should be shuffled. Once done, clicking the engage button generates the sequence of
shots and sends them to BLACS to be executed on the experiment.
Evaluation of globals
---------------------
All global variable expressions are automatically evaluated after a change to any global
variable. This serves to update the tooltip with the result of the expression, detect
axes of a parameter space to scan (and group them into zip groups if appropriate) and
warn the user of any errors during the evaluation of the globals. As discussed previously,
runmanager allows these global variable expressions to reference other global variables. This
allows a user to maintain a record of a set of parameters, and all relevant quantities derived
from one or more of those parameters, without ever storing a parameter more than once.
This ensures that important quantities need not be derived (from globals) in the labscript
experiment logic script, and that they are accessible directly during the analysis stage (see
:doc:`lyse <lyse:index>`).
To implement this, we take advantage of the Python built-in function exec which not
only evaluates a string containing a Python expression, but can do so from within a controlled
namespace. This has a two-fold benefit. The first is that it allows us to provide
access to a specific set of functions that can be used from within the Python expressions
(such as numpy functions like `linspace`). The second is that it allows us to keep track of the
relationship between global variables, which is critical for both descriptive error messages
and automatically detecting which globals should be combined into a zip group.
The Python `exec` function is given access to a namespace to work in via an optional
argument in the form of a dictionary. Keys and values in this dictionary correspond to
variable names in the namespace and their associated values respectively. Rather than
using a native Python dictionary for the namespace, we subclass the Python dictionary
and override the built-in dictionary method for looking up entries in the dictionary. When
combined with exec, this translates to our `dict` subclasses tracking each time the `exec`
function requests the value of a variable in the namespace. This then provides us with a
mapping of each global variable, and the names of global variables that it depends on. In
order to resolve both the order in which global variable expressions are evaluated in, and
detection of any recursive relationships, we begin by evaluating all global expressions and
then recursively re-evaluate the set of globals that did not evaluate in the previous iteration.
The first iteration will evaluate any (correctly defined) independent globals, and subsequent
iterations will then be able to evaluate globals that depend on other globals (once those
other globals have been evaluated by a previous iteration).
The hierarchy of global interdependencies is then used to determine automatic zip group
names, which are based on the name of the global in the hierarchy that does not depend on
any other. If a global depends on multiple other globals, then the zip group name is chosen
semi-randomly based on the order of the items in the Python dictionary (which depends on
a hash of the dictionary key names and the size of the dictionary). However, it is of course
always possible to overwrite the automatic zip group name with something else should our algorithm choose incorrectly.
We believe that this complex evaluation of global variables is only possible due to the
use of an interpreted language that has tools for parsing its own syntax. As such, the
choice of Python as our programming language has allowed us to implement extremely
useful, advanced features that might otherwise be too difficult to produce in more low level
languages such as C++.
Shot creation
-------------
The internal process for generating shot files is quite complex. This is primarily motivated
by the desire for modularity (for example, to separate shot file generation from hardware
instruction generation) and the desire for robustness. As runmanager ultimately initiates the
execution of user code (the labscript experiment logic file), there is a risk that problems in the
user code could crash runmanager. We mitigate this by using a multi-process architecture.
We originally spawned a new Python process for each shot (in order to guarantee the
internal state of labscript was fresh). However the time required to start a Python process
(especially on Windows) was a considerable fraction of the entire shot generation time. As
such we now use a single, long-lived, Python process and clean-up the internal state of
labscript and Python explicitly after each shot.
To generate shot files, runmanager:
#. Re-evaluates all globals (see :ref:`usage:Evaluation of globals`). This both determines the number of shots to
produce, and generates the evaluated set of global variables for each shot.
#. The globals are then written to hdf5 files, one file for each shot. We also write
the unevaluated globals into every hdf5 file, in order to provide a complete record
of the experiment (the unevaluated globals contain information about the parameter
space that is not available when looking at the single point of parameter space in the
evaluated globals of a single shot file).
#. In a thread (in order to keep the GUI responsive), we iterate over the set of files
and send their file paths to a long-running subprocess (launched by runmanager at
startup) that is used to execute labscript code in an isolated environment. We call
this process the ‘compilation subprocess’.
#. The subprocess, which has the labscript API imported, calls an initialisation method
to inform the labscript API of the hdf5 file to write hardware instructions to.
#. The subprocess loads the global variables from runmanager into the `__builtin__`
dictionary.
#. The subprocess then executes the labscript experiment logic file (using the Python
function `exec`) in an isolated namespace, which invokes the labscript API via the
user's experiment logic and generates the required hardware instructions and saves
them in the hdf5 file. Terminal output (for example, `print` statements) are sent
back to the parent runmanager process and placed in the output tab.
#. The subprocess restores the `__builtin__` dictionary to its original state to prevent
globals from polluting subsequent shots. A clean-up method from the labscript API
is also called so that the internal state of the labscript Python module is also reset.
Once shot files are created, the file paths are sent to runviewer or BLACS, as determined by
the checkboxes in the runmanager GUI, for viewing and/or executing the shots respectively.
This architecture also has several unrealised benefits:
#. If the need arose, we could easily parallelise the generation of hardware instructions
by instantiating multiple instances of the compilation subprocess.
#. We could use runmanager as a generic parameter (space) management software by
replacing the compilation subprocess with something else. For example, runmanager
could be used to manage parameters for simulations, producing one shot file per
simulation to be run in the same way we do for real experiments. These files could
then be sent to a scheduling program (like BLACS) that feeds them to the simulation
software.
.. rubric:: Footnotes
.. [1] Documentation taken from Phillip T. Starkey *A software framework for control and automation of precisely timed experiments*
Thesis, Monash University (2019) https://doi.org/10.26180/5d1db8ffe29ef
.. [2] For clarity, the values of the globals are not saved in this configuration file, but simply the location
of the hdf5 file containing the globals. This means that any globals in files shared between saved
runmanager configurations will share their values. For cases where global values should differ between
runmanager configurations, separate globals files should be used.
.. [3] Note that in an executed shot file, globals exist in two formats: the evaluated format (one point in the
parameter space) used by labscript, and the raw strings as displayed in runmanager. Only the latter
would be overwritten if globals were edited in the manner described in the main body text.
.. [4] Runmanager considers both Python lists and numpy arrays to be what we refer to as ‘lists’ in this
section.
| /runmanager-3.2.0rc1.tar.gz/runmanager-3.2.0rc1/docs/source/usage.rst | 0.947143 | 0.867148 | usage.rst | pypi |
"""The vision/dataset module containing the vision Dataset class and its functions."""
# pylint: disable=protected-access
import random
from abc import abstractmethod
from collections import defaultdict
from copy import copy
from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Type, TypeVar, Union
import numpy as np
import torch
from torch.utils.data import BatchSampler, DataLoader, Dataset, Sampler
from runml_checks.core.errors import (runml_checksBaseError, runml_checksNotImplementedError, runml_checksValueError,
ValidationError)
from runml_checks.utils.logger import get_logger
from runml_checks.vision.batch_wrapper import Batch
from runml_checks.vision.task_type import TaskType
from runml_checks.vision.utils.image_functions import ImageInfo
from runml_checks.vision.utils.transformations import get_transforms_handler
__all__ = ['VisionData']
VD = TypeVar('VD', bound='VisionData')
class VisionData:
"""VisionData represent a base task in runml_checks. It wraps PyTorch DataLoader together with model related metadata.
The VisionData class is containing additional data and general methods intended for easily accessing
metadata relevant for validating a computer vision ML models.
Parameters
----------
data_loader : DataLoader
PyTorch DataLoader object. If your data loader is using IterableDataset please see note below.
num_classes : int, optional
Number of classes in the dataset. If not provided, will be inferred from the dataset.
label_map : Dict[int, str], optional
A dictionary mapping class ids to their names.
transform_field : str, default: 'transforms'
Name of transforms field in the dataset which holds transformations of both data and label.
"""
def __init__(
        self,
        data_loader: DataLoader,
        num_classes: Optional[int] = None,
        label_map: Optional[Dict[int, str]] = None,
        transform_field: Optional[str] = 'transforms'
):
    """Wrap *data_loader* and eagerly validate the user-supplied formatters.

    See the class docstring for parameter descriptions. Validation failures
    are not raised here: they are recorded on the instance (and logged) so
    individual checks can later decide what may run, via
    ``assert_images_valid`` / ``assert_labels_valid``.
    """
    # Create data loader that uses IndicesSequentialSampler, which always return batches in the same order
    self._data_loader, self._sampler = self._get_data_loader_sequential(data_loader)
    self._num_classes = num_classes
    self._label_map = label_map
    self._transform_field = transform_field
    # Each of these stays None only if the corresponding formatter validated cleanly.
    self._image_formatter_error = None
    self._label_formatter_error = None
    self._get_classes_error = None
    # Validate against one real batch so formatter problems surface at construction time.
    batch = next(iter(self._data_loader))
    try:
        self.validate_image_data(batch)
    except runml_checksNotImplementedError:
        self._image_formatter_error = 'batch_to_images() was not implemented, some checks will not run'
        get_logger().warning(self._image_formatter_error)
    except ValidationError as ex:
        self._image_formatter_error = f'batch_to_images() was not implemented correctly, the validation has ' \
                                      f'failed with the error: "{ex}". To test your image formatting use the ' \
                                      f'function `validate_image_data(batch)`'
        get_logger().warning(self._image_formatter_error)
    try:
        self.validate_label(batch)
    except runml_checksNotImplementedError:
        self._label_formatter_error = 'batch_to_labels() was not implemented, some checks will not run'
        get_logger().warning(self._label_formatter_error)
    except ValidationError as ex:
        self._label_formatter_error = f'batch_to_labels() was not implemented correctly, the validation has ' \
                                      f'failed with the error: "{ex}". To test your label formatting use the ' \
                                      f'function `validate_label(batch)`'
        get_logger().warning(self._label_formatter_error)
    try:
        # get_classes() consumes formatted labels, so it is only meaningful
        # when the labels formatter itself validated successfully.
        if self._label_formatter_error is None:
            self.validate_get_classes(batch)
        else:
            self._get_classes_error = 'Must have valid labels formatter to use `get_classes`'
    except runml_checksNotImplementedError:
        self._get_classes_error = 'get_classes() was not implemented, some checks will not run'
        get_logger().warning(self._get_classes_error)
    except ValidationError as ex:
        self._get_classes_error = f'get_classes() was not implemented correctly, the validation has ' \
                                  f'failed with the error: "{ex}". To test your formatting use the ' \
                                  f'function `validate_get_classes(batch)`'
        get_logger().warning(self._get_classes_error)
    # The class-index cache is built lazily via init_cache()/update_cache().
    self._classes_indices = None
    self._current_index = None
@classmethod
def from_dataset(
        cls: Type[VD],
        data: Dataset,
        batch_size: int = 64,
        shuffle: bool = True,
        num_workers: int = 0,
        pin_memory: bool = True,
        collate_fn: Optional[Callable] = None,
        num_classes: Optional[int] = None,
        label_map: Optional[Dict[int, str]] = None,
        transform_field: Optional[str] = 'transforms'
) -> VD:
    """Build a VisionData by wrapping *data* in a newly constructed DataLoader.

    Parameters
    ----------
    data : Dataset
        instance of a Dataset.
    batch_size : int, default 64
        how many samples per batch to load.
    shuffle : bool, default True
        set to ``True`` to have the data reshuffled at every epoch.
    num_workers : int, default 0
        how many subprocesses to use for data loading. ``0`` loads in the main process.
    pin_memory : bool, default True
        if ``True``, copy Tensors into CUDA pinned memory before returning them.
    collate_fn : Optional[Callable]
        merges a list of samples into a mini-batch; defaults to splitting
        (image, label) pairs into two aligned lists.
    num_classes : Optional[int], default None
        number of classes in the dataset; inferred from the dataset when omitted.
    label_map : Optional[Dict[int, str]], default None
        mapping from class ids to their names.
    transform_field : Optional[str], default 'transforms'
        name of the dataset attribute holding the data/label transformations.

    Returns
    -------
    VisionData
    """
    def default_collate(samples):
        # Split a list of (image, label) pairs into two aligned lists.
        images, labels = zip(*samples)
        return list(images), list(labels)

    loader = DataLoader(
        dataset=data,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
        collate_fn=collate_fn or default_collate
    )
    return cls(
        data_loader=loader,
        num_classes=num_classes,
        label_map=label_map,
        transform_field=transform_field
    )
@abstractmethod
def get_classes(self, batch_labels: Union[List[torch.Tensor], torch.Tensor]) -> List[List[int]]:
    """Get a labels batch and return classes inside it.

    Expected to return one entry per sample in the batch, each entry being
    the list of class ids present in that sample's label. Must be
    implemented by a task-specific subclass.
    """
    raise runml_checksNotImplementedError('get_classes() must be implemented in a subclass')
@abstractmethod
def batch_to_labels(self, batch) -> Union[List[torch.Tensor], torch.Tensor]:
    """Transform a batch of data to labels.

    Must be implemented by a task-specific subclass.
    """
    raise runml_checksNotImplementedError('batch_to_labels() must be implemented in a subclass')
@abstractmethod
def infer_on_batch(self, batch, model, device) -> Union[List[torch.Tensor], torch.Tensor]:
    """Infer on a batch of data.

    Must be implemented by a task-specific subclass.
    """
    raise runml_checksNotImplementedError('infer_on_batch() must be implemented in a subclass')
@abstractmethod
def batch_to_images(self, batch) -> Sequence[np.ndarray]:
    """
    Transform a batch of data to images in the accepted format.

    Parameters
    ----------
    batch : torch.Tensor
        Batch of data to transform to images.

    Returns
    -------
    Sequence[np.ndarray]
        List of images in the accepted format. Each image in the iterable must be a [H, W, C] 3D numpy array.
        See notes for more details.
        :func: `batch_to_images` must be implemented in a subclass.

    Examples
    --------
    >>> import numpy as np
    ...
    ...
    ... def batch_to_images(self, batch):
    ...     # Converts a batch of normalized images to rgb images with range [0, 255]
    ...     inp = batch[0].detach().numpy().transpose((0, 2, 3, 1))
    ...     mean = [0.485, 0.456, 0.406]
    ...     std = [0.229, 0.224, 0.225]
    ...     inp = std * inp + mean
    ...     inp = np.clip(inp, 0, 1)
    ...     return inp * 255

    Notes
    -----
    Each image in the iterable must be a [H, W, C] 3D numpy array. The first dimension must be the image height
    (y axis), the second being the image width (x axis), and the third being the number of channels. The numbers in
    the array should be in the range [0, 255]. Color images should be in RGB format and have 3 channels, while
    grayscale images should have 1 channel.
    """
    raise runml_checksNotImplementedError('batch_to_images() must be implemented in a subclass')
def validate_label(self, batch):
"""Validate a batch of labels."""
# default implementation just calling the function to see if it runs
self.batch_to_labels(batch)
def validate_prediction(self, batch, model, device):
"""
Validate the prediction.
Parameters
----------
batch : t.Any
Batch from DataLoader
model : t.Any
device : torch.Device
Raises
------
ValidationError
If predictions format is invalid (depends on validate_infered_batch_predictions implementations)
runml_checksNotImplementedError
If infer_on_batch not implemented
"""
self.validate_infered_batch_predictions(self.infer_on_batch(batch, model, device))
@staticmethod
def validate_infered_batch_predictions(batch_predictions):
"""Validate the infered predictions from the batch."""
# isn't relevant for this class but is still a function we want to inherit
def update_cache(self, batch: Batch):
    """Get labels and update the classes' metadata info.

    Requires ``init_cache()`` to have been called first, as it appends to
    ``self._classes_indices``. On any labels/formatter error the whole cache
    is invalidated (set to None) rather than raising.
    """
    try:
        # In case there are no labels or there is an invalid formatter function, this call will raise exception
        classes_per_label = self.get_classes(batch.labels)
    except runml_checksBaseError:
        self._classes_indices = None
        return
    # classes_per_label holds, for each sample in the batch, the class ids
    # present in that sample's label.
    for batch_index, classes in enumerate(classes_per_label):
        for single_class in classes:
            # Translate the running batch position into an index of the underlying Dataset.
            dataset_index = self.to_dataset_index(self._current_index + batch_index)[0]
            self._classes_indices[single_class].append(dataset_index)
    self._current_index += len(classes_per_label)
def init_cache(self):
"""Initialize the cache of the classes' metadata info."""
self._classes_indices = defaultdict(list)
self._current_index = 0
@property
def classes_indices(self) -> Dict[int, List[int]]:
"""Return dict of classes as keys, and list of corresponding indices (in Dataset) of samples that include this\
class (in the label)."""
if self._classes_indices is None:
raise runml_checksValueError('Could not process labels.')
if self._current_index < len(self._sampler):
raise runml_checksValueError('Cached data is not computed on all the data yet.')
return self._classes_indices
@property
def n_of_samples_per_class(self) -> Dict[Any, int]:
"""Return a dictionary containing the number of samples per class."""
return {k: len(v) for k, v in self.classes_indices.items()}
@property
def data_loader(self) -> DataLoader:
"""Return the data loader."""
return self._data_loader
@property
def transform_field(self) -> str:
    """Return the name of the transforms attribute on the underlying dataset."""
    return self._transform_field
@property
def has_labels(self) -> bool:
"""Return True if the data loader has labels."""
return self._label_formatter_error is None
@property
def has_images(self) -> bool:
"""Return True if the data loader has images."""
return self._image_formatter_error is None
@property
def task_type(self) -> TaskType:
    """Return the task type: classification, object_detection or other.

    The base class always reports ``TaskType.OTHER``; presumably overridden
    by task-specific subclasses.
    """
    return TaskType.OTHER
@property
def num_classes(self) -> int:
"""Return the number of classes in the dataset."""
if self._num_classes is None:
self._num_classes = len(self.classes_indices.keys())
return self._num_classes
@property
def num_samples(self) -> int:
"""Return the number of samples in the dataset."""
return len(self._sampler)
@property
def original_num_samples(self) -> int:
"""Return the number of samples in the original dataset."""
return len(self._data_loader.dataset)
@property
def data_dimension(self):
    """Return how many dimensions the image data have."""
    # Peek at the first image of the first batch; assumes the loader yields
    # at least one batch — TODO confirm behaviour for empty loaders.
    image = self.batch_to_images(next(iter(self)))[0]  # pylint: disable=not-callable
    return ImageInfo(image).get_dimension()
def label_id_to_name(self, class_id: int) -> str:
"""Return the name of the class with the given id."""
# Converting the class_id to integer to make sure it is an integer
class_id = int(class_id)
if self._label_map is None:
return str(class_id)
elif class_id not in self._label_map:
get_logger().warning('Class id %s is not in the label map. Add it to map '
'in order to show the class name instead of id', class_id)
return str(class_id)
else:
return self._label_map[class_id]
def get_transform_type(self):
    """Return a transforms handler built from the dataset's transform attribute.

    Looks up the attribute named by ``self._transform_field`` on the
    underlying Dataset and wraps it with the handler matching this task type.

    Raises
    ------
    runml_checksValueError
        If the underlying Dataset has no such attribute.
    """
    dataset_ref = self._data_loader.dataset
    # If no field exists raise error
    if not hasattr(dataset_ref, self._transform_field):
        # Fixed typo in the user-facing message: "you cat set it" -> "you can set it".
        msg = f'Underlying Dataset instance does not contain "{self._transform_field}" attribute. If your ' \
              f'transformations field is named otherwise, you can set it by using "transform_field" parameter'
        raise runml_checksValueError(msg)
    # getattr (rather than __getattribute__) honours any __getattr__ fallback,
    # matching the semantics of the hasattr() check above.
    transform = getattr(dataset_ref, self._transform_field)
    return get_transforms_handler(transform, self.task_type)
def get_augmented_dataset(self, aug) -> VD:
    """Return a copy of this vision data with *aug* prepended to the dataset transforms."""
    handler = self.get_transform_type()
    augmented_vision_data = self.copy()
    dataset_ref = augmented_vision_data.data_loader.dataset
    current_transform = dataset_ref.__getattribute__(self._transform_field)
    new_transform = handler.add_augmentation_in_start(aug, current_transform)
    dataset_ref.__setattr__(self._transform_field, new_transform)
    return augmented_vision_data
def copy(self, n_samples: int = None, shuffle: bool = False, random_state: int = None) -> VD:
    """Create new copy of this object, with the data-loader and dataset also copied, and altered by the given \
    parameters.

    Parameters
    ----------
    n_samples : int , default: None
        take only this number of samples to the copied DataLoader. The samples which will be chosen are affected
        by random_state (fixed random state will return consistent samples).
    shuffle : bool, default: False
        Whether to shuffle the samples order. The shuffle is affected random_state (fixed random state will return
        consistent order)
    random_state : int , default: None
        random_state used for the pseudo-random actions (sampling and shuffling)
    """
    # Shallow-copy self, then swap in a freshly built loader/sampler pair.
    new_vision_data = copy(self)
    copied_data_loader, copied_sampler = self._get_data_loader_copy(
        self.data_loader, shuffle=shuffle, random_state=random_state, n_samples=n_samples
    )
    new_vision_data._data_loader = copied_data_loader
    new_vision_data._sampler = copied_sampler
    # If new data is sampled, then needs to re-calculate cache
    if n_samples and self._classes_indices is not None:
        new_vision_data.init_cache()
        for batch in new_vision_data:
            # NOTE(review): update_cache() reads ``batch.labels`` from a Batch object,
            # but here it is handed the already-formatted labels — this looks
            # inconsistent; confirm which contract update_cache() expects.
            new_vision_data.update_cache(self.batch_to_labels(batch))
    return new_vision_data
def to_batch(self, *samples):
"""Use the defined collate_fn to transform a few data items to batch format."""
return self._data_loader.collate_fn(list(samples))
def to_dataset_index(self, *batch_indices):
"""Return for the given batch_index the sample index in the dataset object."""
return [self._sampler.index_at(i) for i in batch_indices]
def batch_of_index(self, *indices):
"""Return batch samples of the given batch indices."""
dataset_indices = self.to_dataset_index(*indices)
samples = [self._data_loader.dataset[i] for i in dataset_indices]
return self.to_batch(*samples)
def validate_shared_label(self, other: VD):
    """Verify that *other* is a VisionData with matching label presence and task type.

    Parameters
    ----------
    other : VisionData
        Expected to be Dataset type. dataset to compare

    Raises
    ------
    runml_checksValueError
        if datasets don't have the same label
    """
    if not isinstance(other, VisionData):
        raise ValidationError('Check requires dataset to be of type VisionTask. instead got: '
                              f'{type(other).__name__}')
    if self.has_labels != other.has_labels:
        raise ValidationError('Datasets required to both either have or don\'t have labels')
    if self.task_type != other.task_type:
        raise ValidationError('Datasets required to have same label type')
def validate_image_data(self, batch):
"""Validate that the data is in the required format.
The validation is done on the first element of the batch.
Parameters
----------
batch
Raises
------
runml_checksValueError
If the batch data doesn't fit the format after being transformed by self().
"""
data = self.batch_to_images(batch)
try:
sample: np.ndarray = data[0]
except TypeError as err:
raise ValidationError('The batch data must be an iterable.') from err
if not isinstance(sample, np.ndarray):
raise ValidationError('The data inside the iterable must be a numpy array.')
if sample.ndim != 3:
raise ValidationError('The data inside the iterable must be a 3D array.')
if sample.shape[2] not in [1, 3]:
raise ValidationError('The data inside the iterable must have 1 or 3 channels.')
sample_min = np.min(sample)
sample_max = np.max(sample)
if sample_min < 0 or sample_max > 255 or sample_max <= 1:
raise ValidationError(f'Image data should be in uint8 format(integers between 0 and 255). '
f'Found values in range [{sample_min}, {sample_max}].')
def validate_get_classes(self, batch):
"""Validate that the get_classes function returns data in the correct format.
Parameters
----------
batch
Raises
------
ValidationError
If the classes data doesn't fit the format after being transformed.
"""
class_ids = self.get_classes(self.batch_to_labels(batch))
if not isinstance(class_ids, Sequence):
raise ValidationError('The classes must be a sequence.')
if not all((isinstance(x, Sequence) for x in class_ids)):
raise ValidationError('The classes sequence must contain as values sequences of ints '
'(sequence per sample).')
if not all((all((isinstance(x, int) for x in inner_ids)) for inner_ids in class_ids)):
raise ValidationError('The samples sequence must contain only int values.')
    def validate_format(self, model, device=None):
        """Validate the correctness of the data class implementation according to the expected format.

        Parameters
        ----------
        model : Model
            Model to validate the data class implementation against.
        device
            Device to run the model on.
        """
        # Imported locally to break a circular dependency between the data
        # class and the validation utilities.
        from runml_checks.vision.utils.validation import validate_extractors  # pylint: disable=import-outside-toplevel
        validate_extractors(self, model, device=device)
    def __iter__(self):
        """Return an iterator over the dataset."""
        # Iteration is delegated directly to the wrapped torch DataLoader.
        return iter(self._data_loader)
    def __len__(self):
        """Return the number of batches in the dataset dataloader."""
        # Note: this is the batch count, not the sample count.
        return len(self._data_loader)
    def is_sampled(self):
        """Return whether the vision data is running on sample of the data."""
        # True when fewer samples than the original dataset size were drawn.
        return self.num_samples < self.original_num_samples
def assert_images_valid(self):
"""Assert the image formatter defined is valid. Else raise exception."""
if self._image_formatter_error is not None:
raise runml_checksValueError(self._image_formatter_error)
def assert_labels_valid(self):
"""Assert the label formatter defined is valid. Else raise exception."""
if self._label_formatter_error is not None:
raise runml_checksValueError(self._label_formatter_error)
if self._get_classes_error is not None:
raise runml_checksValueError(self._get_classes_error)
    @staticmethod
    def _get_data_loader_copy(data_loader: DataLoader, n_samples: int = None, shuffle: bool = False,
                              random_state: int = None):
        """Get a copy of DataLoader which is already using IndicesSequentialSampler, altered by the given parameters.

        Parameters
        ----------
        data_loader : DataLoader
            DataLoader to copy
        n_samples : int , default: None
            take only this number of samples to the copied DataLoader. The samples which will be chosen are affected
            by random_state (fixed random state will return consistent samples).
        shuffle : bool, default: False
            Whether to shuffle the samples order. The shuffle is affected random_state (fixed random state will return
            consistent order)
        random_state : int , default: None
            random_state used for the pseudo-random actions (sampling and shuffling)

        Returns
        -------
        Tuple[DataLoader, IndicesSequentialSampler]
            The copied data loader and the new sampler it uses.
        """
        # Get sampler and copy its indices if it's already IndicesSequentialSampler
        batch_sampler = data_loader.batch_sampler
        if isinstance(batch_sampler.sampler, IndicesSequentialSampler):
            indices = batch_sampler.sampler.indices
        else:
            raise runml_checksValueError('Expected data loader with sampler of type IndicesSequentialSampler')
        # If got number of samples which is smaller than the number of samples we currently have,
        # then take random sample
        if n_samples and n_samples < len(batch_sampler.sampler):
            size = min(n_samples, len(indices))
            # NOTE(review): seeding the module-global RNG mutates shared state;
            # a local random.Random(random_state) would avoid the side effect
            # but would also change which samples get drawn — confirm before
            # changing.
            if random_state is not None:
                random.seed(random_state)
            indices = random.sample(indices, size)
        # Shuffle indices if need
        if shuffle:
            if random_state is not None:
                random.seed(random_state)
            # Sampling len(indices) elements without replacement is a shuffle.
            indices = random.sample(indices, len(indices))
        # Create new sampler and batch sampler
        sampler = IndicesSequentialSampler(indices)
        new_batch_sampler = BatchSampler(sampler, batch_sampler.batch_size, batch_sampler.drop_last)
        props = VisionData._get_data_loader_props(data_loader)
        props['batch_sampler'] = new_batch_sampler
        return data_loader.__class__(**props), sampler
@staticmethod
def _get_data_loader_props(data_loader: DataLoader):
"""Get properties relevant for the copy of a DataLoader."""
attr_list = ['num_workers',
'collate_fn',
'pin_memory',
'timeout',
'worker_init_fn',
'prefetch_factor',
'persistent_workers']
aval_attr = {}
for attr in attr_list:
if hasattr(data_loader, attr):
aval_attr[attr] = getattr(data_loader, attr)
aval_attr['dataset'] = copy(data_loader.dataset)
return aval_attr
@staticmethod
def _get_data_loader_sequential(data_loader: DataLoader):
"""Create new DataLoader with sampler of type IndicesSequentialSampler. This makes the data loader have \
consistent batches order."""
# First set generator seed to make it reproducible
if data_loader.generator:
data_loader.generator.set_state(torch.Generator().manual_seed(42).get_state())
indices = []
batch_sampler = data_loader.batch_sampler
# Using the batch sampler to get all indices
for batch in batch_sampler:
indices += batch
# Create new sampler and batch sampler
sampler = IndicesSequentialSampler(indices)
new_batch_sampler = BatchSampler(sampler, batch_sampler.batch_size, batch_sampler.drop_last)
props = VisionData._get_data_loader_props(data_loader)
props['batch_sampler'] = new_batch_sampler
return data_loader.__class__(**props), sampler
class IndicesSequentialSampler(Sampler):
    """Sampler yielding a fixed list of indices sequentially, without replacement.

    Args:
        indices (sequence): a sequence of indices
    """

    indices: List[int]

    def __init__(self, indices: List[int]) -> None:
        super().__init__(None)
        # Kept by reference on purpose: callers read ``sampler.indices`` back.
        self.indices = indices

    def __iter__(self) -> Iterator[int]:
        # Yield the indices in exactly the order they were provided.
        yield from self.indices

    def __len__(self) -> int:
        return len(self.indices)

    def index_at(self, location):
        """Return for a given location, the real index value."""
        return self.indices[location]
"""The vision/dataset module containing the vision Dataset class and its functions."""
from abc import abstractmethod
from typing import List, Union
import torch
from runml_checks.core.errors import runml_checksNotImplementedError, ValidationError
from runml_checks.vision.vision_data import TaskType, VisionData
class ClassificationData(VisionData):
    """The ClassificationData class is used to load and preprocess data for a classification task.

    It is a subclass of the VisionData class. The ClassificationData class is containing additional data and general
    methods intended for easily accessing metadata relevant for validating a computer vision classification ML models.
    """

    @property
    def task_type(self) -> TaskType:
        """Return the task type (classification)."""
        return TaskType.CLASSIFICATION

    @abstractmethod
    def batch_to_labels(self, batch) -> torch.Tensor:
        """Extract the labels from a batch of data.

        Parameters
        ----------
        batch : torch.Tensor
            The batch of data.

        Returns
        -------
        torch.Tensor
            The labels extracted from the batch. The labels should be in a tensor format of shape (N,), where N is the
            number of samples in the batch. See the notes for more info.

        Examples
        --------
        >>> def batch_to_labels(self, batch):
        ...     return batch[1]

        Notes
        -----
        The accepted label format for classification is a tensor of shape (N,), when N is the number of samples.
        Each element is an integer representing the class index.
        """
        raise runml_checksNotImplementedError('batch_to_labels() must be implemented in a subclass')

    @abstractmethod
    def infer_on_batch(self, batch, model, device) -> torch.Tensor:
        """Return the predictions of the model on a batch of data.

        Parameters
        ----------
        batch : torch.Tensor
            The batch of data.
        model : torch.nn.Module
            The model to use for inference.
        device : torch.device
            The device to use for inference.

        Returns
        -------
        torch.Tensor
            The predictions of the model on the batch. The predictions should be in a OHE tensor format of shape
            (N, n_classes), where N is the number of samples in the batch.

        Examples
        --------
        >>> import torch.nn.functional as F
        ...
        ...
        ... def infer_on_batch(self, batch, model, device):
        ...     logits = model.to(device)(batch[0].to(device))
        ...     return F.softmax(logits, dim=1)

        Notes
        -----
        The accepted prediction format for classification is a tensor of shape (N, n_classes), where N is the number of
        samples. Each element is an array of length n_classes that represent the probability of each class.
        """
        raise runml_checksNotImplementedError('infer_on_batch() must be implemented in a subclass')

    def get_classes(self, batch_labels: Union[List[torch.Tensor], torch.Tensor]):
        """Get a labels batch and return classes inside it."""
        # One class id per sample, wrapped in its own list (sequence per sample).
        return batch_labels.reshape(-1, 1).tolist()

    def validate_label(self, batch):
        """
        Validate the label.

        Parameters
        ----------
        batch

        Raises
        ------
        ValidationError
            If the labels are not a 1D torch.Tensor.
        """
        labels = self.batch_to_labels(batch)
        if not isinstance(labels, torch.Tensor):
            raise ValidationError('Check requires classification label to be a torch.Tensor')
        label_shape = labels.shape
        if len(label_shape) != 1:
            raise ValidationError('Check requires classification label to be a 1D tensor')

    @staticmethod
    def validate_infered_batch_predictions(batch_predictions, n_classes: int = None, eps: float = 1e-3):
        """
        Validate the infered predictions from the batch.

        Parameters
        ----------
        batch_predictions : t.Any
            The infered predictions from the batch
        n_classes : int , default: None
            Number of classes.
        eps : float , default: 1e-3
            Epsilon value to be used in the validation, by default 1e-3

        Raises
        ------
        ValidationError
            If predictions format is invalid
        runml_checksNotImplementedError
            If infer_on_batch not implemented
        """
        if not isinstance(batch_predictions, torch.Tensor):
            raise ValidationError('Check requires classification predictions to be a torch.Tensor')
        pred_shape = batch_predictions.shape
        if len(pred_shape) != 2:
            raise ValidationError('Check requires classification predictions to be a 2D tensor')
        if n_classes and pred_shape[1] != n_classes:
            raise ValidationError(f'Check requires classification predictions to have {n_classes} columns')
        # Use tensor-level ops instead of Python's builtin any()/abs(), which
        # would iterate the tensor element by element (a host sync per element
        # on GPU tensors). The condition is unchanged.
        if torch.any(torch.abs(batch_predictions.sum(dim=1) - 1) > eps):
            raise ValidationError('Check requires classification predictions to be a probability distribution and'
                                  ' sum to 1 for each row')
"""The vision/dataset module containing the vision Dataset class and its functions."""
from abc import abstractmethod
from typing import List, Sequence
import torch
from runml_checks.core.errors import runml_checksNotImplementedError, ValidationError
from runml_checks.vision.vision_data import TaskType, VisionData
class DetectionData(VisionData):
    """The DetectionData class is used to load and preprocess data for a object detection task.

    It is a subclass of the VisionData class. The DetectionData class is containing additional data and general
    methods intended for easily accessing metadata relevant for validating a computer vision object detection ML models.
    """

    @property
    def task_type(self) -> TaskType:
        """Return the task type (object_detection)."""
        return TaskType.OBJECT_DETECTION

    @abstractmethod
    def batch_to_labels(self, batch) -> List[torch.Tensor]:
        """Extract the labels from a batch of data.

        Parameters
        ----------
        batch : torch.Tensor
            The batch of data.

        Returns
        -------
        List[torch.Tensor]
            The labels extracted from the batch. The labels should be a list of length N containing tensor of shape
            (B, 5) where N is the number of samples, B is the number of bounding boxes in the sample and each bounding
            box is represented by 5 values. See the notes for more info.

        Examples
        --------
        >>> import torch
        ...
        ...
        ... def batch_to_labels(self, batch):
        ...     # each bbox in the labels is (class_id, x, y, x, y). convert to (class_id, x, y, w, h)
        ...     return [torch.stack(
        ...         [torch.cat((bbox[0], bbox[1:3], bbox[4:] - bbox[1:3]), dim=0)
        ...          for bbox in image])
        ...         for image in batch[1]]

        Notes
        -----
        The accepted label format for is a a list of length N containing tensors of shape (B, 5), where N is the number
        of samples, B is the number of bounding boxes in the sample and each bounding box is represented by 5 values:
        (class_id, x, y, w, h). x and y are the coordinates (in pixels) of the upper left corner of the bounding box, w
        and h are the width and height of the bounding box (in pixels) and class_id is the class id of the prediction.
        """
        raise runml_checksNotImplementedError('batch_to_labels() must be implemented in a subclass')

    @abstractmethod
    def infer_on_batch(self, batch, model, device) -> Sequence[torch.Tensor]:
        """Return the predictions of the model on a batch of data.

        Parameters
        ----------
        batch : torch.Tensor
            The batch of data.
        model : torch.nn.Module
            The model to use for inference.
        device : torch.device
            The device to use for inference.

        Returns
        -------
        Sequence[torch.Tensor]
            The predictions of the model on the batch. The predictions should be in a sequence of length N containing
            tensors of shape (B, 6), where N is the number of images, B is the number of bounding boxes detected in the
            sample and each bounding box is represented by 6 values. See the notes for more info.

        Examples
        --------
        >>> import torch
        ...
        ...
        ... def infer_on_batch(self, batch, model, device):
        ...     # Converts a yolo prediction batch to the accepted xywh format
        ...     return_list = []
        ...
        ...     predictions = model(batch[0])
        ...     # yolo Detections objects have List[torch.Tensor] xyxy output in .pred
        ...     for single_image_tensor in predictions.pred:
        ...         pred_modified = torch.clone(single_image_tensor)
        ...         pred_modified[:, 2] = pred_modified[:, 2] - pred_modified[:, 0]
        ...         pred_modified[:, 3] = pred_modified[:, 3] - pred_modified[:, 1]
        ...         return_list.append(pred_modified)
        ...
        ...     return return_list

        Notes
        -----
        The accepted prediction format is a list of length N containing tensors of shape (B, 6), where N is the number
        of images, B is the number of bounding boxes detected in the sample and each bounding box is represented by 6
        values: [x, y, w, h, confidence, class_id]. x and y are the coordinates (in pixels) of the upper left corner
        of the bounding box, w and h are the width and height of the bounding box (in pixels), confidence is the
        confidence of the model and class_id is the class id.
        """
        raise runml_checksNotImplementedError('infer_on_batch() must be implemented in a subclass')

    def get_classes(self, batch_labels: List[torch.Tensor]):
        """Get a labels batch and return classes inside it."""
        def get_classes_from_single_label(tensor: torch.Tensor):
            # tolist() on a 1D tensor already returns a list of ints, so no
            # extra list() wrapper is needed.
            return tensor[:, 0].type(torch.IntTensor).tolist() if len(tensor) > 0 else []
        return [get_classes_from_single_label(x) for x in batch_labels]

    def validate_label(self, batch):
        """
        Validate the label.

        Parameters
        ----------
        batch

        Raises
        ------
        ValidationError
            If labels format is invalid
        runml_checksNotImplementedError
            If batch_to_labels not implemented
        """
        labels = self.batch_to_labels(batch)
        if not isinstance(labels, list):
            raise ValidationError('Check requires object detection label to be a list with an entry for each '
                                  'sample')
        if len(labels) == 0:
            raise ValidationError('Check requires object detection label to be a non-empty list')
        if not isinstance(labels[0], torch.Tensor):
            raise ValidationError('Check requires object detection label to be a list of torch.Tensor')
        sample_idx = 0
        # Find a non empty tensor to validate
        while labels[sample_idx].shape[0] == 0:
            sample_idx += 1
            if sample_idx == len(labels):
                return  # No labels to validate
        if len(labels[sample_idx].shape) != 2:
            raise ValidationError('Check requires object detection label to be a list of 2D tensors')
        if labels[sample_idx].shape[1] != 5:
            raise ValidationError('Check requires object detection label to be a list of 2D tensors, when '
                                  'each row has 5 columns: [class_id, x, y, width, height]')
        if torch.min(labels[sample_idx]) < 0:
            raise ValidationError('Found one of coordinates to be negative, check requires object detection '
                                  'bounding box coordinates to be of format [class_id, x, y, width, height].')
        if torch.max(labels[sample_idx][:, 0] % 1) > 0:
            raise ValidationError('Class_id must be a positive integer. Object detection labels per image should '
                                  'be a Bx5 tensor of format [class_id, x, y, width, height].')

    @staticmethod
    def validate_infered_batch_predictions(batch_predictions):
        """
        Validate the infered predictions from the batch.

        Parameters
        ----------
        batch_predictions : t.Any
            The infered predictions from the batch

        Raises
        ------
        ValidationError
            If predictions format is invalid
        runml_checksNotImplementedError
            If infer_on_batch not implemented
        """
        if not isinstance(batch_predictions, Sequence):
            raise ValidationError('Check requires detection predictions to be a sequence with an entry for each'
                                  ' sample')
        if len(batch_predictions) == 0:
            raise ValidationError('Check requires detection predictions to be a non-empty sequence')
        if not isinstance(batch_predictions[0], torch.Tensor):
            raise ValidationError('Check requires detection predictions to be a sequence of torch.Tensor')
        sample_idx = 0
        # Find a non empty tensor to validate
        while batch_predictions[sample_idx].shape[0] == 0:
            sample_idx += 1
            if sample_idx == len(batch_predictions):
                return  # No predictions to validate
        if len(batch_predictions[sample_idx].shape) != 2:
            raise ValidationError('Check requires detection predictions to be a sequence of 2D tensors')
        if batch_predictions[sample_idx].shape[1] != 6:
            raise ValidationError('Check requires detection predictions to be a sequence of 2D tensors, when '
                                  'each row has 6 columns: [x, y, width, height, class_probability, class_id]')
        if torch.min(batch_predictions[sample_idx]) < 0:
            raise ValidationError('Found one of coordinates to be negative, Check requires object detection '
                                  'bounding box predictions to be of format [x, y, width, height, confidence,'
                                  ' class_id]. ')
        if torch.min(batch_predictions[sample_idx][:, 4]) < 0 or torch.max(batch_predictions[sample_idx][:, 4]) > 1:
            raise ValidationError('Confidence must be between 0 and 1. Object detection predictions per image '
                                  'should be a Bx6 tensor of format [x, y, width, height, confidence, class_id].')
        if torch.max(batch_predictions[sample_idx][:, 5] % 1) > 0:
            raise ValidationError('Class_id must be a positive integer. Object detection predictions per image '
                                  'should be a Bx6 tensor of format [x, y, width, height, confidence, class_id].')
"""Contains code for BatchWrapper."""
from operator import itemgetter
from typing import TYPE_CHECKING, Any, Callable, Iterable, Tuple, TypeVar, cast
import torch
from runml_checks.core import DatasetKind
from runml_checks.vision.task_type import TaskType
if TYPE_CHECKING:
from runml_checks.vision.context import Context
__all__ = ['Batch']
class Batch:
    """Represents dataset batch returned by the dataloader during iteration."""

    def __init__(
        self,
        batch: Tuple[Iterable[Any], Iterable[Any]],
        context: 'Context',  # noqa
        dataset_kind: DatasetKind,
        batch_index: int
    ):
        self._context = context
        self._dataset_kind = dataset_kind
        self.batch_index = batch_index
        # Move every tensor in the raw batch to the configured device up front.
        self._batch = apply_to_tensor(batch, lambda it: it.to(self._context.device))
        # Lazily computed caches for the formatted views of the batch.
        self._labels = None
        self._predictions = None
        self._images = None

    @property
    def labels(self):
        """Return labels for the batch, formatted in runml_checks format."""
        if self._labels is None:
            dataset = self._context.get_data_by_kind(self._dataset_kind)
            dataset.assert_labels_valid()
            self._labels = dataset.batch_to_labels(self._batch)
        return self._labels

    def _do_static_pred(self):
        """Return the pre-computed (static) predictions for this batch's samples."""
        preds = self._context.static_predictions[self._dataset_kind]
        dataset = self._context.get_data_by_kind(self._dataset_kind)
        indexes = list(dataset.data_loader.batch_sampler)[self.batch_index]
        # itemgetter with a single key returns the bare element rather than a
        # tuple, which would hand torch.stack a single tensor (stacking along
        # its first dim) or give sequence consumers a bare tensor. Handle the
        # one-sample batch explicitly.
        if len(indexes) == 1:
            preds = (preds[indexes[0]],)
        else:
            preds = itemgetter(*indexes)(preds)
        if dataset.task_type == TaskType.CLASSIFICATION:
            return torch.stack(preds)
        return preds

    @property
    def predictions(self):
        """Return predictions for the batch, formatted in runml_checks format."""
        if self._predictions is None:
            dataset = self._context.get_data_by_kind(self._dataset_kind)
            if self._context.static_predictions is not None:
                self._context.assert_predictions_valid(self._dataset_kind)
                self._predictions = self._do_static_pred()
            else:
                # Calling model will raise error if model was not given
                # (assert_predictions_valid doesn't raise an error if no model was given)
                model = self._context.model
                self._context.assert_predictions_valid(self._dataset_kind)
                self._predictions = dataset.infer_on_batch(self._batch, model, self._context.device)
        return self._predictions

    @property
    def images(self):
        """Return images for the batch, formatted in runml_checks format."""
        if self._images is None:
            dataset = self._context.get_data_by_kind(self._dataset_kind)
            dataset.assert_images_valid()
            self._images = [image.astype('uint8') for image in dataset.batch_to_images(self._batch)]
        return self._images

    def __getitem__(self, index: int):
        """Return batch item by index."""
        return self._batch[index]

    def __len__(self):
        """Return length of batch."""
        # Re-derives this batch's sample indices from the batch sampler.
        dataset = self._context.get_data_by_kind(self._dataset_kind)
        return len(list(dataset.data_loader.batch_sampler)[self.batch_index])
T = TypeVar('T')


def apply_to_tensor(
    x: T,
    fn: Callable[[torch.Tensor], torch.Tensor]
) -> Any:
    """Apply provided function to tensor instances recursively."""
    if isinstance(x, torch.Tensor):
        return cast(T, fn(x))
    if isinstance(x, (str, bytes, bytearray)):
        # Strings are sequences too, but must be returned whole.
        return x
    if isinstance(x, (list, tuple, set)):
        return type(x)(apply_to_tensor(item, fn) for item in x)
    if isinstance(x, dict):
        return type(x)((key, apply_to_tensor(value, fn)) for key, value in x.items())
    return x
"""Module for vision base checks."""
from typing import Any, Dict, Mapping, Optional, Sequence, Union
import torch
from ignite.metrics import Metric
from torch import nn
from runml_checks.core.check_result import CheckResult
from runml_checks.core.checks import DatasetKind, ModelOnlyBaseCheck, SingleDatasetBaseCheck, TrainTestBaseCheck
from runml_checks.utils.ipython import ProgressBarGroup
from runml_checks.vision import deprecation_warnings # pylint: disable=unused-import # noqa: F401
from runml_checks.vision._shared_docs import docstrings
from runml_checks.vision.batch_wrapper import Batch
from runml_checks.vision.context import Context
from runml_checks.vision.vision_data import VisionData
__all__ = [
'SingleDatasetCheck',
'TrainTestCheck',
'ModelOnlyCheck',
]
class SingleDatasetCheck(SingleDatasetBaseCheck):
    """Parent class for checks that only use one dataset."""

    context_type = Context

    @docstrings
    def run(
        self,
        dataset: VisionData,
        model: Optional[nn.Module] = None,
        model_name: str = '',
        scorers: Optional[Mapping[str, Metric]] = None,
        scorers_per_class: Optional[Mapping[str, Metric]] = None,
        device: Union[str, torch.device, None] = None,
        random_state: int = 42,
        n_samples: Optional[int] = 10_000,
        with_display: bool = True,
        train_predictions: Optional[Dict[int, Union[Sequence[torch.Tensor], torch.Tensor]]] = None,
        test_predictions: Optional[Dict[int, Union[Sequence[torch.Tensor], torch.Tensor]]] = None,
    ) -> CheckResult:
        """Run check.

        Parameters
        ----------
        dataset: VisionData
            VisionData object to process
        model: Optional[nn.Module] , default None
            pytorch neural network module instance
        {additional_context_params:2*indent}
        """
        assert self.context_type is not None
        with ProgressBarGroup() as progressbar_factory:
            with progressbar_factory.create_dummy(name='Validating Input'):
                # Context is copying the data object, then not using the original after the init
                context: Context = self.context_type(
                    dataset,
                    model=model,
                    model_name=model_name,
                    scorers=scorers,
                    scorers_per_class=scorers_per_class,
                    device=device,
                    random_state=random_state,
                    n_samples=n_samples,
                    with_display=with_display,
                    train_predictions=train_predictions,
                    test_predictions=test_predictions,
                )
                self.initialize_run(context, DatasetKind.TRAIN)
            # The single dataset is always handled under the TRAIN kind internally.
            context.train.init_cache()
            for i, batch in enumerate(progressbar_factory.create(
                    iterable=context.train,
                    name='Ingesting Batches',
                    unit='Batch'
            )):
                batch = Batch(batch, context, DatasetKind.TRAIN, i)
                context.train.update_cache(batch)
                self.update(context, batch, DatasetKind.TRAIN)
            with progressbar_factory.create_dummy(name='Computing Check', unit='Check'):
                result = self.compute(context, DatasetKind.TRAIN)
                context.finalize_check_result(result, self)
                context.add_is_sampled_footnote(result, DatasetKind.TRAIN)
            return result

    def initialize_run(self, context: Context, dataset_kind: DatasetKind):
        """Initialize run before starting updating on batches. Optional."""
        pass

    def update(self, context: Context, batch: Any, dataset_kind: DatasetKind):
        """Update internal check state with given batch."""
        raise NotImplementedError()

    def compute(self, context: Context, dataset_kind: DatasetKind) -> CheckResult:
        """Compute final check result based on accumulated internal state."""
        raise NotImplementedError()
class TrainTestCheck(TrainTestBaseCheck):
    """Parent class for checks that compare two datasets.

    The class checks train dataset and test dataset for model training and test.
    """

    context_type = Context

    @docstrings
    def run(
        self,
        train_dataset: VisionData,
        test_dataset: VisionData,
        model: Optional[nn.Module] = None,
        model_name: str = '',
        scorers: Optional[Mapping[str, Metric]] = None,
        scorers_per_class: Optional[Mapping[str, Metric]] = None,
        device: Union[str, torch.device, None] = None,
        random_state: int = 42,
        n_samples: Optional[int] = 10_000,
        with_display: bool = True,
        train_predictions: Optional[Dict[int, Union[Sequence[torch.Tensor], torch.Tensor]]] = None,
        test_predictions: Optional[Dict[int, Union[Sequence[torch.Tensor], torch.Tensor]]] = None,
    ) -> CheckResult:
        """Run check.

        Parameters
        ----------
        train_dataset: VisionData
            VisionData object, representing data an neural network was fitted on
        test_dataset: VisionData
            VisionData object, representing data an neural network predicts on
        model: Optional[nn.Module] , default None
            pytorch neural network module instance
        {additional_context_params:2*indent}
        """
        assert self.context_type is not None
        with ProgressBarGroup() as progressbar_factory:
            with progressbar_factory.create_dummy(name='Validating Input'):
                # Context is copying the data object, then not using the original after the init
                context: Context = self.context_type(
                    train_dataset,
                    test_dataset,
                    model=model,
                    model_name=model_name,
                    scorers=scorers,
                    scorers_per_class=scorers_per_class,
                    device=device,
                    random_state=random_state,
                    n_samples=n_samples,
                    with_display=with_display,
                    train_predictions=train_predictions,
                    test_predictions=test_predictions,
                )
                self.initialize_run(context)
            # Train batches are ingested first, then test batches.
            train_pbar = progressbar_factory.create(
                iterable=context.train,
                name='Ingesting Batches - Train Dataset',
                unit='Batch'
            )
            context.train.init_cache()
            for i, batch in enumerate(train_pbar):
                batch = Batch(batch, context, DatasetKind.TRAIN, i)
                context.train.update_cache(batch)
                self.update(context, batch, DatasetKind.TRAIN)
            context.test.init_cache()
            for i, batch in enumerate(progressbar_factory.create(
                    iterable=context.test,
                    name='Ingesting Batches - Test Dataset',
                    unit='Batch'
            )):
                batch = Batch(batch, context, DatasetKind.TEST, i)
                context.test.update_cache(batch)
                self.update(context, batch, DatasetKind.TEST)
            with progressbar_factory.create_dummy(name='Computing Check', unit='Check'):
                result = self.compute(context)
                context.finalize_check_result(result, self)
                context.add_is_sampled_footnote(result)
            return result

    def initialize_run(self, context: Context):
        """Initialize run before starting updating on batches. Optional."""
        pass

    def update(self, context: Context, batch: Any, dataset_kind: DatasetKind):
        """Update internal check state with given batch for either train or test."""
        raise NotImplementedError()

    def compute(self, context: Context) -> CheckResult:
        """Compute final check result based on accumulated internal state."""
        raise NotImplementedError()
class ModelOnlyCheck(ModelOnlyBaseCheck):
    """Parent class for checks that only use a model and no datasets."""

    context_type = Context

    @docstrings
    def run(
        self,
        model: nn.Module,
        model_name: str = '',
        scorers: Optional[Mapping[str, Metric]] = None,
        scorers_per_class: Optional[Mapping[str, Metric]] = None,
        device: Union[str, torch.device, None] = None,
        random_state: int = 42,
        n_samples: Optional[int] = None,
        with_display: bool = True,
        train_predictions: Optional[Dict[int, Union[Sequence[torch.Tensor], torch.Tensor]]] = None,
        test_predictions: Optional[Dict[int, Union[Sequence[torch.Tensor], torch.Tensor]]] = None,
    ) -> CheckResult:
        """Run check.

        Parameters
        ----------
        model: nn.Module
            pytorch neural network module instance
        {additional_context_params:2*indent}
        """
        assert self.context_type is not None
        with ProgressBarGroup() as progressbar_factory:
            with progressbar_factory.create_dummy(name='Validating Input'):
                context: Context = self.context_type(
                    model=model,
                    model_name=model_name,
                    scorers=scorers,
                    scorers_per_class=scorers_per_class,
                    device=device,
                    random_state=random_state,
                    n_samples=n_samples,
                    with_display=with_display,
                    train_predictions=train_predictions,
                    test_predictions=test_predictions,
                )
                self.initialize_run(context)
            # No data to ingest - go straight to computing the result.
            with progressbar_factory.create_dummy(name='Computing Check', unit='Check'):
                result = self.compute(context)
                context.finalize_check_result(result, self)
            return result

    def initialize_run(self, context: Context):
        """Initialize run before starting updating on batches. Optional."""
        pass

    def compute(self, context: Context) -> CheckResult:
        """Compute final check result."""
        raise NotImplementedError()
"""Module for Metric Mixin."""
import typing as t
from abc import abstractmethod
import numpy as np
from runml_checks.vision.metrics_utils.iou_utils import compute_pairwise_ious, group_class_detection_label, jaccard_iou
class MetricMixin:
    """Metric util function mixin.

    Subclasses adapt a concrete task's detection/label objects to the generic
    operations the metrics need (grouping, IoUs, areas, confidences).
    """

    # NOTE(review): this class does not use ABCMeta, so @abstractmethod is not
    # enforced at instantiation time - subclasses must still override all hooks.
    @abstractmethod
    def get_confidences(self, detections) -> t.List[float]:
        """Get detections object of single image and should return confidence for each detection."""
        pass

    @abstractmethod
    def calc_pairwise_ious(self, detections, labels) -> t.Dict[int, np.ndarray]:
        """Get a single result from group_class_detection_label and return a matrix of IoUs."""
        pass

    @abstractmethod
    def group_class_detection_label(self, detections, labels) -> t.Dict[t.Any, t.Dict[str, list]]:
        """Group detection and labels in dict of format {class_id: {'detected' [...], 'ground_truth': [...]}}."""
        pass

    @abstractmethod
    def get_detection_areas(self, detections) -> t.List[int]:
        """Get detection object of single image and should return area for each detection."""
        pass

    @abstractmethod
    def get_labels_areas(self, labels) -> t.List[int]:
        """Get labels object of single image and should return area for each label."""
        pass
class ObjectDetectionMetricMixin(MetricMixin):
    """Metric util function mixin for object detection."""

    def get_labels_areas(self, labels) -> t.List[int]:
        """Return the area (width * height) of every label bounding box of a single image."""
        # Label rows are [class_id, x, y, width, height].
        return [label[3].item() * label[4].item() for label in labels]

    def group_class_detection_label(self, detections, labels) -> t.Dict[t.Any, t.Dict[str, list]]:
        """Group detections and labels by class into {class_id: {'detected': [...], 'ground_truth': [...]}}."""
        return group_class_detection_label(detections, labels)

    def get_confidences(self, detections) -> t.List[float]:
        """Return the confidence score of every detection of a single image."""
        return [detection[4].item() for detection in detections]

    def calc_pairwise_ious(self, detections, labels) -> np.ndarray:
        """Return the matrix of pairwise IoUs for a single class grouping."""
        return compute_pairwise_ious(detections, labels, jaccard_iou)

    def get_detection_areas(self, detections) -> t.List[int]:
        """Return the area (width * height) of every detection of a single image."""
        # Detection rows are [x, y, width, height, confidence, class_id].
        return [detection[2].item() * detection[3].item() for detection in detections]
"""Module for computing Intersection over Unions."""
from collections import defaultdict
import numpy as np
import torch
def jaccard_iou(dt: np.array, gt: np.array):
    """Calculate the jaccard IoU.

    See https://en.wikipedia.org/wiki/Jaccard_index

    Parameters
    ----------
    dt: np.array
        Single Detection in the shape of [x, y, width, height, confidence, class]
    gt: np.array
        Single Ground Truth in the shape of [class, x, y, width, height]

    Returns
    -------
    float
        Intersection area divided by union area of the two boxes.
    """
    x_dt, y_dt, w_dt, h_dt = dt[:4]
    x_gt, y_gt, w_gt, h_gt = gt[1:]

    # Bottom-right corners of both boxes.
    x2_dt, y2_dt = x_dt + w_dt, y_dt + h_dt
    x2_gt, y2_gt = x_gt + w_gt, y_gt + h_gt

    # Intersection rectangle: innermost left/top and innermost right/bottom
    # edges (builtin max/min instead of hand-rolled ternaries).
    xi, yi = max(x_dt, x_gt), max(y_dt, y_gt)
    x2i, y2i = min(x2_dt, x2_gt), min(y2_dt, y2_gt)

    dt_area = w_dt * h_dt
    gt_area = w_gt * h_gt

    # Clamp to zero when the boxes do not overlap along an axis.
    intersection = max(x2i - xi, 0) * max(y2i - yi, 0)
    return intersection / (dt_area + gt_area - intersection)
def compute_pairwise_ious(detected, ground_truth, iou_func):
    """Compute pairwise ious between detections and ground truth.

    Returns a matrix of shape (len(detected), len(ground_truth)) where cell
    [i, j] holds ``iou_func(detected[i], ground_truth[j])``.
    """
    ious = np.zeros((len(detected), len(ground_truth)))
    for det_idx, detection in enumerate(detected):
        for gt_idx, gt_box in enumerate(ground_truth):
            ious[det_idx, gt_idx] = iou_func(detection, gt_box)
    return ious
def group_class_detection_label(detected, ground_truth):
    """Group bounding detection and labels by class.

    Returns a dict mapping class id to {'detected': [...], 'ground_truth': [...]},
    where each entry is the detection/label row converted to a numpy array.
    """
    grouped = defaultdict(lambda: {"detected": [], "ground_truth": []})
    # Detection rows carry the class id at index 5, label rows at index 0.
    for detection in detected:
        grouped[untorchify(detection[5])]["detected"].append(detection.cpu().detach().numpy())
    for label in ground_truth:
        grouped[untorchify(label[0])]["ground_truth"].append(label.cpu().detach().numpy())
    return grouped
def compute_bounding_box_class_ious(detected, ground_truth):
    """Compute ious between bounding boxes of the same class."""
    grouped = group_class_detection_label(detected, ground_truth)
    # Pairwise IoU matrix per class id.
    ious = {}
    for class_id, boxes in grouped.items():
        ious[class_id] = compute_pairwise_ious(boxes["detected"], boxes["ground_truth"], jaccard_iou)
    return ious
def per_sample_mean_iou(predictions, labels):
    """Calculate mean iou for a single sample."""
    mean_ious = []
    for detected, ground_truth in zip(predictions, labels):
        # Degenerate samples: perfect score when both are empty, zero when only one is.
        if len(ground_truth) == 0:
            mean_ious.append(1 if len(detected) == 0 else 0)
            continue
        if len(detected) == 0:
            mean_ious.append(0)
            continue

        class_ious = compute_bounding_box_class_ious(detected, ground_truth)
        # Best matching ground-truth IoU for every detection, across all classes.
        best_fits = [max(detection_row, default=0)
                     for cls_matrix in class_ious.values()
                     for detection_row in cls_matrix]
        mean_ious.append(sum(best_fits) / len(best_fits) if best_fits else 0)
    return mean_ious
def untorchify(item):
    """If item is torch tensor do `.item()` else return item itself."""
    if not isinstance(item, torch.Tensor):
        return item
    return item.cpu().item()
"""Module for defining metrics for the vision module."""
import typing as t
from copy import copy
import numpy as np
import pandas as pd
import torch
from ignite.engine import Engine
from ignite.metrics import Metric, Precision, Recall
from runml_checks.core import DatasetKind
from runml_checks.core.errors import runml_checksNotSupportedError, runml_checksValueError
from runml_checks.vision.metrics_utils.detection_precision_recall import ObjectDetectionAveragePrecision
from runml_checks.vision.vision_data import TaskType, VisionData
__all__ = [
'get_scorers_list',
'calculate_metrics',
'metric_results_to_df',
'filter_classes_for_display',
]
def get_default_classification_scorers():
    """Return the default ignite metrics (Precision and Recall) used for classification tasks."""
    return {
        'Precision': Precision(),
        'Recall': Recall()
    }
def get_default_object_detection_scorers() -> t.Dict[str, Metric]:
    """Return the default detection metrics: average precision and average recall."""
    return {
        'Average Precision': ObjectDetectionAveragePrecision(return_option='ap'),
        'Average Recall': ObjectDetectionAveragePrecision(return_option='ar')
    }
def get_scorers_list(
        dataset: VisionData,
        alternative_scorers: t.Dict[str, Metric] = None,
) -> t.Dict[str, Metric]:
    """Get scorers list according to model object and label column.

    Parameters
    ----------
    dataset : VisionData
        Dataset object
    alternative_scorers : t.Dict[str, Metric], default: None
        Alternative scorers dictionary

    Returns
    -------
    t.Dict[str, Metric]
        Scorers list

    Raises
    ------
    runml_checksValueError
        If an alternative scorer is not an ignite Metric.
    runml_checksNotSupportedError
        If the dataset task type has no default scorers.
    """
    task_type = dataset.task_type

    if alternative_scorers:
        # For alternative scorers we create a copy since in suites we are running in parallel, so we can't use the
        # same instance for several checks.
        scorers = {}
        for name, met in alternative_scorers.items():
            # Validate that each alternative scorer is a correct type
            if not isinstance(met, Metric):
                raise runml_checksValueError('alternative_scorers should contain metrics of type ignite.Metric')
            # Copy before resetting so the metric instance supplied by the caller is not mutated.
            scorer = copy(met)
            scorer.reset()
            scorers[name] = scorer
        return scorers
    elif task_type == TaskType.CLASSIFICATION:
        scorers = get_default_classification_scorers()
    elif task_type == TaskType.OBJECT_DETECTION:
        scorers = get_default_object_detection_scorers()
    else:
        raise runml_checksNotSupportedError(f'No scorers match task_type {task_type}')

    return scorers
def calculate_metrics(
        metrics: t.Dict[str, Metric],
        dataset: VisionData,
        model: torch.nn.Module,
        device: torch.device
) -> t.Dict[str, float]:
    """Calculate a list of ignite metrics on a given model and dataset.

    Parameters
    ----------
    metrics : Dict[str, Metric]
        List of ignite metrics to calculate
    dataset : VisionData
        Dataset object
    model : nn.Module
        Model object
    device : Union[str, torch.device, None]

    Returns
    -------
    t.Dict[str, float]
        Dictionary of metrics with the metric name as key and the metric value as value
    """
    def evaluation_step(_, batch):
        # Each engine step yields (predictions, labels), the shape ignite metric updates expect.
        return dataset.infer_on_batch(batch, model, device), dataset.batch_to_labels(batch)

    engine = Engine(evaluation_step)
    for metric_name, metric in metrics.items():
        metric.reset()
        metric.attach(engine, metric_name)

    final_state = engine.run(dataset.data_loader)
    return final_state.metrics
def _validate_metric_type(metric_name: str, score: t.Any) -> bool:
"""Raise error if metric has incorrect type, or return true."""
if not isinstance(score, (torch.Tensor, list, np.ndarray)):
raise runml_checksValueError(f'The metric {metric_name} returned a '
f'{type(score)} instead of an array/tensor')
return True
def metric_results_to_df(results: dict, dataset: VisionData) -> pd.DataFrame:
    """Get dict of metric name to tensor of classes scores, and convert it to dataframe."""
    # The data might contain fewer classes than the model was trained on; skip any
    # class id that is not present in the data.
    data_classes = dataset.classes_indices.keys()

    rows = []
    for metric, score in results.items():
        _validate_metric_type(metric, score)
        # scorer returns results as array, containing result per class
        for class_id, class_score in enumerate(score):
            if np.isnan(class_score) or class_id not in data_classes:
                continue
            value = class_score.item() if isinstance(class_score, torch.Tensor) else class_score
            rows.append([metric, class_id, dataset.label_id_to_name(class_id), value])

    return pd.DataFrame(rows, columns=['Metric',
                                       'Class',
                                       'Class Name',
                                       'Value']).sort_values(by=['Metric', 'Class'])
def filter_classes_for_display(metrics_df: pd.DataFrame,
                               metric_to_show_by: str,
                               n_to_show: int,
                               show_only: str,
                               column_to_filter_by: str = 'Dataset',
                               column_filter_value: str = None) -> list:
    """Filter the metrics dataframe for display purposes.

    Parameters
    ----------
    metrics_df : pd.DataFrame
        Dataframe containing the metrics.
    n_to_show : int
        Number of classes to show in the report.
    show_only : str
        Specify which classes to show in the report. Can be one of the following:

        - 'largest': Show the largest classes.
        - 'smallest': Show the smallest classes.
        - 'random': Show random classes.
        - 'best': Show the classes with the highest score.
        - 'worst': Show the classes with the lowest score.
    metric_to_show_by : str
        Specify the metric to sort the results by. Relevant only when show_only is 'best' or 'worst'.
    column_to_filter_by : str , default: 'Dataset'
        Specify the name of the column to filter by.
    column_filter_value : str , default: None
        Specify the value of the column to filter by, if None will be set to test dataset name.

    Returns
    -------
    list
        List of classes to show in the report.
    """
    # working on the test dataset on default
    if column_filter_value is None:
        column_filter_value = DatasetKind.TEST.value

    relevant_rows = metrics_df[(metrics_df[column_to_filter_by] == column_filter_value) &
                               (metrics_df['Metric'] == metric_to_show_by)]

    if show_only == 'random':
        relevant_rows = relevant_rows.sample(frac=1)
    else:
        # Map each display mode to the column to order by and the sort direction.
        sort_specs = {
            'largest': ('Number of samples', False),
            'smallest': ('Number of samples', True),
            'best': ('Value', False),
            'worst': ('Value', True),
        }
        if show_only not in sort_specs:
            raise ValueError(f'Unknown show_only value: {show_only}')
        by_column, ascending = sort_specs[show_only]
        relevant_rows = relevant_rows.sort_values(by=by_column, ascending=ascending)

    return relevant_rows.head(n_to_show)['Class'].to_list()
"""Module for custom scorer metric."""
import typing as t
import numpy as np
import torch
from ignite.metrics import Metric
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
class CustomScorer(Metric):
    """Metric that runs a custom scorer in the compute on the y, y_pred (can work with sklearn scorers).

    Parameters
    ----------
    score_func: Callable, default: None
        Score function (or loss function) with signature `score_func(y_true, y_pred, **kwargs)`
    needs_proba: bool, default: False
        Whether score_func requires the probabilites or not.
    **kwargs
        Additional parameters to be passed to score_func.

    Examples
    --------
    >>> from sklearn.metrics import cohen_kappa_score
    ... from runml_checks.vision.metrics_utils.custom_scorer import CustomScorer
    ... from runml_checks.vision.checks.model_evaluation import SingleDatasetScalarPerformance
    ... from runml_checks.vision.datasets.classification import mnist
    ...
    ... mnist_model = mnist.load_model()
    ... test_ds = mnist.load_dataset(train=True, object_type='VisionData')
    ...
    >>> ck = CustomScorer(cohen_kappa_score)
    ...
    >>> check = SingleDatasetScalarPerformance(ck, metric_name='cohen_kappa_score')
    ... check.run(test_ds, mnist_model).value
    """

    def __init__(
            self,
            score_func: t.Callable,
            needs_proba: bool = False,
            **kwargs
    ):
        super().__init__(device="cpu")
        self.score_func = score_func
        self.needs_proba = needs_proba
        self.kwargs = kwargs

    @staticmethod
    def _to_numpy(values):
        # Accept torch tensors (detached and moved to CPU) and anything np.array can consume.
        if isinstance(values, torch.Tensor):
            return values.cpu().detach().numpy()
        return np.array(values)

    @reinit__is_reduced
    def reset(self):
        """Reset metric state."""
        self._y_pred = []
        self._y = []
        super().reset()

    @reinit__is_reduced
    def update(self, output):
        """Update metric with batch of samples."""
        y_pred, y = output
        y_pred = self._to_numpy(y_pred)
        y = self._to_numpy(y)
        if not self.needs_proba:
            # Collapse per-class probabilities into hard class predictions.
            y_pred = np.argmax(y_pred, axis=-1)
        self._y_pred.append(y_pred)
        self._y.append(y)

    @sync_all_reduce("_y_pred", "_y")
    def compute(self):
        """Compute metric value."""
        y_true = np.concatenate(self._y)
        y_pred = np.concatenate(self._y_pred)
        return self.score_func(y_true, y_pred, **self.kwargs)
"""Module of ImagePropertyOutliers check."""
import typing as t
import numpy as np
from runml_checks.vision import Batch, VisionData
from runml_checks.vision.checks.data_integrity.abstract_property_outliers import AbstractPropertyOutliers
from runml_checks.vision.utils.image_properties import default_image_properties
__all__ = ['ImagePropertyOutliers']
class ImagePropertyOutliers(AbstractPropertyOutliers):
    """Find outliers images with respect to the given properties.

    The check computes several image properties and then computes the number of outliers for each property.
    Outliers of the single dimension properties are detected with the
    `IQR <https://en.wikipedia.org/wiki/Interquartile_range#Outliers>`_ method.

    Parameters
    ----------
    image_properties : List[Dict[str, Any]], default: None
        List of properties. Replaces the default runml_checks properties.
        Each property is dictionary with keys 'name' (str), 'method' (Callable) and 'output_type' (str),
        representing attributes of said method. 'output_type' must be one of:

        - 'numeric' - for continuous ordinal outputs.
        - 'categorical' - for discrete, non-ordinal outputs. These can still be numbers,
          but these numbers do not have inherent value.

        For more on image / label properties, see the :ref:`property guide </user-guide/vision/vision_properties.rst>`
    n_show_top : int , default: 5
        number of outliers to show from each direction (upper limit and bottom limit)
    iqr_percentiles: Tuple[int, int], default: (25, 75)
        Two percentiles which define the IQR range
    iqr_scale: float, default: 1.5
        The scale to multiply the IQR range for the outliers detection
    """

    def __init__(self,
                 image_properties: t.List[t.Dict[str, t.Any]] = None,
                 n_show_top: int = 5,
                 iqr_percentiles: t.Tuple[int, int] = (25, 75),
                 iqr_scale: float = 1.5,
                 **kwargs):
        super().__init__(properties=image_properties,
                         n_show_top=n_show_top,
                         iqr_percentiles=iqr_percentiles,
                         iqr_scale=iqr_scale,
                         **kwargs)

    def get_default_properties(self, data: VisionData):
        """Return default properties to run in the check."""
        return default_image_properties

    def get_relevant_data(self, batch: Batch):
        """Get the data on which the check calculates outliers for."""
        return batch.images

    def draw_image(self, data: VisionData, sample_index: int, index_of_value_in_sample: int,
                   num_properties_in_sample: int) -> np.ndarray:
        """Return an image to show as output of the display.

        Parameters
        ----------
        data : VisionData
            The vision data object used in the check.
        sample_index : int
            The batch index of the sample to draw the image for.
        index_of_value_in_sample : int
            Each sample property is list, then this is the index of the outlier in the sample property list.
        num_properties_in_sample
            The number of values in the sample's property list.
        """
        batch = data.batch_of_index(sample_index)
        return data.batch_to_images(batch)[0]
"""Module contains LabelPropertyOutliers check."""
import typing as t
import numpy as np
from runml_checks.core.errors import runml_checksProcessError
from runml_checks.vision import Batch
from runml_checks.vision.checks.data_integrity.abstract_property_outliers import AbstractPropertyOutliers
from runml_checks.vision.utils import label_prediction_properties
from runml_checks.vision.utils.image_functions import draw_bboxes
from runml_checks.vision.vision_data import TaskType, VisionData
__all__ = ['LabelPropertyOutliers']
class LabelPropertyOutliers(AbstractPropertyOutliers):
    """Find outliers labels with respect to the given properties.

    The check computes several label properties and then computes the number of outliers for each property.
    Outliers of the single dimension properties are detected with the
    `IQR <https://en.wikipedia.org/wiki/Interquartile_range#Outliers>`_ method.

    Parameters
    ----------
    label_properties : List[Dict[str, Any]], default: None
        List of properties. Replaces the default runml_checks properties.
        Each property is dictionary with keys 'name' (str), 'method' (Callable) and 'output_type' (str),
        representing attributes of said method. 'output_type' must be one of:

        - 'numeric' - for continuous ordinal outputs.
        - 'categorical' - for discrete, non-ordinal outputs. These can still be numbers,
          but these numbers do not have inherent value.

        For more on image / label properties, see the :ref:`property guide </user-guide/vision/vision_properties.rst>`
    n_show_top : int , default: 5
        number of outliers to show from each direction (upper limit and bottom limit)
    iqr_percentiles: Tuple[int, int], default: (25, 75)
        Two percentiles which define the IQR range
    iqr_scale: float, default: 1.5
        The scale to multiply the IQR range for the outliers detection
    """

    def __init__(self,
                 label_properties: t.List[t.Dict[str, t.Any]] = None,
                 n_show_top: int = 5,
                 iqr_percentiles: t.Tuple[int, int] = (25, 75),
                 iqr_scale: float = 1.5,
                 **kwargs):
        super().__init__(properties=label_properties,
                         n_show_top=n_show_top,
                         iqr_percentiles=iqr_percentiles,
                         iqr_scale=iqr_scale,
                         **kwargs)

    def get_default_properties(self, data: VisionData):
        """Return default properties to run in the check."""
        if data.task_type == TaskType.OBJECT_DETECTION:
            return label_prediction_properties.DEFAULT_OBJECT_DETECTION_LABEL_PROPERTIES
        if data.task_type == TaskType.CLASSIFICATION:
            raise runml_checksProcessError('task type classification does not have default label '
                                           'properties for label outliers.')
        raise runml_checksProcessError(f'task type {data.task_type} does not have default label '
                                       f'properties defined.')

    def get_relevant_data(self, batch: Batch):
        """Get the data on which the check calculates outliers for."""
        return batch.labels

    def draw_image(self, data: VisionData, sample_index: int, index_of_value_in_sample: int,
                   num_properties_in_sample: int) -> np.ndarray:
        """Return an image to show as output of the display.

        Parameters
        ----------
        data : VisionData
            The vision data object used in the check.
        sample_index : int
            The batch index of the sample to draw the image for.
        index_of_value_in_sample : int
            Each sample property is list, then this is the index of the outlier in the sample property list.
        num_properties_in_sample
            The number of values in the sample's property list.
        """
        batch = data.batch_of_index(sample_index)
        image = data.batch_to_images(batch)[0]
        if data.task_type != TaskType.OBJECT_DETECTION:
            return image

        label = data.batch_to_labels(batch)[0]
        # If we have same number of values for sample as the number of bboxes in label, we assume that the
        # property returns value per bounding box, so we keep only the relevant bounding box
        if num_properties_in_sample > 1 and num_properties_in_sample == len(label):
            label = label[index_of_value_in_sample].unsqueeze(dim=0)
        return draw_bboxes(image, label, copy_image=False, border_width=5)
"""Module containing class performance check."""
import typing as t
from collections import defaultdict
import pandas as pd
import torch
from runml_checks import CheckFailure
from runml_checks.core import CheckResult, DatasetKind
from runml_checks.core.errors import runml_checksProcessError, runml_checksValueError
from runml_checks.utils.performance.error_model import error_model_display_dataframe, model_error_contribution
from runml_checks.utils.single_sample_metrics import per_sample_cross_entropy
from runml_checks.vision import Batch, Context, TrainTestCheck
from runml_checks.vision.metrics_utils.iou_utils import per_sample_mean_iou
from runml_checks.vision.utils.image_properties import default_image_properties, validate_properties
from runml_checks.vision.vision_data import TaskType
__all__ = ['ModelErrorAnalysis']
class ModelErrorAnalysis(TrainTestCheck):
    """Find the properties that best split the data into segments of high and low model error.

    The check trains a regression model to predict the error of the user's model. Then, the properties scoring the
    highest feature importance for the error regression model are selected and the distribution of the error vs the
    property values is plotted. The check results are shown only if the error regression model manages to predict the
    error well enough.

    Parameters
    ----------
    image_properties : List[Dict[str, Any]], default: None
        List of properties. Replaces the default runml_checks properties.
        Each property is dictionary with keys 'name' (str), 'method' (Callable) and 'output_type' (str),
        representing attributes of said method. 'output_type' must be one of:

        - 'numeric' - for continuous ordinal outputs.
        - 'categorical' - for discrete, non-ordinal outputs. These can still be numbers,
          but these numbers do not have inherent value.

        For more on image / label properties, see the :ref:`property guide </user-guide/vision/vision_properties.rst>`
    max_properties_to_show : int , default: 20
        maximal number of properties to show error distribution for.
    min_property_contribution : float , default: 0.15
        minimum feature importance of a property to the error regression model
        in order to show the property.
    min_error_model_score : float , default: 0.5
        minimum r^2 score of the error regression model for displaying the check.
    min_segment_size : float , default: 0.05
        minimal fraction of data that can comprise a weak segment.
    n_display_samples : int , default: 5_000
        number of samples to display in scatter plot.
    random_state : int, default: 42
        random seed for all check internals.
    """

    def __init__(self,
                 image_properties: t.List[t.Dict[str, t.Any]] = None,
                 max_properties_to_show: int = 20,
                 min_property_contribution: float = 0.15,
                 min_error_model_score: float = 0.5,
                 min_segment_size: float = 0.05,
                 n_display_samples: int = 5_000,
                 random_state: int = 42,
                 **kwargs):
        super().__init__(**kwargs)
        self.random_state = random_state
        self.min_error_model_score = min_error_model_score
        self.min_segment_size = min_segment_size
        self.max_properties_to_show = max_properties_to_show
        self.min_property_contribution = min_property_contribution
        self.n_display_samples = n_display_samples
        # Per-run accumulators; (re)created in initialize_run.
        self._train_properties = None
        self._test_properties = None
        self._train_scores = None
        self._test_scores = None

        if image_properties is None:
            self.image_properties = default_image_properties
        else:
            self.image_properties = validate_properties(image_properties)

    def initialize_run(self, context: Context):
        """Initialize property and score lists."""
        context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)
        self._train_properties = defaultdict(list)
        self._test_properties = defaultdict(list)
        self._train_scores = []
        self._test_scores = []

    def update(self, context: Context, batch: Batch, dataset_kind: DatasetKind):
        """Accumulate property data of images and scores."""
        if dataset_kind == DatasetKind.TRAIN:
            dataset = context.train
            properties = self._train_properties
            scores = self._train_scores
        elif dataset_kind == DatasetKind.TEST:
            dataset = context.test
            properties = self._test_properties
            scores = self._test_scores
        else:
            raise RuntimeError(
                'Internal Error! Part of code that must '
                'be unreachable was reached.'
            )

        images = batch.images
        predictions = batch.predictions
        labels = batch.labels

        # Compute every configured image property for the batch.
        for single_property in self.image_properties:
            properties[single_property['name']].extend(single_property['method'](images))

        # Per-sample error score: cross entropy for classification, mean IoU for detection.
        if dataset.task_type == TaskType.CLASSIFICATION:
            def scoring_func(predictions, labels):
                return per_sample_cross_entropy(labels, predictions)
        elif dataset.task_type == TaskType.OBJECT_DETECTION:
            def scoring_func(predictions, labels):
                return per_sample_mean_iou(predictions, labels)
        else:
            raise runml_checksValueError(f'Should not reach here! Unsupported task type {dataset.task_type}')

        if isinstance(predictions, torch.Tensor):
            predictions = predictions.cpu().detach().numpy()
        if isinstance(labels, torch.Tensor):
            labels = labels.cpu().detach().numpy()

        # get score using scoring_function
        scores.extend(scoring_func(predictions, labels))

    def compute(self, context: Context) -> CheckResult:
        """Find segments that contribute to model error.

        Returns
        -------
        CheckResult:
            value: dictionary of details for each property segment that split the effect on the error of the model
            display: plots of results
        """
        # build dataframe of properties and scores
        train_property_df = pd.DataFrame(self._train_properties).dropna(axis=1, how='all')
        test_property_df = pd.DataFrame(self._test_properties)[train_property_df.columns]

        try:
            error_fi, error_model_predicted = \
                model_error_contribution(train_property_df,
                                         pd.Series(self._train_scores),
                                         test_property_df,
                                         pd.Series(self._test_scores),
                                         train_property_df.columns.to_list(),
                                         [],
                                         min_error_model_score=self.min_error_model_score,
                                         random_state=self.random_state)
        except runml_checksProcessError as e:
            # The error regression model was not good enough - return a failure instead of a result.
            return CheckFailure(self, e)

        display, value = error_model_display_dataframe(error_fi,
                                                       error_model_predicted,
                                                       test_property_df,
                                                       [],
                                                       self.max_properties_to_show,
                                                       self.min_property_contribution,
                                                       self.n_display_samples,
                                                       self.min_segment_size,
                                                       self.random_state,
                                                       context.with_display)

        headnote = """<span>
            The following graphs show the distribution of error for top properties that are most useful for
            distinguishing high error samples from low error samples.
        </span>"""
        display = [headnote] + display if display else None

        return CheckResult(value, display=display)
"""Module containing mean average precision report check."""
import math
from typing import Tuple, TypeVar
import numpy as np
import pandas as pd
import plotly.express as px
from runml_checks.core import CheckResult, ConditionResult, DatasetKind
from runml_checks.core.condition import ConditionCategory
from runml_checks.utils.strings import format_number
from runml_checks.vision import Batch, Context, SingleDatasetCheck
from runml_checks.vision.metrics_utils.detection_precision_recall import ObjectDetectionAveragePrecision
from runml_checks.vision.vision_data import TaskType
__all__ = ['MeanAveragePrecisionReport']
MPR = TypeVar('MPR', bound='MeanAveragePrecisionReport')
class MeanAveragePrecisionReport(SingleDatasetCheck):
    """Summarize mean average precision metrics on a dataset and model per IoU and bounding box area.

    Parameters
    ----------
    area_range: tuple, default: (32**2, 96**2)
        Slices for small/medium/large buckets.
    """

    def __init__(self, area_range: Tuple = (32**2, 96**2), **kwargs):
        super().__init__(**kwargs)
        self.area_range = area_range

    def initialize_run(self, context: Context, dataset_kind: DatasetKind = None):
        """Initialize run by asserting task type and initializing metric."""
        context.assert_task_type(TaskType.OBJECT_DETECTION)
        self._ap_metric = ObjectDetectionAveragePrecision(return_option=None, area_range=self.area_range)

    def update(self, context: Context, batch: Batch, dataset_kind: DatasetKind):
        """Update the metrics by passing the batch to ignite metric update method."""
        label = batch.labels
        prediction = batch.predictions
        self._ap_metric.update((prediction, label))

    def compute(self, context: Context, dataset_kind: DatasetKind) -> CheckResult:
        """Compute the metric result using the ignite metrics compute method and create display."""
        small_area = int(math.sqrt(self.area_range[0]))
        large_area = int(math.sqrt(self.area_range[1]))
        res = self._ap_metric.compute()[0]['precision']
        rows = []
        # The 'large' bucket holds boxes whose area is ABOVE the upper threshold,
        # hence the '>' in its title (was previously mislabeled with '<').
        for title, area_name in zip(['All',
                                     f'Small (area < {small_area}^2)',
                                     f'Medium ({small_area}^2 < area < {large_area}^2)',
                                     f'Large (area > {large_area}^2)'],
                                    ['all', 'small', 'medium', 'large']):
            rows.append([
                title,
                self._ap_metric.get_classes_scores_at(res, area=area_name, max_dets=100),
                self._ap_metric.get_classes_scores_at(res, iou=0.5, area=area_name, max_dets=100),
                self._ap_metric.get_classes_scores_at(res, iou=0.75, area=area_name, max_dets=100)
            ])

        results = pd.DataFrame(data=rows, columns=['Area size', 'mAP@[.50::.95] (avg.%)', 'mAP@.50 (%)', 'mAP@.75 (%)'])
        results = results.set_index('Area size')

        if context.with_display:
            # Average precision per IoU threshold, averaged over classes while ignoring the -1 sentinel.
            filtered_res = self._ap_metric.filter_res(res, area='all', max_dets=100)
            filtered_res_shape = filtered_res.shape
            filtered_res = np.reshape(filtered_res, (filtered_res_shape[0], filtered_res_shape[3]))
            mean_res = np.zeros(filtered_res_shape[0])
            for i in range(filtered_res_shape[0]):
                mean_res[i] = np.nanmean(filtered_res[i][filtered_res[i] > -1])
            data = {
                'IoU threshold': self._ap_metric.iou_thresholds,
                'mAP (%)': mean_res
            }
            df = pd.DataFrame.from_dict(data)
            fig = px.line(df, x='IoU threshold', y='mAP (%)',
                          title='Mean Average Precision over increasing IoU thresholds')
            display = [results, fig]
        else:
            display = None

        return CheckResult(value=results, display=display)

    def add_condition_mean_average_precision_greater_than(self: MPR, min_score: float) -> MPR:
        """Add condition - mAP scores in different area thresholds is greater than given score.

        Parameters
        ----------
        min_score : float
            Minimum score to pass the check.
        """
        def condition(df: pd.DataFrame):
            # Locate the single worst (row, column) cell in the results table.
            min_col_per_row = df.idxmin(axis=1)
            min_score_per_row = [df.loc[r, c] for r, c in min_col_per_row.items()]
            loc_min_row = np.argmin(min_score_per_row)
            score = min_score_per_row[loc_min_row]
            area = min_col_per_row.index[loc_min_row]
            iou = min_col_per_row[loc_min_row]
            category = ConditionCategory.PASS if score > min_score else ConditionCategory.FAIL
            details = f'Found lowest score of {format_number(score)} for area {area} and IoU {iou}'
            return ConditionResult(category, details)

        return self.add_condition(f'Scores are greater than {min_score}', condition)

    def add_condition_average_mean_average_precision_greater_than(self: MPR, min_score: float = 0.3) -> MPR:
        """Add condition - average mAP for IoU values between 0.5 to 0.9 in all areas is greater than given score.

        Parameters
        ----------
        min_score : float
            Minimum score to pass the check.
        """
        def condition(df: pd.DataFrame):
            df = df.reset_index()
            # After reset_index the 'All' areas row is the first row (label 0).
            value = df.loc[df['Area size'] == 'All', :]['mAP@[.50::.95] (avg.%)'][0]
            details = f'mAP score is: {format_number(value)}'
            category = ConditionCategory.PASS if value > min_score else ConditionCategory.FAIL
            return ConditionResult(category, details)

        return self.add_condition(f'mAP score is greater than {min_score}', condition)
"""Module containing simple comparison check."""
from typing import Any, Dict, Hashable, List
import numpy as np
import pandas as pd
import plotly.express as px
import torch
from ignite.metrics import Fbeta
from runml_checks.core import CheckResult, ConditionCategory, ConditionResult, DatasetKind
from runml_checks.core.errors import runml_checksValueError
from runml_checks.utils import plot
from runml_checks.utils.metrics import get_gain
from runml_checks.utils.strings import format_percent
from runml_checks.vision import Batch, Context, TrainTestCheck
from runml_checks.vision.metrics_utils import get_scorers_list, metric_results_to_df
from runml_checks.vision.metrics_utils.metrics import filter_classes_for_display
from runml_checks.vision.vision_data import TaskType
__all__ = ['SimpleModelComparison']
_allowed_strategies = (
'most_frequent',
'prior',
'stratified',
'uniform'
)
class SimpleModelComparison(TrainTestCheck):
    """Compare given model score to simple model score (according to given model type).

    For classification models, the simple model is a dummy classifier the selects the predictions based on a strategy.

    Parameters
    ----------
    strategy : str, default='most_frequent'
        Strategy to use to generate the predictions of the simple model.

        * 'most_frequent' : The most frequent label in the training set is predicted.
          The probability vector is 1 for the most frequent label and 0 for the other predictions.
        * 'prior' : The probability vector always contains the empirical class prior distribution (i.e. the class
          distribution observed in the training set).
        * 'stratified' : The predictions are generated by sampling one-hot vectors from a multinomial distribution
          parametrized by the empirical class prior probabilities.
        * 'uniform' : Generates predictions uniformly at random from the list of unique classes observed in y,
          i.e. each class has equal probability. The predicted class is chosen randomly.
    alternative_metrics : Dict[str, Metric], default: None
        A dictionary of metrics, where the key is the metric name and the value is an ignite.Metric object whose score
        should be used. If None are given, use the default metrics.
    n_to_show : int, default: 20
        Number of classes to show in the report. If None, show all classes.
    show_only : str, default: 'largest'
        Specify which classes to show in the report. Can be one of the following:

        - 'largest': Show the largest classes.
        - 'smallest': Show the smallest classes.
        - 'random': Show random classes.
        - 'best': Show the classes with the highest score.
        - 'worst': Show the classes with the lowest score.
    metric_to_show_by : str, default: None
        Specify the metric to sort the results by. Relevant only when show_only is 'best' or 'worst'.
        If None, sorting by the first metric in the default metrics list.
    class_list_to_show: List[int], default: None
        Specify the list of classes to show in the report. If specified, n_to_show, show_only and metric_to_show_by
        are ignored.
    """

    # NOTE(review): mutable class-level attribute — shared across instances; presumably reassigned per run. Verify.
    _state: Dict[Hashable, Any] = {}
def __init__(self,
strategy: str = 'most_frequent',
alternative_metrics=None,
n_to_show: int = 20,
show_only: str = 'largest',
metric_to_show_by: str = None,
class_list_to_show: List[int] = None,
**kwargs):
super().__init__(**kwargs)
self.strategy = strategy
if self.strategy not in _allowed_strategies:
raise runml_checksValueError(
f'Unknown strategy type: {self.strategy}, expected one of{_allowed_strategies}.'
)
self.alternative_metrics = alternative_metrics
self.n_to_show = n_to_show
self.class_list_to_show = class_list_to_show
if self.class_list_to_show is None:
if show_only not in ['largest', 'smallest', 'random', 'best', 'worst']:
raise runml_checksValueError(f'Invalid value for show_only: {show_only}. Should be one of: '
f'["largest", "smallest", "random", "best", "worst"]')
self.show_only = show_only
if alternative_metrics is not None and show_only in ['best', 'worst'] and metric_to_show_by is None:
raise runml_checksValueError('When alternative_metrics are provided and show_only is one of: '
'["best", "worst"], metric_to_show_by must be specified.')
self.metric_to_show_by = metric_to_show_by
self._test_metrics = None
self._perfect_metrics = None
def initialize_run(self, context: Context):
"""Initialize the metrics for the check, and validate task type is relevant."""
context.assert_task_type(TaskType.CLASSIFICATION)
if self.alternative_metrics is None:
self._test_metrics = {'F1': Fbeta(beta=1, average=False)}
self._perfect_metrics = {'F1': Fbeta(beta=1, average=False)}
else:
self._test_metrics = get_scorers_list(context.train, self.alternative_metrics)
self._perfect_metrics = get_scorers_list(context.train, self.alternative_metrics)
    def update(self, context: Context, batch: Batch, dataset_kind: DatasetKind):
        """Update the metrics for the check."""
        # Only the test split is scored here; the train split is used later for class priors.
        if dataset_kind == DatasetKind.TEST and context.train.task_type == TaskType.CLASSIFICATION:
            label = batch.labels
            prediction = batch.predictions
            for _, metric in self._test_metrics.items():
                metric.update((prediction, label))
            # calculating perfect scores
            # One-hot vectors built directly from the labels simulate a model that is
            # always right, giving the best achievable score per class.
            # assumes predictions are shaped (batch, n_classes) — TODO confirm
            n_of_classes = batch.predictions.cpu().detach().shape[1]
            perfect_predictions = np.eye(n_of_classes)[label.cpu().detach().numpy()]
            for _, metric in self._perfect_metrics.items():
                metric.update((torch.Tensor(perfect_predictions).to(context.device), label))
    def compute(self, context: Context) -> CheckResult:
        """Compute the metrics for the check."""
        results = []
        # Three models are reported side by side: the evaluated model, an upper bound
        # (labels replayed as one-hot predictions in `update`) and the strategy-based dummy.
        metrics_to_eval = {
            'Given Model': self._test_metrics,
            'Perfect Model': self._perfect_metrics,
            'Simple Model': self._generate_simple_model_metrics(context.train, context.test)
        }
        for name, metrics in metrics_to_eval.items():
            dataset = context.get_data_by_kind(DatasetKind.TEST)
            metrics_df = metric_results_to_df(
                {k: m.compute() for k, m in metrics.items()}, dataset
            )
            metrics_df['Model'] = name
            metrics_df['Number of samples'] = metrics_df['Class'].map(dataset.n_of_samples_per_class.get)
            results.append(metrics_df)
        results_df = pd.concat(results)
        results_df = results_df[['Model', 'Metric', 'Class', 'Class Name', 'Number of samples', 'Value']]
        results_df.dropna(inplace=True)
        results_df.sort_values(by=['Model', 'Value'], ascending=False, inplace=True)
        results_df.reset_index(drop=True, inplace=True)
        if context.with_display:
            if not self.metric_to_show_by:
                # Default to the first configured metric when none was requested.
                self.metric_to_show_by = list(self._test_metrics.keys())[0]
            if self.class_list_to_show is not None:
                display_df = results_df.loc[results_df['Class'].isin(self.class_list_to_show)]
            elif self.n_to_show is not None:
                # The perfect model is excluded when selecting which classes to display;
                # the given model's rows drive the best/worst/largest/... ordering.
                rows = results_df['Class'].isin(filter_classes_for_display(
                    results_df.loc[results_df['Model'] != 'Perfect Model'],
                    self.metric_to_show_by,
                    self.n_to_show,
                    self.show_only,
                    column_to_filter_by='Model',
                    column_filter_value='Given Model'
                ))
                display_df = results_df.loc[rows]
            else:
                display_df = results_df
            fig = (
                px.histogram(
                    display_df.loc[results_df['Model'] != 'Perfect Model'],
                    x='Class Name',
                    y='Value',
                    color='Model',
                    color_discrete_sequence=(plot.colors['Generated'], plot.colors['Baseline']),
                    barmode='group',
                    facet_col='Metric',
                    facet_col_spacing=0.05,
                    hover_data=['Number of samples'],
                    title=f'Simple Model (Strategy: {self.strategy}) vs. Given Model')
                .update_xaxes(title=None, type='category')
                .update_yaxes(title=None, matches=None)
                .for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))
                .for_each_yaxis(lambda yaxis: yaxis.update(showticklabels=True))
            )
        else:
            fig = None
        return CheckResult(
            results_df,
            header='Simple Model Comparison',
            display=fig
        )
    def _generate_simple_model_metrics(self, train, test):
        """Score a strategy-based dummy model against the test labels and return the updated metrics."""
        # Empirical class prior from the train split's per-class sample counts.
        class_prior = np.zeros(train.num_classes)
        n_samples = 0
        for label, total in train.n_of_samples_per_class.items():
            class_prior[label] = total
            n_samples += total
        class_prior /= n_samples
        if self.strategy == 'most_frequent':
            # One-hot vector on the majority class.
            dummy_prediction = np.zeros(train.num_classes)
            dummy_prediction[np.argmax(class_prior)] = 1
            dummy_predictor = lambda: torch.from_numpy(dummy_prediction)
        elif self.strategy == 'prior':
            dummy_predictor = lambda: torch.from_numpy(class_prior)
        elif self.strategy == 'stratified':
            # Evaluated per call, so every prediction is a fresh draw from the prior.
            dummy_predictor = lambda: torch.from_numpy(np.random.multinomial(1, class_prior))
        elif self.strategy == 'uniform':
            dummy_predictor = lambda: torch.from_numpy(np.ones(train.num_classes) / train.num_classes)
        else:
            raise runml_checksValueError(
                f'Unknown strategy type: {self.strategy}, expected one of {_allowed_strategies}.'
            )
        # Create dummy predictions
        dummy_predictions = []
        labels = []
        for label, count in test.n_of_samples_per_class.items():
            labels += [label] * count
            for _ in range(count):
                dummy_predictions.append(dummy_predictor())
        # Get scorers
        if self.alternative_metrics is None:
            metrics = {'F1': Fbeta(beta=1, average=False)}
        else:
            metrics = get_scorers_list(train, self.alternative_metrics)
        for _, metric in metrics.items():
            metric.update((torch.stack(dummy_predictions), torch.LongTensor(labels)))
        return metrics
    def add_condition_gain_greater_than(self,
                                        min_allowed_gain: float = 0.1,
                                        max_gain: float = 50,
                                        classes: List[Hashable] = None,
                                        average: bool = False):
        """Add condition - require gain between the model and the simple model to be greater than threshold.

        Parameters
        ----------
        min_allowed_gain : float , default: 0.1
            Minimum allowed gain between the model and the simple model -
            gain is: difference in performance / (perfect score - simple score)
        max_gain : float , default: 50
            the maximum value for the gain value, limits from both sides [-max_gain, max_gain]
        classes : List[Hashable] , default: None
            Used in classification models to limit condition only to given classes.
        average : bool , default: False
            Used in classification models to flag if to run condition on average of classes, or on
            each class individually
        """
        name = f'Model performance gain over simple model is greater than {format_percent(min_allowed_gain)}'
        if classes:
            name = name + f' for classes {str(classes)}'
        # The keyword arguments are presumably forwarded by add_condition to
        # calculate_condition_logic at evaluation time — matches its signature.
        return self.add_condition(name,
                                  calculate_condition_logic,
                                  include_classes=classes,
                                  min_allowed_gain=min_allowed_gain,
                                  max_gain=max_gain,
                                  average=average)
def calculate_condition_logic(result, include_classes=None, average=False, max_gain=None,
                              min_allowed_gain=None) -> ConditionResult:
    """Evaluate the gain-over-simple-model condition on the check's result dataframe.

    Parameters
    ----------
    result : pd.DataFrame
        Result value with 'Model', 'Metric', 'Class', 'Class Name' and 'Value' columns.
    include_classes : List[Hashable] , default: None
        Limit the condition to the given classes (per-class mode only).
    average : bool , default: False
        If True, compare per-metric averages instead of each class individually.
    max_gain : float , default: None
        Limit for the gain value from both sides: [-max_gain, max_gain].
    min_allowed_gain : float , default: None
        Minimum gain required for the condition to pass.
    """
    scores = result.loc[result['Model'] == 'Given Model']
    perfect_scores = result.loc[result['Model'] == 'Perfect Model']
    simple_scores = result.loc[result['Model'] == 'Simple Model']
    metrics = scores['Metric'].unique()
    # Save min gain info to print when condition pass
    min_gain = (np.inf, '')

    def update_min_gain(gain, metric, class_name=None):
        nonlocal min_gain
        if gain < min_gain[0]:
            message = f'Found minimal gain of {format_percent(gain)} for metric {metric}'
            if class_name:
                message += f' and class {class_name}'
            min_gain = gain, message

    fails = {}
    if not average:
        for metric in metrics:
            failed_classes = {}
            for _, scores_row in scores.loc[scores['Metric'] == metric].iterrows():
                curr_class = scores_row['Class']
                curr_class_name = scores_row['Class Name']
                curr_value = scores_row['Value']
                if include_classes and curr_class not in include_classes:
                    continue
                perfect = perfect_scores.loc[(perfect_scores['Metric'] == metric) &
                                             (perfect_scores['Class'] == curr_class)]['Value'].values[0]
                # A class the model already scores perfectly on has no gain to measure.
                if curr_value == perfect:
                    continue
                simple_score_value = simple_scores.loc[(simple_scores['Class'] == curr_class) &
                                                       (simple_scores['Metric'] == metric)]['Value'].values[0]
                gain = get_gain(simple_score_value,
                                curr_value,
                                perfect,
                                max_gain)
                update_min_gain(gain, metric, curr_class_name)
                if gain <= min_allowed_gain:
                    failed_classes[curr_class_name] = format_percent(gain)
            if failed_classes:
                fails[metric] = failed_classes
    else:
        scores = average_scores(scores, simple_scores, include_classes)
        for metric, models_scores in scores.items():
            metric_perfect_score = perfect_scores.loc[(perfect_scores['Metric'] == metric)]['Value'].values[0]
            # If origin model is perfect, skip the gain calculation
            if models_scores['Origin'] == metric_perfect_score:
                continue
            gain = get_gain(models_scores['Simple'],
                            models_scores['Origin'],
                            metric_perfect_score,
                            max_gain)
            update_min_gain(gain, metric)
            if gain <= min_allowed_gain:
                fails[metric] = format_percent(gain)
    if fails:
        msg = f'Found metrics with gain below threshold: {fails}'
        return ConditionResult(ConditionCategory.FAIL, msg)
    else:
        return ConditionResult(ConditionCategory.PASS, min_gain[1])
def average_scores(scores, simple_model_scores, include_classes):
    """
    Calculate the average of the scores for each metric for all classes.

    Parameters
    ----------
    scores : pd.DataFrame
        the scores for the given model
    simple_model_scores : pd.DataFrame
        the scores for the simple model
    include_classes : List[Hashable]
        the classes to include in the calculation

    Returns
    -------
    Dictionary[str, Dictionary[str, float]]
        the average scores for each metric. The keys are the metric names, and the values are a dictionary
        with the keys being Origin and Simple and the values being the average score.
        Metrics for which no class passed the include_classes filter are omitted.
    """
    result = {}
    metrics = scores['Metric'].unique()
    for metric in metrics:
        model_score = 0
        simple_score = 0
        total = 0
        for _, row in scores.loc[scores['Metric'] == metric].iterrows():
            if include_classes and row['Class'] not in include_classes:
                continue
            model_score += row['Value']
            simple_score += simple_model_scores.loc[(simple_model_scores['Class'] == row['Class']) &
                                                    (simple_model_scores['Metric'] == metric)]['Value'].values[0]
            total += 1
        # Guard against ZeroDivisionError when include_classes filters out every
        # class of this metric; the metric is simply skipped.
        if total == 0:
            continue
        result[metric] = {
            'Origin': model_score / total,
            'Simple': simple_score / total
        }
    return result
"""Module containing a check for computing a scalar performance metric for a single dataset."""
import numbers
import typing as t
import warnings
import torch
from ignite.metrics import Accuracy, Metric
from runml_checks.core import CheckResult, ConditionResult, DatasetKind
from runml_checks.core.condition import ConditionCategory
from runml_checks.core.errors import runml_checksValueError
from runml_checks.utils.strings import format_number
from runml_checks.vision import Batch, Context, SingleDatasetCheck
from runml_checks.vision.metrics_utils.detection_precision_recall import ObjectDetectionAveragePrecision
from runml_checks.vision.vision_data import TaskType
__all__ = ['SingleDatasetScalarPerformance']
class SingleDatasetScalarPerformance(SingleDatasetCheck):
    """Calculate a performance metric as a scalar for a given model and a given dataset.

    Parameters
    ----------
    metric: Metric, default: None
        An ignite.Metric object whose score should be used. If None is given, use the default metric.
    reduce: torch function, default: None
        The function to reduce the scores tensor into a single scalar. For metrics that return a scalar use None
        (default).
    metric_name: str, default: None
        A name for the metric to show in the check results.
    reduce_name: str, default: None
        A name for the reduce function to show in the check results.
    """

    def __init__(self,
                 metric: Metric = None,
                 reduce: t.Callable = None,
                 metric_name: str = None,
                 reduce_name: str = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.metric = metric
        self.reduce = reduce
        # Fall back to the metric class / reduce function name so results are always labeled.
        self.metric_name = metric_name or (metric.__class__.__name__ if metric else None)
        self.reduce_name = reduce_name or (reduce.__name__ if reduce else None)

    def initialize_run(self, context: Context, dataset_kind: DatasetKind):
        """Initialize the metric for the check, and validate task type is relevant."""
        # Fixed: the dataset_kind annotation was `DatasetKind.TRAIN` (an enum member, not a type).
        if self.metric is None:
            if context.train.task_type == TaskType.CLASSIFICATION:
                self.metric = Accuracy()
                if self.metric_name is None:
                    self.metric_name = 'accuracy'
            elif context.train.task_type == TaskType.OBJECT_DETECTION:
                self.metric = ObjectDetectionAveragePrecision()
                if self.metric_name is None:
                    self.metric_name = 'object_detection_average_precision'
                if self.reduce is None:
                    # AP returns a per-class tensor that may contain NaNs for absent classes.
                    self.reduce = torch.nanmean
                    self.reduce_name = 'nan_mean'
            else:
                raise runml_checksValueError('For task types other than classification or object detection, '
                                             'pass a metric explicitly')
        self.metric.reset()

    def update(self, context: Context, batch: Batch, dataset_kind: DatasetKind):
        """Update the metrics by passing the batch to ignite metric update method."""
        label = batch.labels
        prediction = batch.predictions
        self.metric.update((prediction, label))

    def compute(self, context: Context, dataset_kind: DatasetKind) -> CheckResult:
        """Compute the metric result using the ignite metrics compute method and reduce to a scalar."""
        metric_result = self.metric.compute()
        if self.reduce is not None:
            if isinstance(metric_result, numbers.Real):
                # Fixed message spacing: "operation." and "Pass" were previously concatenated.
                warnings.warn(SyntaxWarning('Metric result is already scalar, skipping reduce operation. '
                                            'Pass reduce=None to prevent this'))
                result_value = float(metric_result)
            else:
                result_value = float(self.reduce(metric_result))
        elif isinstance(metric_result, float):
            result_value = metric_result
        else:
            raise runml_checksValueError(f'The metric {self.metric.__class__} return a non-scalar value, '
                                         f'please specify a reduce function or choose a different metric')
        result_dict = {'score': result_value,
                       'metric': self.metric_name,
                       'reduce': self.reduce_name}
        return CheckResult(result_dict)

    def _add_threshold_condition(self, description: str, threshold: float,
                                 passes: t.Callable) -> ConditionResult:
        """Shared implementation for the four score-vs-threshold condition factories."""
        def condition(check_result):
            details = f'The score {self.metric_name} is {format_number(check_result["score"])}'
            category = ConditionCategory.PASS if passes(check_result['score'], threshold) \
                else ConditionCategory.FAIL
            return ConditionResult(category, details)
        return self.add_condition(f'Score is {description} {threshold}', condition)

    def add_condition_greater_than(self, threshold: float) -> ConditionResult:
        """Add condition - the result is greater than the threshold."""
        return self._add_threshold_condition('greater than', threshold, lambda score, th: score > th)

    def add_condition_greater_or_equal(self, threshold: float) -> ConditionResult:
        """Add condition - the result is greater or equal to the threshold."""
        return self._add_threshold_condition('greater or equal to', threshold, lambda score, th: score >= th)

    def add_condition_less_than(self, threshold: float) -> ConditionResult:
        """Add condition - the result is less than the threshold."""
        return self._add_threshold_condition('less than', threshold, lambda score, th: score < th)

    def add_condition_less_or_equal(self, threshold: float) -> ConditionResult:
        """Add condition - the result is less or equal to the threshold."""
        return self._add_threshold_condition('less or equal to', threshold, lambda score, th: score <= th)
"""Module containing class performance check."""
from typing import Dict, List, TypeVar
import pandas as pd
import plotly.express as px
from ignite.metrics import Metric
from runml_checks.core import CheckResult, DatasetKind
from runml_checks.core.check_utils.class_performance_utils import (
get_condition_class_performance_imbalance_ratio_less_than, get_condition_test_performance_greater_than,
get_condition_train_test_relative_degradation_less_than)
from runml_checks.core.errors import runml_checksValueError
from runml_checks.utils import plot
from runml_checks.utils.strings import format_percent
from runml_checks.vision import Batch, Context, TrainTestCheck
from runml_checks.vision.metrics_utils.metrics import filter_classes_for_display, get_scorers_list, metric_results_to_df
__all__ = ['ClassPerformance']
PR = TypeVar('PR', bound='ClassPerformance')
class ClassPerformance(TrainTestCheck):
    """Summarize given metrics on a dataset and model.

    Parameters
    ----------
    alternative_metrics : Dict[str, Metric], default: None
        A dictionary of metrics, where the key is the metric name and the value is an ignite.Metric object whose score
        should be used. If None are given, use the default metrics.
    n_to_show : int, default: 20
        Number of classes to show in the report. If None, show all classes.
    show_only : str, default: 'largest'
        Specify which classes to show in the report. Can be one of the following:
        - 'largest': Show the largest classes.
        - 'smallest': Show the smallest classes.
        - 'random': Show random classes.
        - 'best': Show the classes with the highest score.
        - 'worst': Show the classes with the lowest score.
    metric_to_show_by : str, default: None
        Specify the metric to sort the results by. Relevant only when show_only is 'best' or 'worst'.
        If None, sorting by the first metric in the default metrics list.
    class_list_to_show: List[int], default: None
        Specify the list of classes to show in the report. If specified, n_to_show, show_only and metric_to_show_by
        are ignored.
    """

    def __init__(self,
                 alternative_metrics: Dict[str, Metric] = None,
                 n_to_show: int = 20,
                 show_only: str = 'largest',
                 metric_to_show_by: str = None,
                 class_list_to_show: List[int] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.alternative_metrics = alternative_metrics
        self.n_to_show = n_to_show
        self.class_list_to_show = class_list_to_show
        # Display filters are only validated when no explicit class list was given,
        # since class_list_to_show overrides them.
        if self.class_list_to_show is None:
            if show_only not in ['largest', 'smallest', 'random', 'best', 'worst']:
                raise runml_checksValueError(f'Invalid value for show_only: {show_only}. Should be one of: '
                                             f'["largest", "smallest", "random", "best", "worst"]')
            self.show_only = show_only
            if alternative_metrics is not None and show_only in ['best', 'worst'] and metric_to_show_by is None:
                raise runml_checksValueError('When alternative_metrics are provided and show_only is one of: '
                                             '["best", "worst"], metric_to_show_by must be specified.')
        self.metric_to_show_by = metric_to_show_by
        # Maps DatasetKind -> metric-name -> ignite metric; filled in initialize_run.
        self._data_metrics = {}

    def initialize_run(self, context: Context):
        """Initialize run by creating the _state member with metrics for train and test."""
        self._data_metrics = {}
        # Separate metric instances per split so accumulators never share state.
        self._data_metrics[DatasetKind.TRAIN] = get_scorers_list(context.train,
                                                                 alternative_scorers=self.alternative_metrics)
        self._data_metrics[DatasetKind.TEST] = get_scorers_list(context.train,
                                                                alternative_scorers=self.alternative_metrics)
        if not self.metric_to_show_by:
            # Default to the first configured metric when none was requested.
            self.metric_to_show_by = list(self._data_metrics[DatasetKind.TRAIN].keys())[0]

    def update(self, context: Context, batch: Batch, dataset_kind: DatasetKind):
        """Update the metrics by passing the batch to ignite metric update method."""
        label = batch.labels
        prediction = batch.predictions
        for _, metric in self._data_metrics[dataset_kind].items():
            metric.update((prediction, label))

    def compute(self, context: Context) -> CheckResult:
        """Compute the metric result using the ignite metrics compute method and create display."""
        results = []
        for dataset_kind in [DatasetKind.TRAIN, DatasetKind.TEST]:
            dataset = context.get_data_by_kind(dataset_kind)
            metrics_df = metric_results_to_df(
                {k: m.compute() for k, m in self._data_metrics[dataset_kind].items()}, dataset
            )
            metrics_df['Dataset'] = dataset_kind.value
            metrics_df['Number of samples'] = metrics_df['Class'].map(dataset.n_of_samples_per_class.get)
            results.append(metrics_df)
        results_df = pd.concat(results)
        results_df = results_df[['Dataset', 'Metric', 'Class', 'Class Name', 'Number of samples', 'Value']]
        results_df = results_df.sort_values(by=['Dataset', 'Value'], ascending=False)
        if context.with_display:
            if self.class_list_to_show is not None:
                display_df = results_df.loc[results_df['Class'].isin(self.class_list_to_show)]
            elif self.n_to_show is not None:
                rows = results_df['Class'].isin(filter_classes_for_display(
                    results_df,
                    self.metric_to_show_by,
                    self.n_to_show,
                    self.show_only
                ))
                display_df = results_df.loc[rows]
            else:
                display_df = results_df
            # Grouped bar chart, one facet per metric, colored by split.
            fig = (
                px.histogram(
                    display_df,
                    x='Class Name',
                    y='Value',
                    color='Dataset',
                    color_discrete_sequence=(plot.colors['Train'], plot.colors['Test']),
                    barmode='group',
                    facet_col='Metric',
                    facet_col_spacing=0.05,
                    hover_data=['Number of samples'])
                .update_xaxes(title='Class', type='category')
                .update_yaxes(title='Value', matches=None)
                .for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))
                .for_each_yaxis(lambda yaxis: yaxis.update(showticklabels=True))
            )
        else:
            fig = None
        return CheckResult(
            results_df,
            header='Class Performance',
            display=fig
        )

    def add_condition_test_performance_greater_than(self: PR, min_score: float) -> PR:
        """Add condition - metric scores are greater than the threshold.

        Parameters
        ----------
        min_score : float
            Minimum score to pass the check.
        """
        condition = get_condition_test_performance_greater_than(min_score=min_score)
        return self.add_condition(f'Scores are greater than {min_score}', condition)

    def add_condition_train_test_relative_degradation_less_than(self: PR, threshold: float = 0.1) -> PR:
        """Add condition - test performance is not degraded by more than given percentage in train.

        Parameters
        ----------
        threshold : float , default: 0.1
            maximum degradation ratio allowed (value between 0 and 1)
        """
        condition = get_condition_train_test_relative_degradation_less_than(threshold=threshold)
        return self.add_condition(f'Train-Test scores relative degradation is less than {threshold}',
                                  condition)

    def add_condition_class_performance_imbalance_ratio_less_than(
            self: PR,
            threshold: float = 0.3,
            score: str = None
    ) -> PR:
        """Add condition - relative ratio difference between highest-class and lowest-class is less than threshold.

        Parameters
        ----------
        threshold : float , default: 0.3
            ratio difference threshold
        score : str , default: None
            limit score for condition

        Returns
        -------
        Self
            instance of 'ClassPerformance' or it subtype

        Raises
        ------
        runml_checksValueError
            if unknown score function name were passed.
        """
        if score is None:
            raise runml_checksValueError('Must define "score" parameter')
        condition = get_condition_class_performance_imbalance_ratio_less_than(threshold=threshold, score=score)
        return self.add_condition(
            name=f'Relative ratio difference between labels \'{score}\' score is less than {format_percent(threshold)}',
            condition_func=condition
        )
"""Module containing mean average recall report check."""
import math
from typing import Tuple, TypeVar
import numpy as np
import pandas as pd
from runml_checks.core import CheckResult, ConditionResult, DatasetKind
from runml_checks.core.condition import ConditionCategory
from runml_checks.utils.strings import format_number
from runml_checks.vision import Batch, Context, SingleDatasetCheck
from runml_checks.vision.metrics_utils.detection_precision_recall import ObjectDetectionAveragePrecision
from runml_checks.vision.vision_data import TaskType
__all__ = ['MeanAverageRecallReport']
MPR = TypeVar('MPR', bound='MeanAverageRecallReport')
class MeanAverageRecallReport(SingleDatasetCheck):
    """Summarize mean average recall metrics on a dataset and model per detections and area range.

    Parameters
    ----------
    area_range: tuple, default: (32**2, 96**2)
        Slices for small/medium/large buckets.
    """

    def __init__(self, area_range: Tuple = (32 ** 2, 96 ** 2), **kwargs):
        super().__init__(**kwargs)
        self._area_range = area_range
        # Created in initialize_run; declared here so the attribute always exists.
        self._ap_metric = None

    def initialize_run(self, context: Context, dataset_kind: DatasetKind = None):
        """Initialize run by asserting task type and initializing metric."""
        context.assert_task_type(TaskType.OBJECT_DETECTION)
        self._ap_metric = ObjectDetectionAveragePrecision(return_option=None, area_range=self._area_range)

    def update(self, context: Context, batch: Batch, dataset_kind: DatasetKind):
        """Update the metrics by passing the batch to ignite metric update method."""
        label = batch.labels
        prediction = batch.predictions
        self._ap_metric.update((prediction, label))

    def compute(self, context: Context, dataset_kind: DatasetKind) -> CheckResult:
        """Compute the metric result using the ignite metrics compute method and create display."""
        small_area = int(math.sqrt(self._area_range[0]))
        large_area = int(math.sqrt(self._area_range[1]))
        res = self._ap_metric.compute()[0]['recall']
        rows = []
        # Fixed: the "Large" bucket label previously read "area < large^2";
        # the large bucket holds boxes ABOVE the upper area bound.
        for title, area_name in zip(['All',
                                     f'Small (area < {small_area}^2)',
                                     f'Medium ({small_area}^2 < area < {large_area}^2)',
                                     f'Large (area > {large_area}^2)'],
                                    ['all', 'small', 'medium', 'large']):
            rows.append([
                title,
                self._ap_metric.get_classes_scores_at(res, area=area_name, max_dets=1),
                self._ap_metric.get_classes_scores_at(res, area=area_name, max_dets=10),
                self._ap_metric.get_classes_scores_at(res, area=area_name, max_dets=100)
            ])
        results = pd.DataFrame(data=rows, columns=['Area size', 'AR@1 (%)', 'AR@10 (%)', 'AR@100 (%)'])
        results = results.set_index('Area size')
        return CheckResult(value=results, display=[results])

    def add_condition_test_average_recall_greater_than(self: MPR, min_score: float) -> MPR:
        """Add condition - AR score is greater than given score.

        Parameters
        ----------
        min_score : float
            Minimum score to pass the check.
        """
        def condition(df: pd.DataFrame):
            # Locate the single worst (area bucket, detection limit) cell of the report.
            min_col_per_row = df.idxmin(axis=1)
            min_score_per_row = [df.loc[r, c] for r, c in min_col_per_row.items()]
            loc_min_row = np.argmin(min_score_per_row)
            score = min_score_per_row[loc_min_row]
            area = min_col_per_row.index[loc_min_row]
            # NOTE(review): this value is the AR@k column name, not an IoU threshold —
            # the "IoU" wording in the message below looks wrong, but the text is kept
            # as-is since downstream tests may match on it; confirm before rewording.
            iou = min_col_per_row[loc_min_row]
            category = ConditionCategory.FAIL if score < min_score else ConditionCategory.PASS
            details = f'Found lowest score of {format_number(score)} for area {area} and IoU {iou}'
            return ConditionResult(category, details)
        return self.add_condition(f'Scores are greater than {min_score}', condition)
"""Module for converting YOLO annotations to COCO format."""
import datetime
import json
import os
import os.path as osp
import uuid
from typing import Optional, Sequence, Union
import cv2
import numpy as np
YOLO_PATH = "/Users/nirbenzvi/code/runml_checks/coco128"
# Complete this by putting COCO labels
CATEGORIES = ("person", "bicycle", "car", "motorcycle", "airplane", "bus",
"train", "truck", "boat", "traffic light", "fire hydrant",
"stop sign", "parking meter", "bench", "bird", "cat", "dog",
"horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe",
"backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball", "kite", "baseball bat",
"baseball glove", "skateboard", "surfboard", "tennis racket",
"bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl",
"banana", "apple", "sandwich", "orange", "broccoli", "carrot",
"hot dog", "pizza", "donut", "cake", "chair", "couch",
"potted plant", "bed", "dining table", "toilet", "tv", "laptop",
"mouse", "remote", "keyboard", "cell phone", "microwave",
"oven", "toaster", "sink", "refrigerator", "book", "clock",
"vase", "scissors", "teddy bear", "hair drier", "toothbrush")
class YoloParser:
    """Parses input images and labels in the YOLO format.

    Parameters
    ----------
    category_list : list of str or dict
        List of categories or dictionary mapping category id to category name.
        A string value is treated as a path to a file with one category name per line.
    """

    # Image extensions accepted by parse_images_and_labels (compared lowercase).
    _VALID_EXTENSIONS = (".jpg", ".jpeg", ".png")

    def __init__(self, category_list: Optional[Union[Sequence, str, dict]] = CATEGORIES):
        if isinstance(category_list, (list, dict, tuple)):
            self._categories = category_list
        else:
            with open(category_list, "r", encoding="utf8") as fid:
                self._categories = fid.readlines()
        self._annotations = []  # accumulated COCO "annotations" entries
        self._images = []  # accumulated COCO "images" entries

    def parse_label_file(self, full_label_path: str):
        """Parse a single label file.

        Parameters
        ----------
        full_label_path : str
            Path to the label file.

        Returns
        -------
        numpy.ndarray
            Array with one row per label: column 0 is the class id, columns 1-4 are
            the (normalized) box coordinates.
        """
        labels = []
        with open(full_label_path, "r", encoding="utf8") as fid:
            for line in fid:
                labels.append(list(map(float, line.split(" "))))
        return np.array(labels)

    def parse_images_and_labels(self, images_path: str, labels_path: str):
        """
        We assume image and labels are correlated, meaning equivalent directories with matching image and label names.

        Parameters
        ----------
        images_path : str
            Path to the images directory.
        labels_path : str
            Path to the labels directory.
        """
        # Fixed: the previous filter compared the last THREE characters against "jpeg"
        # (four characters), so ".jpeg" files could never match and were silently skipped.
        image_files = [f for f in os.listdir(images_path)
                       if osp.splitext(f)[1].lower() in self._VALID_EXTENSIONS]
        for img_path in image_files:
            full_img_path = osp.join(images_path, img_path)
            full_label_path = osp.join(labels_path, osp.splitext(img_path)[0] + ".txt")
            assert osp.isfile(full_label_path), f"No matching label for image {full_img_path}!"
            h, w, _ = cv2.imread(full_img_path).shape
            labels = self.parse_label_file(full_label_path)
            if len(labels):
                # De-normalize box coordinates from [0, 1] to pixels.
                labels[:, [1, 3]] *= w
                labels[:, [2, 4]] *= h
            # TODO perhaps running index?
            curr_img_id = uuid.uuid4().int
            img_dict = {"id": curr_img_id,
                        "license": -1,
                        "coco_url": "N/A",
                        "flickr_url": "N/A",
                        "width": w,
                        "height": h,
                        "file_name": full_img_path,
                        "date_captured": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
            self._images.append(img_dict)
            for l in labels:
                # NOTE(review): YOLO boxes are (x_center, y_center, w, h) while COCO
                # expects (x_min, y_min, w, h); no center-to-corner conversion happens
                # here — confirm downstream consumers expect center coordinates.
                bbox = l[1:].tolist()
                category_id = int(l[0])
                # annotation ID doesn't really matter so we use running index
                img_ann = {"id": len(self._annotations),
                           "category_id": category_id,
                           "iscrowd": 0,
                           "segmentation": [[]],
                           "image_id": curr_img_id,
                           "area": bbox[2] * bbox[3],
                           "bbox": bbox}
                self._annotations.append(img_ann)

    def parse_yolo_dir(self, yolo_path: str):
        """
        Create COCO dataset from a directory containing images and labels in the YOLO format.

        Used https://docs.aws.amazon.com/rekognition/latest/customlabels-dg/md-coco-overview.html
        for reference

        Parameters
        ----------
        yolo_path : str
            Path to the YOLO directory.
        """
        image_dir = osp.join(yolo_path, "images")
        label_dir = osp.join(yolo_path, "labels")
        if not osp.isdir(image_dir) or not osp.isdir(label_dir):
            raise RuntimeError("Bad YOLO directory structure")
        dataset_subdirs = [f for f in os.listdir(image_dir) if osp.isdir(osp.join(image_dir, f))]
        for subdir in dataset_subdirs:
            # this implies image directory with corresponding labels
            if osp.isdir(osp.join(label_dir, subdir)):
                c_image_dir = osp.join(image_dir, subdir)
                c_label_dir = osp.join(label_dir, subdir)
                self.parse_images_and_labels(c_image_dir, c_label_dir)

    def save_coco_json(self, output_path: str):
        """Save the COCO dataset to a JSON file.

        Parameters
        ----------
        output_path : str
            Path to the output JSON file.
        """
        coco_json = {}
        coco_json["info"] = {
            "description": "COCO Dataset From Script",
            "url": "http://cocodataset.org",
            "version": "1.0",
            "year": datetime.datetime.now().date().year,
            "contributor": "@nirbenz",
            "date_created": datetime.datetime.now().date().strftime("%Y/%m/%d")
        }
        # TODO license
        coco_json["licenses"] = "#TODO"
        coco_json["images"] = self._images
        coco_json["annotations"] = self._annotations
        if isinstance(self._categories, dict):
            coco_json["categories"] = self._categories
        else:
            coco_json["categories"] = [{"id": idx, "supercategory": c, "name": c}
                                       for idx, c in enumerate(self._categories)]
        with open(output_path, "w", encoding="utf8") as fid:
            json.dump(coco_json, fid, indent=4, sort_keys=True)
if __name__ == "__main__":
    # Example usage: convert the local YOLO dataset at YOLO_PATH to a COCO JSON file.
    parser = YoloParser()
    parser.parse_yolo_dir(YOLO_PATH)
    parser.save_coco_json("./coco_128.json")
"""Module for loading a sample of the COCO dataset and the yolov5s model."""
import contextlib
import logging
import os
import typing as t
import warnings
from pathlib import Path
from typing import Iterable, List
import albumentations as A
import cv2
import numpy as np
import torch
from PIL import Image
from torch import nn
from torch.utils.data import DataLoader
from torchvision.datasets import VisionDataset
from torchvision.datasets.utils import download_and_extract_archive
from typing_extensions import Literal
from runml_checks import vision
from runml_checks.vision import DetectionData
__all__ = ['load_dataset', 'load_model', 'COCOData', 'CocoDataset']
DATA_DIR = Path(__file__).absolute().parent
def load_model(pretrained: bool = True, device: t.Union[str, torch.device] = 'cpu') -> nn.Module:
    """Load the yolov5s (version 6.1) model and return it.

    Parameters
    ----------
    pretrained : bool, default: True
        Whether to load the pretrained weights.
    device : Union[str, torch.device], default: 'cpu'
        Device to load the model onto.

    Returns
    -------
    torch.nn.Module
        The yolov5s model, set to eval mode.
    """
    dev = torch.device(device) if isinstance(device, str) else device
    logger = logging.getLogger('yolov5')
    # Silence yolov5's logger while the hub downloads/builds the model.
    # Restore the previous state in a finally block so the logger is not left
    # disabled when loading fails (the original unconditionally re-enabled it
    # only on success).
    previously_disabled = logger.disabled
    logger.disabled = True
    try:
        model = torch.hub.load('ultralytics/yolov5:v6.1', 'yolov5s',
                               pretrained=pretrained,
                               verbose=False,
                               device=dev)
    finally:
        logger.disabled = previously_disabled
    model.eval()
    return model
class COCOData(DetectionData):
    """COCO dataset adapter, inherits from :class:`~runml_checks.vision.DetectionData`.

    Implements the extractors needed to read labels, predictions and images
    out of a batch produced by the COCO dataloader.
    """

    def batch_to_labels(self, batch) -> List[torch.Tensor]:
        """Convert the batch to a list of label tensors in (class, x, y, w, h) order."""
        # The loader yields bboxes as (x, y, w, h, class); runml_checks expects
        # the class id first, so reorder the columns of every non-empty tensor.
        column_order = [4, 0, 1, 2, 3]
        labels = []
        for tensor in batch[1]:
            if len(tensor) == 0:
                labels.append(tensor)
            else:
                index = torch.LongTensor(column_order).to(tensor.device)
                labels.append(torch.index_select(tensor, 1, index))
        return labels

    def infer_on_batch(self, batch, model, device) -> List[torch.Tensor]:
        """Run the model on a batch and return per-image (x, y, w, h, conf, class) tensors."""
        results = []
        with warnings.catch_warnings():
            warnings.simplefilter(action='ignore', category=UserWarning)
            predictions: 'yolov5.models.common.Detections' = model.to(device)(batch[0])  # noqa: F821
            # yolo Detections objects expose per-image xyxy tensors in .pred;
            # replace the bottom-right corner by width/height.
            for image_prediction in predictions.pred:
                converted = torch.clone(image_prediction)
                converted[:, 2] = converted[:, 2] - converted[:, 0]  # w = x_right - x_left
                converted[:, 3] = converted[:, 3] - converted[:, 1]  # h = y_bottom - y_top
                results.append(converted)
        return results

    def batch_to_images(self, batch) -> Iterable[np.ndarray]:
        """Convert the batch images to a list of numpy arrays."""
        return list(map(np.array, batch[0]))
def _batch_collate(batch):
imgs, labels = zip(*batch)
return list(imgs), list(labels)
def load_dataset(
        train: bool = True,
        batch_size: int = 32,
        num_workers: int = 0,
        shuffle: bool = True,
        pin_memory: bool = True,
        object_type: Literal['VisionData', 'DataLoader'] = 'DataLoader'
) -> t.Union[DataLoader, vision.VisionData]:
    """Get the COCO128 dataset and return a dataloader.

    Parameters
    ----------
    train : bool, default: True
        if `True` train dataset, otherwise test dataset
    batch_size : int, default: 32
        Batch size for the dataloader.
    num_workers : int, default: 0
        Number of workers for the dataloader.
    shuffle : bool, default: True
        Whether to shuffle the dataset.
    pin_memory : bool, default: True
        If ``True``, the data loader will copy Tensors
        into CUDA pinned memory before returning them.
    object_type : Literal['Dataset', 'DataLoader'], default: 'DataLoader'
        type of the return value. If 'Dataset', :obj:`runml_checks.vision.VisionDataset`
        will be returned, otherwise :obj:`torch.utils.data.DataLoader`

    Returns
    -------
    Union[DataLoader, VisionDataset]
        A DataLoader or VisionDataset instance representing COCO128 dataset
    """
    # Download (or reuse) the dataset, then wrap it with a no-op albumentations
    # pipeline so bbox bookkeeping stays in COCO format.
    coco_dir, dataset_name = CocoDataset.download_coco128(DATA_DIR)
    transforms = A.Compose(
        [A.NoOp()],
        bbox_params=A.BboxParams(format='coco')
    )
    dataset = CocoDataset(
        root=str(coco_dir),
        name=dataset_name,
        train=train,
        transforms=transforms
    )
    dataloader = DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        collate_fn=_batch_collate,
        pin_memory=pin_memory,
        generator=torch.Generator()
    )
    if object_type == 'DataLoader':
        return dataloader
    if object_type == 'VisionData':
        return COCOData(
            data_loader=dataloader,
            num_classes=80,
            label_map=LABEL_MAP
        )
    raise TypeError(f'Unknown value of object_type - {object_type}')
class CocoDataset(VisionDataset):
    """An instance of PyTorch VisionData the represents the COCO128 dataset.

    Parameters
    ----------
    root : str
        Path to the root directory of the dataset.
    name : str
        Name of the dataset.
    train : bool
        if `True` train dataset, otherwise test dataset
    transform : Callable, optional
        A function/transforms that takes in an image and a label and returns the
        transformed versions of both.
        E.g, ``transforms.Rotate``
    target_transform : Callable, optional
        A function/transform that takes in the target and transforms it.
    transforms : Callable, optional
        A function/transform that takes in an PIL image and returns a transformed version.
        E.g, transforms.RandomCrop
    """

    # Fraction of the (sorted) images assigned to the train split.
    TRAIN_FRACTION = 0.5

    def __init__(
            self,
            root: str,
            name: str,
            train: bool = True,
            transform: t.Optional[t.Callable] = None,
            target_transform: t.Optional[t.Callable] = None,
            transforms: t.Optional[t.Callable] = None,
    ) -> None:
        super().__init__(root, transforms, transform, target_transform)

        self.train = train
        self.root = Path(root).absolute()
        self.images_dir = Path(root) / 'images' / name
        self.labels_dir = Path(root) / 'labels' / name

        # Pair every image with its YOLO label file (None when no labels exist).
        images: t.List[Path] = sorted(self.images_dir.glob('./*.jpg'))
        labels: t.List[t.Optional[Path]] = []
        for image in images:
            label = self.labels_dir / f'{image.stem}.txt'
            labels.append(label if label.exists() else None)

        assert \
            len(images) != 0, \
            'Did not find folder with images or it was empty'
        assert \
            not all(l is None for l in labels), \
            'Did not find folder with labels or it was empty'

        # Deterministic split: the first TRAIN_FRACTION of the sorted images
        # form the train set, the rest the test set.
        train_len = int(self.TRAIN_FRACTION * len(images))
        if self.train is True:
            self.images = images[0:train_len]
            self.labels = labels[0:train_len]
        else:
            self.images = images[train_len:]
            self.labels = labels[train_len:]

    def __getitem__(self, idx: int) -> t.Tuple[Image.Image, np.ndarray]:
        """Get the image and label at the given index.

        Returns the image as a PIL image and the bboxes as a tensor of
        (x, y, w, h, class) rows with absolute top-left coordinates.
        """
        # open image using cv2, since opening with Pillow give slightly different results based on Pillow version
        opencv_image = cv2.imread(str(self.images[idx]))
        img = Image.fromarray(cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB))

        label_file = self.labels[idx]
        if label_file is not None:
            # read_text() closes the file handle; the previous
            # `label_file.open('r').read()` leaked an open file per sample.
            img_labels = [l.split() for l in label_file.read_text().strip().splitlines()]
            img_labels = np.array(img_labels, dtype=np.float32)
        else:
            img_labels = np.zeros((0, 5), dtype=np.float32)

        # Transform x,y,w,h in yolo format (x, y are of the image center, and coordinates are normalized) to standard
        # x,y,w,h format, where x,y are of the top left corner of the bounding box and coordinates are absolute.
        bboxes = []
        for label in img_labels:
            x, y, w, h = label[1:]
            # Note: probably the normalization loses some accuracy in the coordinates as it truncates the number,
            # leading in some cases to `y - h / 2` or `x - w / 2` to be negative
            bboxes.append(np.array([
                max((x - w / 2) * img.width, 0),
                max((y - h / 2) * img.height, 0),
                w * img.width,
                h * img.height,
                label[0]
            ]))

        img, bboxes = self.apply_transform(img, bboxes)

        # Return tensor of bboxes
        if bboxes:
            bboxes = torch.stack([torch.tensor(x) for x in bboxes])
        else:
            bboxes = torch.tensor([])
        return img, bboxes

    def apply_transform(self, img, bboxes):
        """Implement the transform in a function to be able to override it in tests."""
        if self.transforms is not None:
            # Albumentations accepts images as numpy and bboxes in defined format + class at the end
            transformed = self.transforms(image=np.array(img), bboxes=bboxes)
            img = Image.fromarray(transformed['image'])
            bboxes = transformed['bboxes']
        return img, bboxes

    def __len__(self) -> int:
        """Return the number of images in the dataset."""
        return len(self.images)

    @classmethod
    def download_coco128(cls, root: t.Union[str, Path]) -> t.Tuple[Path, str]:
        """Download coco128 and returns the root path and folder name.

        Parameters
        ----------
        root : Union[str, Path]
            Directory under which the ``coco128`` folder is (or will be) placed.

        Returns
        -------
        Tuple[Path, str]
            The ``coco128`` directory and the dataset folder name ('train2017').
        """
        root = root if isinstance(root, Path) else Path(root)
        coco_dir = root / 'coco128'
        images_dir = coco_dir / 'images' / 'train2017'
        labels_dir = coco_dir / 'labels' / 'train2017'

        if not (root.exists() and root.is_dir()):
            raise RuntimeError(f'root path does not exist or is not a dir - {root}')

        # Skip the download when the dataset is already extracted.
        if images_dir.exists() and labels_dir.exists():
            return coco_dir, 'train2017'

        return download_coco128_from_ultralytics(root)
def download_coco128_from_ultralytics(path: Path):
    """Download coco from ultralytics using torchvision download_and_extract_archive."""
    coco_dir = path / 'coco128'
    url = 'https://ultralytics.com/assets/coco128.zip'
    md5 = '90faf47c90d1cfa5161c4298d890df55'

    # Suppress the download progress prints torchvision writes to stdout.
    with open(os.devnull, 'w', encoding='utf8') as devnull, contextlib.redirect_stdout(devnull):
        download_and_extract_archive(
            url,
            download_root=str(path),
            extract_root=str(path),
            md5=md5
        )

    # Removing the README.txt file if it exists since it causes issues with sphinx-gallery
    with contextlib.suppress(FileNotFoundError):
        os.remove(str(coco_dir / 'README.txt'))

    return coco_dir, 'train2017'
def yolo_prediction_formatter(batch, model, device) -> t.List[torch.Tensor]:
    """Convert from yolo Detections object to List (per image) of Tensors of the shape [N, 6] with each row being \
    [x, y, w, h, confidence, class] for each bbox in the image."""
    formatted = []
    with warnings.catch_warnings():
        warnings.simplefilter(action='ignore', category=UserWarning)
        detections = model.to(device)(batch[0])
        # yolo Detections objects expose per-image xyxy tensors in .pred;
        # replace the bottom-right corner columns by width/height.
        for image_prediction in detections.pred:
            converted = torch.clone(image_prediction)
            converted[:, 2] = converted[:, 2] - converted[:, 0]  # w = x_right - x_left
            converted[:, 3] = converted[:, 3] - converted[:, 1]  # h = y_bottom - y_top
            formatted.append(converted)
    return formatted
def yolo_label_formatter(batch):
    """Translate yolo label to runml_checks format."""
    # Labels arrive as (x, y, w, h, class); runml_checks expects the class id
    # first, so reorder the columns of every non-empty tensor.
    column_order = [4, 0, 1, 2, 3]
    formatted = []
    for tensor in batch[1]:
        if len(tensor) == 0:
            formatted.append(tensor)
        else:
            index = torch.LongTensor(column_order).to(tensor.device)
            formatted.append(torch.index_select(tensor, 1, index))
    return formatted
def yolo_image_formatter(batch):
    """Convert list of PIL images to runml_checks image format."""
    # Yolo works on PIL images while VisionDataset expects numpy arrays.
    return list(map(np.array, batch[0]))
LABEL_MAP = {
0: 'person',
1: 'bicycle',
2: 'car',
3: 'motorcycle',
4: 'airplane',
5: 'bus',
6: 'train',
7: 'truck',
8: 'boat',
9: 'traffic light',
10: 'fire hydrant',
11: 'stop sign',
12: 'parking meter',
13: 'bench',
14: 'bird',
15: 'cat',
16: 'dog',
17: 'horse',
18: 'sheep',
19: 'cow',
20: 'elephant',
21: 'bear',
22: 'zebra',
23: 'giraffe',
24: 'backpack',
25: 'umbrella',
26: 'handbag',
27: 'tie',
28: 'suitcase',
29: 'frisbee',
30: 'skis',
31: 'snowboard',
32: 'sports ball',
33: 'kite',
34: 'baseball bat',
35: 'baseball glove',
36: 'skateboard',
37: 'surfboard',
38: 'tennis racket',
39: 'bottle',
40: 'wine glass',
41: 'cup',
42: 'fork',
43: 'knife',
44: 'spoon',
45: 'bowl',
46: 'banana',
47: 'apple',
48: 'sandwich',
49: 'orange',
50: 'broccoli',
51: 'carrot',
52: 'hot dog',
53: 'pizza',
54: 'donut',
55: 'cake',
56: 'chair',
57: 'couch',
58: 'potted plant',
59: 'bed',
60: 'dining table',
61: 'toilet',
62: 'tv',
63: 'laptop',
64: 'mouse',
65: 'remote',
66: 'keyboard',
67: 'cell phone',
68: 'microwave',
69: 'oven',
70: 'toaster',
71: 'sink',
72: 'refrigerator',
73: 'book',
74: 'clock',
75: 'vase',
76: 'scissors',
77: 'teddy bear',
78: 'hair drier',
79: 'toothbrush'
} | /runml_checks-1.0.0-py3-none-any.whl/runml_checks/vision/datasets/detection/coco.py | 0.940776 | 0.751397 | coco.py | pypi |
"""Module containing measurements for labels and predictions."""
import warnings
from typing import Any, Dict, List, Sequence
import torch
from runml_checks.core.errors import runml_checksValueError
# Labels
def _get_bbox_area(labels: List[torch.Tensor]) -> List[List[int]]:
"""Return a list containing the area of bboxes in batch."""
return [(label.reshape((-1, 5))[:, 4] * label.reshape((-1, 5))[:, 3]).tolist()
for label in labels]
def _count_num_bboxes(labels: List[torch.Tensor]) -> List[int]:
"""Return a list containing the number of bboxes in per sample batch."""
num_bboxes = [label.shape[0] for label in labels]
return num_bboxes
def _get_samples_per_class_object_detection(labels: List[torch.Tensor]) -> List[List[int]]:
"""Return a list containing the classes in batch."""
return [tensor.reshape((-1, 5))[:, 0].tolist() for tensor in labels]
def _get_samples_per_class_classification(labels: torch.Tensor) -> List[int]:
    """Return a list containing the class per image in batch."""
    # Classification labels are a tensor of class ids, one per sample.
    return labels.tolist()
# Default property sets extracted from labels. Each property is a dict with a
# display name, the extraction callable, and the output type consumed by the
# drift checks (see validate_properties below).
DEFAULT_CLASSIFICATION_LABEL_PROPERTIES = [
    {'name': 'Samples Per Class', 'method': _get_samples_per_class_classification, 'output_type': 'class_id'}
]

DEFAULT_OBJECT_DETECTION_LABEL_PROPERTIES = [
    {'name': 'Samples Per Class', 'method': _get_samples_per_class_object_detection, 'output_type': 'class_id'},
    {'name': 'Bounding Box Area (in pixels)', 'method': _get_bbox_area, 'output_type': 'numerical'},
    {'name': 'Number of Bounding Boxes Per Image', 'method': _count_num_bboxes, 'output_type': 'numerical'},
]
# Predictions
def _get_samples_per_predicted_class_classification(predictions: torch.Tensor) -> List[int]:
"""Return a list containing the classes in batch."""
return torch.argmax(predictions, dim=1).tolist()
def _get_samples_per_predicted_class_object_detection(predictions: List[torch.Tensor]) -> List[List[int]]:
"""Return a list containing the classes in batch."""
return [tensor.reshape((-1, 6))[:, -1].tolist() for tensor in predictions]
def _get_predicted_bbox_area(predictions: List[torch.Tensor]) -> List[List[int]]:
"""Return a list containing the area of bboxes per image in batch."""
return [(prediction.reshape((-1, 6))[:, 2] * prediction.reshape((-1, 6))[:, 3]).tolist()
for prediction in predictions]
# Default property sets extracted from model predictions; same structure as
# the label property sets above (name, extraction callable, output type).
DEFAULT_CLASSIFICATION_PREDICTION_PROPERTIES = [
    {
        'name': 'Samples Per Class',
        'method': _get_samples_per_predicted_class_classification,
        'output_type': 'class_id'
    }
]

DEFAULT_OBJECT_DETECTION_PREDICTION_PROPERTIES = [
    {
        'name': 'Samples Per Class',
        'method': _get_samples_per_predicted_class_object_detection,
        'output_type': 'class_id'
    },
    {
        'name': 'Bounding Box Area (in pixels)',
        'method': _get_predicted_bbox_area,
        'output_type': 'numerical'
    },
    {
        'name': 'Number of Bounding Boxes Per Image',
        'method': _count_num_bboxes,
        'output_type': 'numerical'
    },
]
# Helper functions
def validate_properties(properties: List[Dict[str, Any]]):
    """Validate structure of measurements.

    Each property must be a dict with the keys 'name', 'method' and
    'output_type', where 'output_type' is one of 'categorical', 'numerical'
    or 'class_id'. The deprecated 'discrete'/'continuous' values are still
    accepted but trigger a DeprecationWarning.

    Parameters
    ----------
    properties : List[Dict[str, Any]]
        List of property dictionaries to validate.

    Returns
    -------
    List[Dict[str, Any]]
        The validated properties list, unchanged.

    Raises
    ------
    runml_checksValueError
        If the list or any of its items is malformed; all problems are
        reported together in a single error message.
    """
    if not isinstance(properties, list):
        raise runml_checksValueError(
            'Expected properties to be a list, '
            f'instead got {type(properties).__name__}'
        )
    if len(properties) == 0:
        raise runml_checksValueError('Properties list can\'t be empty')

    expected_keys = ('name', 'method', 'output_type')
    deprecated_output_types = ('discrete', 'continuous')
    output_types = ('categorical', 'numerical', 'class_id')

    # Collect every problem before raising so the user sees them all at once.
    errors = []
    list_of_warnings = []

    for index, label_property in enumerate(properties):
        if not isinstance(label_property, dict):
            errors.append(
                f'Item #{index}: property must be of type dict, '
                f'and include keys {expected_keys}. Instead got {type(label_property).__name__}'
            )
            continue
        # Fall back to the item index when the property carries no name.
        property_name = label_property.get('name') or f'#{index}'
        difference = sorted(set(expected_keys).difference(set(label_property.keys())))
        if len(difference) > 0:
            errors.append(
                f'Property {property_name}: dictionary must include keys {expected_keys}. '
                f'Next keys are missed {difference}'
            )
            continue
        property_output_type = label_property['output_type']
        if property_output_type in deprecated_output_types:
            # Deprecated types are still valid, only warned about.
            list_of_warnings.append(
                f'Property {property_name}: output types {deprecated_output_types} are deprecated, '
                f'use instead {output_types}'
            )
        elif property_output_type not in output_types:
            errors.append(
                f'Property {property_name}: field "output_type" must be one of {output_types}, '
                f'instead got {property_output_type}'
            )

    if len(errors) > 0:
        errors = '\n+ '.join(errors)
        raise runml_checksValueError(f'List of properties contains next problems:\n+ {errors}')
    if len(list_of_warnings) > 0:
        concatenated_warnings = '\n+ '.join(list_of_warnings)
        warnings.warn(
            f'Property Warnings:\n+ {concatenated_warnings}',
            category=DeprecationWarning
        )

    return properties
def get_column_type(output_type):
    """Get column type to use in drift functions."""
    # TODO smarter mapping based on data?
    # NOTE/TODO: this function is kept only for backward compatibility, remove it later
    mapping = dict.fromkeys(('discrete', 'class_id', 'categorical'), 'categorical')
    mapping.update(dict.fromkeys(('continuous', 'numerical'), 'numerical'))
    return mapping[output_type]
def properties_flatten(in_list: Sequence) -> List:
    """Flatten a list of lists into a single level list."""
    flattened = []
    for element in in_list:
        # Strings/bytes are sequences too, but must be kept whole.
        is_nested = isinstance(element, Sequence) and not isinstance(element, (str, bytes))
        if is_nested:
            flattened.extend(element)
        else:
            flattened.append(element)
    return flattened
"""Module for defining detection encoders."""
from collections import Counter
from typing import Iterable, List, Sequence, Tuple, Union
import numpy as np
import torch
from PIL.Image import Image
__all__ = ['verify_bbox_format_notation', 'convert_batch_of_bboxes', 'convert_bbox', 'DEFAULT_PREDICTION_FORMAT']
DEFAULT_PREDICTION_FORMAT = 'xywhsl'
def verify_bbox_format_notation(notation: str) -> Tuple[bool, List[str]]:
    """Verify and tokenize bbox format notation.

    Parameters
    ----------
    notation : str
        format notation to verify and to tokenize

    Returns
    -------
    Tuple[
        bool,
        List[Literal['label', 'score', 'width', 'height', 'xmin', 'ymin', 'xmax', 'ymax', 'xcenter', 'ycenter']]
    ]
        first item indicates whether coordinates are normalized or not,
        second represents format of the bbox

    Raises
    ------
    ValueError
        If the notation contains unknown characters or an element combination
        that is not one of the supported bbox layouts.
    """
    tokens = []
    are_coordinates_normalized = False
    current = notation = notation.strip().lower()
    current_pos = 0

    # Greedy left-to-right scan of the notation; the 'n' flag is only allowed
    # as the very first or the very last character.
    while current:
        if current.startswith('l'):
            tokens.append('l')
            current = current[1:]
            current_pos = current_pos + 1
        elif current.startswith('s'):
            tokens.append('s')
            current = current[1:]
            current_pos = current_pos + 1
        elif current.startswith('wh'):
            tokens.append('wh')
            current = current[2:]
            current_pos = current_pos + 2
        elif current.startswith('xy'):
            tokens.append('xy')
            current = current[2:]
            current_pos = current_pos + 2
        elif current.startswith('cxcy'):
            tokens.append('cxcy')
            current = current[4:]
            current_pos = current_pos + 4
        elif current.startswith('n') and current_pos == 0:
            are_coordinates_normalized = True
            current = current[1:]
            current_pos = current_pos + 1
        elif current.startswith('n') and (current_pos + 1) == len(notation):
            are_coordinates_normalized = True
            current_pos = current_pos + 1
            break
        else:
            # Fixed typo ("charecters") and the missing closing parenthesis.
            raise ValueError(
                f'Wrong bbox format notation - {notation}. '
                f'Incorrect or unknown sequence of characters starting from position {current_pos} '
                f'(sequence: ...{notation[current_pos:]})'
            )

    received_combination = Counter(tokens)
    allowed_combinations = [
        {'l': 1, 'xy': 2},
        {'l': 1, 'xy': 1, 'wh': 1},
        {'l': 1, 'cxcy': 1, 'wh': 1}
    ]
    # All allowed combinations are also allowed with or without score to support both label and prediction
    allowed_combinations += [{**c, 's': 1} for c in allowed_combinations]

    if sum(c == received_combination for c in allowed_combinations) != 1:
        # Fixed typos ("begining", "ned") in the user-facing message.
        raise ValueError(
            f'Incorrect bbox format notation - {notation}.\n'
            'Only next combinations of elements are allowed:\n'
            '+ lxyxy (label, upper-left corner, bottom-right corner)\n'
            '+ lxywh (label, upper-left corner, bbox width and height)\n'
            '+ lcxcywh (label, bbox center, bbox width and height)\n'
            '+ lcxcywhn (label, normalized bbox center, bbox width and height)\n\n'
            ''
            'Note:\n'
            '- notation elements (l, xy, cxcy, wh) can be placed in any order '
            'but only above combinations of elements are allowed\n'
            '- "n" at the beginning or at the end of the notation indicates '
            'normalized coordinates\n'
        )

    # Expand the short tokens into canonical field names; the first 'xy' pair
    # is the top-left corner and the second one is the bottom-right corner.
    normalized_tokens = []
    for t in tokens:
        if t == 'l':
            normalized_tokens.append('label')
        elif t == 's':
            normalized_tokens.append('score')
        elif t == 'wh':
            normalized_tokens.extend(('width', 'height'))
        elif t == 'cxcy':
            normalized_tokens.extend(('xcenter', 'ycenter'))
        elif t == 'xy':
            if 'xmin' not in normalized_tokens and 'ymin' not in normalized_tokens:
                normalized_tokens.extend(('xmin', 'ymin'))
            else:
                normalized_tokens.extend(('xmax', 'ymax'))
        else:
            raise RuntimeError('Internal Error! Unreachable part of code reached')

    return are_coordinates_normalized, normalized_tokens
# Type alias: a batch is an iterable of (image, bboxes) pairs, where the image
# may be a PIL image, numpy array or torch tensor, and bboxes is a sequence of
# numeric sequences (one inner sequence per bbox).
_BatchOfSamples = Iterable[
    Tuple[
        Union[Image, np.ndarray, torch.Tensor],  # image
        Sequence[Sequence[Union[int, float]]]  # bboxes
    ]
]
def convert_batch_of_bboxes(
        batch: _BatchOfSamples,
        notation: str,
        device: Union[str, torch.device, None] = None
) -> List[torch.Tensor]:
    """Convert batch of bboxes to the required format.

    Parameters
    ----------
    batch : iterable of tuple like object with two items - image, list of bboxes
        batch of images and bboxes
    notation : str
        bboxes format notation
    device : Union[str, torch.device, None], default: None
        device for use

    Returns
    -------
    List[torch.Tensor]
        list of transformed bboxes
    """
    are_coordinates_normalized, notation_tokens = verify_bbox_format_notation(notation)
    output = []

    for image, bboxes in batch:
        if len(bboxes) == 0:
            # image does not have bboxes
            output.append(torch.tensor([]))
            continue

        # Image dimensions are only needed to denormalize coordinates.
        image_height = image_width = None
        if are_coordinates_normalized:
            if isinstance(image, Image):
                image_height, image_width = image.height, image.width
            elif isinstance(image, (np.ndarray, torch.Tensor)):
                image_height, image_width, *_ = image.shape
            else:
                raise TypeError(
                    'Do not know how to take dimension sizes of '
                    f'object of type - {type(image)}'
                )

        converted = []
        for bbox in bboxes:
            if len(bbox) < 5:
                raise ValueError('incorrect bbox')  # TODO: better message
            converted.append(_convert_bbox(
                bbox,
                notation_tokens,
                device=device,
                image_width=image_width,
                image_height=image_height,
            ))
        output.append(torch.stack(converted, dim=0))

    return output
def convert_bbox(
    bbox: Sequence[Union[int, float]],
    notation: str,
    image_width: Union[int, float, None] = None,
    image_height: Union[int, float, None] = None,
    device: Union[str, torch.device, None] = None,
    _strict: bool = True  # pylint: disable=invalid-name
) -> torch.Tensor:
    """Convert bbox to the required format.

    Parameters
    ----------
    bbox : Sequence[Union[int, float]]
        bbox to transform
    notation : str
        bboxes format notation
    image_width : Union[int, float, None], default: None
        width of the image to denormalize bbox coordinates
    image_height : Union[int, float, None], default: None
        height of the image to denormalize bbox coordinates
    device : Union[str, torch.device, None], default: None
        device for use
    _strict : bool, default: True
        when True, passing image dimensions together with a non-normalized
        notation raises; when False the dimensions are silently ignored
        (used internally, e.g. by draw_bboxes)

    Returns
    -------
    torch.Tensor
        bbox transformed to the required by runml_checks format
    """
    if len(bbox) < 5:
        raise ValueError('incorrect bbox')  # TODO: better message

    are_coordinates_normalized, notation_tokens = verify_bbox_format_notation(notation)

    # Normalized coordinates cannot be denormalized without image dimensions.
    if (
        are_coordinates_normalized is True
        and (image_height is None or image_width is None)
    ):
        raise ValueError(
            'bbox format notation indicates that coordinates of the bbox '
            'are normalized but \'image_height\' and \'image_width\' parameters '
            'were not provided. Please pass image height and width parameters '
            'or remove \'n\' element from the format notation.'
        )

    # Dimensions together with non-normalized coordinates are redundant:
    # an error in strict mode, silently dropped otherwise.
    if (
        are_coordinates_normalized is False
        and (image_height is not None or image_width is not None)
    ):
        if _strict is True:
            raise ValueError(
                'bbox format notation indicates that coordinates of the bbox '
                'are not normalized but \'image_height\' and \'image_width\' were provided. '
                'Those parameters are redundant in the case when bbox coordinates are not '
                'normalized. Please remove those parameters or add \'n\' element to the format '
                'notation to indicate that coordinates are indeed normalized.'
            )
        else:
            image_height = None
            image_width = None

    return _convert_bbox(
        bbox,
        notation_tokens,
        image_width,
        image_height,
        device,
    )
def _convert_bbox(
bbox: Sequence[Union[int, float]],
notation_tokens: List[str],
image_width: Union[int, float, None] = None,
image_height: Union[int, float, None] = None,
device: Union[str, torch.device, None] = None
) -> torch.Tensor:
assert \
(image_width is not None and image_height is not None) \
or (image_width is None and image_height is None)
data = dict(zip(notation_tokens, bbox))
if 'xcenter' in data and 'ycenter' in data:
if image_width is not None and image_height is not None:
xcenter, ycenter = data['xcenter'] * image_width, data['ycenter'] * image_height
else:
xcenter, ycenter = data['xcenter'], data['ycenter']
return torch.tensor([
data['label'],
xcenter - (data['width'] / 2),
ycenter - (data['height'] / 2),
data['width'],
data['height'],
], device=device)
elif 'height' in data and 'width' in data:
if image_width is not None and image_height is not None:
xmin, ymin = data['xmin'] * image_width, data['ymin'] * image_height
else:
xmin, ymin = data['xmin'], data['ymin']
return torch.tensor([
data['label'],
xmin,
ymin,
data['width'],
data['height'],
], device=device)
else:
if image_width is not None and image_height is not None:
xmin, ymin = data['xmin'] * image_width, data['ymin'] * image_height
xmax, ymax = data['xmax'] * image_width, data['ymax'] * image_height
else:
xmin, ymin = data['xmin'], data['ymin']
xmax, ymax = data['xmax'], data['ymax']
return torch.tensor([
data['label'],
xmin,
ymin,
xmax - xmin,
ymax - ymin,
], device=device) | /runml_checks-1.0.0-py3-none-any.whl/runml_checks/vision/utils/detection_formatters.py | 0.883132 | 0.499634 | detection_formatters.py | pypi |
"""Module for defining functions related to image data."""
import io
import typing as t
import cv2
import numpy as np
import PIL.Image as pilimage
import PIL.ImageDraw as pildraw
import PIL.ImageOps as pilops
import plotly.graph_objects as go
import torch
from runml_checks.core.errors import runml_checksValueError
from runml_checks.utils.html import imagetag
from .detection_formatters import convert_bbox
__all__ = ['ImageInfo', 'numpy_grayscale_to_heatmap_figure', 'ensure_image',
'apply_heatmap_image_properties', 'draw_bboxes', 'prepare_thumbnail',
'crop_image']
class ImageInfo:
    """Class with methods defined to extract metadata about image."""

    def __init__(self, img):
        # Only numpy images are supported.
        if not isinstance(img, np.ndarray):
            raise runml_checksValueError('Expect image to be numpy array')
        self.img = img

    def get_size(self) -> t.Tuple[int, int]:
        """Get size of image as (width, height) tuple."""
        height, width = self.img.shape[0], self.img.shape[1]
        return width, height

    def get_dimension(self) -> int:
        """Return the number of dimensions of the image (grayscale = 1, RGB = 3)."""
        return self.img.shape[2]

    def is_equals(self, img_b) -> bool:
        """Compare image to another image for equality."""
        return np.array_equal(self.img, img_b)
def ensure_image(
    image: t.Union[pilimage.Image, np.ndarray, torch.Tensor],
    copy: bool = True
) -> pilimage.Image:
    """Transform to `PIL.Image.Image` if possible.

    Parameters
    ----------
    image : Union[PIL.Image.Image, numpy.ndarray, torch.Tensor]
        image to convert
    copy : bool, default True
        if `image` is an instance of the `PIL.Image.Image` return
        it as it is or copy it.

    Returns
    -------
    `PIL.Image.Image`
    """
    if isinstance(image, pilimage.Image):
        return image.copy() if copy is True else image

    # Tensors are converted to numpy first and handled below.
    if isinstance(image, torch.Tensor):
        image = t.cast(np.ndarray, image.numpy())

    if not isinstance(image, np.ndarray):
        raise TypeError(f'cannot convert {type(image)} to the PIL.Image.Image')

    image = image.squeeze().astype(np.uint8)
    if image.ndim == 3:
        return pilimage.fromarray(image)
    if image.ndim == 2:
        # Grayscale: stretch the value range to black..white while converting.
        return pilops.colorize(
            pilimage.fromarray(image),
            black='black',
            white='white',
            blackpoint=image.min(),
            whitepoint=image.max(),
        )
    raise ValueError(f'Do not know how to work with {image.ndim} dimensional images')
def draw_bboxes(
    image: t.Union[pilimage.Image, np.ndarray, torch.Tensor],
    bboxes: t.Union[np.ndarray, torch.Tensor],
    bbox_notation: t.Optional[str] = None,
    copy_image: bool = True,
    border_width: int = 1,
    color: t.Union[str, t.Dict[np.number, str]] = 'red',
) -> pilimage.Image:
    """Draw bboxes on the image.

    Parameters
    ----------
    image : Union[PIL.Image.Image, numpy.ndarray, torch.Tensor]
        image to draw on
    bboxes : Union[numpy.ndarray, torch.Tensor]
        array of bboxes
    bbox_notation : Optional[str], default None
        format of the provided bboxes
    copy_image : bool, default True
        copy image before drawing or not
    border_width : int, default 1
        width of the bbox outline
    color: Union[str, Dict[number, str]], default "red"
        color of the bbox outline. It could be a map mapping class id to the color

    Returns
    -------
    PIL.Image.Image : image instance with drawen bboxes on it
    """
    image = ensure_image(image, copy=copy_image)

    # Convert to runml format (label, x, y, w, h) when a notation is given.
    if bbox_notation is not None:
        converted = [
            convert_bbox(
                bbox,
                notation=bbox_notation,
                image_width=image.width,
                image_height=image.height,
                _strict=False
            ).tolist()
            for bbox in bboxes
        ]
        bboxes = np.array(converted)

    draw = pildraw.ImageDraw(image)
    for bbox in bboxes:
        clazz, x0, y0, w, h = bbox.tolist()
        x1, y1 = x0 + w, y0 + h

        if isinstance(color, str):
            outline = color
        elif isinstance(color, dict):
            outline = color[clazz]
        else:
            raise TypeError('color must be of type - Union[str, Dict[int, str]]')

        draw.rectangle(xy=(x0, y0, x1, y1), width=border_width, outline=outline)
        # Label the box with its class id, placed inside the rectangle.
        draw.text(xy=(x0 + (w * 0.5), y0 + (h * 0.2)), text=str(clazz), fill=outline)

    return image
def prepare_thumbnail(
    image: t.Union[pilimage.Image, np.ndarray, torch.Tensor],
    size: t.Optional[t.Tuple[int, int]] = None,
    copy_image: bool = True,
) -> str:
    """Prepare html image tag with the provided image.

    Parameters
    ----------
    image : Union[PIL.Image.Image, numpy.ndarray, torch.Tensor]
        image to use
    size : Optional[Tuple[int, int]], default None
        maximal (width, height) to which the image is rescaled while keeping
        its aspect ratio
    copy_image : bool, default True
        whether to copy the image before converting/rescaling it

    Returns
    -------
    str : html '<img>' tag with embedded image
    """
    if size is not None:
        image = ensure_image(image, copy=copy_image)
        # First define the correct size with respect to the original aspect ratio
        width_factor = size[0] / image.size[0]
        height_factor = size[1] / image.size[1]
        # Takes the minimum factor in order for the image to not exceed the size in either width or height
        factor = min(width_factor, height_factor)
        size = (int(image.size[0] * factor), int(image.size[1] * factor))
        # Resize the image. LANCZOS is the same filter ANTIALIAS aliased;
        # ANTIALIAS was deprecated in Pillow 9.1 and removed in Pillow 10,
        # so use the canonical name.
        image = image.resize(size, pilimage.LANCZOS)
    else:
        image = ensure_image(image, copy=False)

    img_bytes = io.BytesIO()
    image.save(img_bytes, optimize=True, quality=60, format='jpeg')
    img_bytes.seek(0)
    tag = imagetag(img_bytes.read())
    img_bytes.close()
    return tag
def numpy_grayscale_to_heatmap_figure(data: np.ndarray):
    """Create heatmap graph object from given numpy array data."""
    channels = data.shape[2]
    # RGB input is first collapsed to a single grayscale channel.
    if channels == 3:
        data = cv2.cvtColor(data, cv2.COLOR_RGB2GRAY)
    elif channels != 1:
        raise runml_checksValueError(f'Don\'t know to plot images with {channels} dimensions')
    return go.Heatmap(z=data.squeeze(), hoverinfo='skip', coloraxis='coloraxis')
def apply_heatmap_image_properties(fig):
    """For heatmap and grayscale images, need to add those properties which on Image exists automatically."""
    # Plotly images put the origin at the top-left; heatmaps need the y axis
    # flipped explicitly, and both axes constrained to the plotting domain.
    fig.update_xaxes(constrain='domain')
    fig.update_yaxes(autorange='reversed', constrain='domain')
def crop_image(img: np.ndarray, x, y, w, h) -> np.ndarray:
    """Return the cropped numpy array image by x, y, w, h coordinates (top left corner, width and height."""
    # Coordinates may arrive as floats; round them to pixel indices first.
    x, y, w, h = (round(value) for value in (x, y, w, h))
    # Clamp width/height so the crop never extends past the image borders.
    h = min(h, img.shape[0] - y - 1)
    w = min(w, img.shape[1] - x - 1)
    return img[y:y + h, x:x + w]
"""Module for validation of the vision module."""
import os
import random
import traceback
import imgaug
import numpy as np
import torch
from IPython.display import HTML, display
from runml_checks.core.errors import ValidationError
from runml_checks.utils.ipython import is_headless, is_notebook
from runml_checks.utils.strings import create_new_file_name
from runml_checks.vision.batch_wrapper import apply_to_tensor
from runml_checks.vision.utils.detection_formatters import DEFAULT_PREDICTION_FORMAT
from runml_checks.vision.utils.image_functions import draw_bboxes, ensure_image, prepare_thumbnail
from runml_checks.vision.vision_data import TaskType, VisionData
__all__ = ['set_seeds', 'validate_extractors']
def set_seeds(seed: int):
    """Seed every random generator the vision module relies on, for reproducibility.

    Imgaug uses numpy's state, and Albumentation uses the Python and imgaug seeds.

    Parameters
    ----------
    seed : int
        Seed to be set; ``None`` leaves all generators untouched.
    """
    if seed is None:
        return
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    imgaug.seed(seed)
def validate_extractors(dataset: VisionData, model, device=None, image_save_location: str = None,
                        save_images: bool = True):
    """Validate for given data_loader and model that the extractors are valid.

    Parameters
    ----------
    dataset : VisionData
        the dataset to validate.
    model :
        the model to validate.
    device : torch.device
        device to run model on
    image_save_location : str , default: None
        if location is given and the machine doesn't support GUI,
        the images will be saved there.
    save_images : bool , default: True
        if the machine doesn't support GUI the displayed images will be saved
        if the value is True.
    """
    print('runml_checks will try to validate the extractors given...')
    # Pull a single batch and move every tensor in it onto the requested device.
    batch = apply_to_tensor(next(iter(dataset.data_loader)), lambda it: it.to(device))
    images = None
    labels = None
    predictions = None
    label_formatter_error = None
    image_formatter_error = None
    prediction_formatter_error = None
    device = device or torch.device('cpu')
    # Each extractor is validated independently: a ValidationError produces a short
    # 'Fail!' message, any other exception keeps the full traceback for display.
    try:
        dataset.validate_label(batch)
        labels = dataset.batch_to_labels(batch)
    except ValidationError as ex:
        label_formatter_error = 'Fail! ' + str(ex)
    except Exception:  # pylint: disable=broad-except
        label_formatter_error = 'Got exception \n' + traceback.format_exc()
    try:
        dataset.validate_image_data(batch)
        images = dataset.batch_to_images(batch)
    except ValidationError as ex:
        image_formatter_error = 'Fail! ' + str(ex)
    except Exception:  # pylint: disable=broad-except
        image_formatter_error = 'Got exception \n' + traceback.format_exc()
    try:
        dataset.validate_prediction(batch, model, device)
        predictions = dataset.infer_on_batch(batch, model, device)
    except ValidationError as ex:
        prediction_formatter_error = str(ex)
    except Exception:  # pylint: disable=broad-except
        prediction_formatter_error = 'Got exception \n' + traceback.format_exc()
    # Classes - only extractable when the label formatter succeeded.
    if label_formatter_error is None:
        classes = dataset.get_classes(labels)
    else:
        classes = None
    # Plot - build a sample image (with bboxes / class info when available).
    if image_formatter_error is None:
        image = ensure_image(images[0], copy=False)
        image_title = 'Visual example of an image.'
        if dataset.task_type == TaskType.OBJECT_DETECTION:
            # Label bboxes are drawn in red, prediction bboxes in blue.
            if label_formatter_error is None:
                image = draw_bboxes(image, labels[0], copy_image=False)
            if prediction_formatter_error is None:
                image = draw_bboxes(image, predictions[0], copy_image=False, color='blue',
                                    bbox_notation=DEFAULT_PREDICTION_FORMAT)
            if label_formatter_error is None and prediction_formatter_error is None:
                image_title = 'Visual examples of an image with prediction and label data. Label is red, ' \
                              'prediction is blue, and runml_checks loves you.'
            elif label_formatter_error is None:
                image_title = 'Visual example of an image with label data. Could not display prediction.'
            elif prediction_formatter_error is None:
                image_title = 'Visual example of an image with prediction data. Could not display label.'
            else:
                image_title = 'Visual example of an image. Could not display label or prediction.'
        elif dataset.task_type == TaskType.CLASSIFICATION:
            if label_formatter_error is None:
                image_title += f' Label class {labels[0]}'
            if prediction_formatter_error is None:
                pred_class = predictions[0].argmax()
                image_title += f' Prediction class {pred_class}'
    else:
        image = None
        image_title = None

    def get_header(x):
        # Section header: HTML in notebooks, underlined plain text otherwise.
        if is_notebook():
            return f'<h4>{x}</h4>'
        else:
            return x + '\n' + ''.join(['-'] * len(x)) + '\n'

    line_break = '<br>' if is_notebook() else '\n'
    msg = get_header('Structure validation')
    msg += f'Label formatter: {label_formatter_error if label_formatter_error else "Pass!"}{line_break}'
    msg += f'Prediction formatter: {prediction_formatter_error if prediction_formatter_error else "Pass!"}{line_break}'
    msg += f'Image formatter: {image_formatter_error if image_formatter_error else "Pass!"}{line_break}'
    msg += line_break
    msg += get_header('Content validation')
    msg += 'For validating the content within the structure you have to manually observe the classes, image, label ' \
           f'and prediction.{line_break}'
    msg += 'Examples of classes observed in the batch\'s labels: '
    if classes:
        msg += f'{classes[:5]}{line_break}'
    else:
        msg += f'Unable to show due to invalid label formatter.{line_break}'
    if image:
        if not is_notebook():
            msg += 'Visual images & label & prediction: should open in a new window'
    else:
        msg += 'Visual images & label & prediction: Unable to show due to invalid image formatter.'
    if is_notebook():
        display(HTML(msg))
        if image:
            image_html = '<div style="display:flex;flex-direction:column;align-items:baseline;">' \
                         f'{prepare_thumbnail(image, size=(200,200))}<p>{image_title}</p></div>'
            display(HTML(image_html))
    else:
        print(msg)
        if image:
            if is_headless():
                # No display available: optionally persist the image to disk instead.
                if save_images:
                    if image_save_location is None:
                        save_loc = os.getcwd()
                    else:
                        save_loc = image_save_location
                    full_image_path = os.path.join(save_loc, 'runml_checks_formatted_image.jpg')
                    # create_new_file_name avoids overwriting a previous run's image.
                    full_image_path = create_new_file_name(full_image_path)
                    image.save(full_image_path)
                    print('*******************************************************************************')
                    print('This machine does not support GUI')
                    print('The formatted image was saved in:')
                    print(full_image_path)
                    print(image_title)
                    print('validate_extractors can be set to skip the image saving or change the save path')
                    print('*******************************************************************************')
            else:
                image.show()
"""Module containing the image formatter class for the vision module."""
import warnings
from typing import Any, Dict, List, Tuple
import numpy as np
from skimage.color import rgb2gray
from runml_checks.core.errors import runml_checksValueError
__all__ = ['default_image_properties',
'aspect_ratio',
'area',
'brightness',
'rms_contrast',
'mean_red_relative_intensity',
'mean_blue_relative_intensity',
'mean_green_relative_intensity',
'get_size',
'get_dimension',
'validate_properties',
'get_column_type']
def aspect_ratio(batch: List[np.ndarray]) -> List[float]:
    """Return list of floats of image height to width ratio."""
    # Inlines the (height, width) lookup instead of going through _sizes.
    return [img.shape[0] / img.shape[1] for img in batch]
def area(batch: List[np.ndarray]) -> List[int]:
    """Return list of integers of image areas (height multiplied by width)."""
    # shape[:2] is exactly the (height, width) pair get_size would return.
    return [np.prod(img.shape[:2]) for img in batch]
def brightness(batch: List[np.ndarray]) -> List[float]:
    """Calculate brightness on each image in the batch."""
    values = []
    for image in batch:
        # RGB images are converted to grayscale first; grayscale is used as-is.
        gray = image if _is_grayscale(image) else rgb2gray(image)
        values.append(gray.mean())
    return values
def rms_contrast(batch: List[np.array]) -> List[float]:
    """Return RMS contrast of image."""
    contrasts = []
    for image in batch:
        # RMS contrast is the standard deviation of the grayscale intensities.
        gray = image if _is_grayscale(image) else rgb2gray(image)
        contrasts.append(gray.std())
    return contrasts
def mean_red_relative_intensity(batch: List[np.ndarray]) -> List[float]:
    """Return the mean of the red channel relative intensity."""
    # Index 0 of each per-image triple is the red channel.
    return [channel_means[0] for channel_means in _rgb_relative_intensity_mean(batch)]
def mean_green_relative_intensity(batch: List[np.ndarray]) -> List[float]:
    """Return the mean of the green channel relative intensity."""
    # Index 1 of each per-image triple is the green channel.
    return [channel_means[1] for channel_means in _rgb_relative_intensity_mean(batch)]
def mean_blue_relative_intensity(batch: List[np.ndarray]) -> List[float]:
    """Return the mean of the blue channel relative intensity."""
    # Index 2 of each per-image triple is the blue channel.
    return [channel_means[2] for channel_means in _rgb_relative_intensity_mean(batch)]
def _sizes(batch: List[np.ndarray]):
"""Return list of tuples of image height and width."""
return [get_size(img) for img in batch]
def _rgb_relative_intensity_mean(batch: List[np.ndarray]) -> List[Tuple[float, float, float]]:
    """Calculate the per-channel (r, g, b) mean of each image's relative intensities.

    Each image's pixels are first normalized so every color value is expressed
    relative to the pixel's total intensity (see ``_normalize_pixelwise``), and
    the mean of each resulting channel is then taken. Grayscale images have no
    color channels, so they map to ``(None, None, None)``.

    Parameters
    ----------
    batch: List[np.ndarray]
        A list of arrays, each array represents an image in the required runml_checks format.

    Returns
    -------
    List[Tuple[float, float, float]]
        One 3-element entry (normalized channel means) per image.
    """
    means = []
    for img in batch:
        if _is_grayscale(img):
            means.append((None, None, None))
        else:
            means.append(_normalize_pixelwise(img).mean(axis=(1, 2)))
    return means
def _normalize_pixelwise(img: np.ndarray) -> np.ndarray:
"""Normalize the pixel values of an image.
Parameters
----------
img: np.ndarray
The image to normalize.
Returns
-------
np.ndarray
The normalized image.
"""
s = img.sum(axis=2)
return np.array([np.divide(img[:, :, i], s, out=np.zeros_like(img[:, :, i], dtype='float64'), where=s != 0)
for i in range(3)])
def _is_grayscale(img):
return get_dimension(img) == 1
def get_size(img) -> Tuple[int, int]:
    """Get size of image as (height, width) tuple."""
    height, width = img.shape[0], img.shape[1]
    return height, width
def get_dimension(img) -> int:
    """Return the number of dimensions of the image (grayscale = 1, RGB = 3)."""
    channel_count = img.shape[2]
    return channel_count
# Default property set used by the vision checks when the caller supplies none.
# Each entry follows the {'name', 'method', 'output_type'} structure enforced
# by validate_properties below; all defaults are numerical.
default_image_properties = [
    {'name': 'Aspect Ratio', 'method': aspect_ratio, 'output_type': 'numerical'},
    {'name': 'Area', 'method': area, 'output_type': 'numerical'},
    {'name': 'Brightness', 'method': brightness, 'output_type': 'numerical'},
    {'name': 'RMS Contrast', 'method': rms_contrast, 'output_type': 'numerical'},
    {'name': 'Mean Red Relative Intensity', 'method': mean_red_relative_intensity, 'output_type': 'numerical'},
    {'name': 'Mean Green Relative Intensity', 'method': mean_green_relative_intensity, 'output_type': 'numerical'},
    {'name': 'Mean Blue Relative Intensity', 'method': mean_blue_relative_intensity, 'output_type': 'numerical'}
]
def validate_properties(properties: List[Dict[str, Any]]):
    """Validate structure of measurements.

    Every property must be a dict with keys 'name', 'method' and 'output_type';
    'output_type' must be 'categorical' or 'numerical' (the legacy 'discrete' /
    'continuous' values are accepted but trigger a DeprecationWarning).
    All problems are collected and reported together in one error.
    """
    if not isinstance(properties, list):
        raise runml_checksValueError(
            'Expected properties to be a list, '
            f'instead got {type(properties).__name__}'
        )
    if not properties:
        raise runml_checksValueError('Properties list can\'t be empty')

    expected_keys = ('name', 'method', 'output_type')
    deprecated_output_types = ('discrete', 'continuous')
    output_types = ('categorical', 'numerical')
    warning_messages = []
    error_messages = []

    for idx, prop in enumerate(properties):
        if not isinstance(prop, dict):
            error_messages.append(
                f'Item #{idx}: property must be of type dict, '
                f'and include keys {expected_keys}. Instead got {type(prop).__name__}'
            )
            continue
        # Fall back to the positional index when the property has no usable name.
        prop_name = prop.get('name') or f'#{idx}'
        missing_keys = sorted(set(expected_keys) - set(prop.keys()))
        if missing_keys:
            error_messages.append(
                f'Property {prop_name}: dictionary must include keys {expected_keys}. '
                f'Next keys are missed {missing_keys}'
            )
            continue
        output_type = prop['output_type']
        if output_type in deprecated_output_types:
            warning_messages.append(
                f'Property {prop_name}: output types {deprecated_output_types} are deprecated, '
                f'use instead {output_types}'
            )
        elif output_type not in output_types:
            error_messages.append(
                f'Property {prop_name}: field "output_type" must be one of {output_types}, '
                f'instead got {output_type}'
            )

    if error_messages:
        joined_errors = '\n+ '.join(error_messages)
        raise runml_checksValueError(f'List of properties contains next problems:\n+ {joined_errors}')
    if warning_messages:
        concatenated_warnings = '\n+ '.join(warning_messages)
        warnings.warn(
            f'Property Warnings:\n+ {concatenated_warnings}',
            category=DeprecationWarning
        )
    return properties
def get_column_type(output_type):
    """Get column type to use in drift functions."""
    # NOTE: kept only for backward compatibility with the deprecated
    # 'continuous'/'discrete' output types; current types map to themselves.
    legacy_to_current = {
        'continuous': 'numerical',
        'discrete': 'categorical',
        'numerical': 'numerical',
        'categorical': 'categorical',
    }
    return legacy_to_current[output_type]
"""Module for defining functions related to vision transforms."""
import abc
import typing as t
from copy import copy
import albumentations
import imgaug.augmenters as iaa
import numpy as np
import torch
import torchvision.transforms as T
from PIL import Image
from runml_checks.core.errors import runml_checksNotSupportedError, runml_checksValueError
from runml_checks.vision.vision_data import TaskType
__all__ = ['get_transforms_handler', 'un_normalize_batch', 'AbstractTransformations',
'ImgaugTransformations', 'AlbumentationsTransformations']
class AbstractTransformations(abc.ABC):
    """Abstract class for supporting functions for various transforms."""

    # Whether augmentations from this library also transform labels (e.g.
    # bounding boxes) together with the image; subclasses may override.
    is_transforming_labels = True

    @classmethod
    @abc.abstractmethod
    def add_augmentation_in_start(cls, aug, transforms):
        """Add given transformations to the start of given transforms object."""
        raise NotImplementedError()

    @classmethod
    @abc.abstractmethod
    def get_test_transformation(cls):
        """Get transformation which is affecting both image data and bbox."""
        raise NotImplementedError()

    @classmethod
    @abc.abstractmethod
    def get_robustness_augmentations(cls, data_dim: t.Optional[int] = 3) -> t.List[t.Any]:
        """Get default augmentations to use in robustness report check."""
        raise NotImplementedError()
class ImgaugTransformations(AbstractTransformations):
    """Class containing supporting functions for imgaug transforms."""

    @classmethod
    def add_augmentation_in_start(cls, aug, transforms):
        """Add given transformations to the start of given transforms object.

        Raises
        ------
        runml_checksValueError
            If ``aug`` is not an imgaug augmenter.
        """
        if not isinstance(aug, iaa.Augmenter):
            raise runml_checksValueError(f'Transforms is of type imgaug, can\'t add to it type {type(aug).__name__}')
        return iaa.Sequential([aug, transforms])

    @classmethod
    def get_test_transformation(cls):
        """Get transformation which is affecting both image data and bbox."""
        return iaa.Rotate(rotate=(20, 30))

    @classmethod
    def get_robustness_augmentations(cls, data_dim: t.Optional[int] = 3) -> t.List[iaa.Augmenter]:
        """Get default augmentations to use in robustness report check."""
        augmentations = [
            # Tries to be similar to output of
            # albumentations.RandomBrightnessContrast
            # Exact output is difficult
            iaa.Sequential([
                iaa.contrast.LinearContrast([0.8, 1.2]),
                iaa.color.MultiplyBrightness([0.8, 1.2])
            ], name='RandomBrightnessContrast'),
            # mimics albumentations.ShiftScaleRotate
            iaa.geometric.Affine(scale=[0.9, 1.1],
                                 translate_percent=[-0.0625, 0.0625],
                                 rotate=[-45, 45],
                                 order=1,
                                 cval=0,
                                 mode='reflect',
                                 name='ShiftScaleRotate')
        ]
        # Color-space augmentations only apply to 3-channel (RGB) data.
        if data_dim == 3:
            augmentations.extend([
                # mimics albumentations.HueSaturationValue(p=1.0)
                iaa.WithColorspace(
                    to_colorspace='HSV',
                    from_colorspace='RGB',
                    children=[
                        # Hue
                        iaa.WithChannels(0, iaa.Add((-20, 20))),
                        # Saturation
                        iaa.WithChannels(1, iaa.Add((-30, 30))),
                        # Value - BUGFIX: was WithChannels(0, ...) (hue applied twice);
                        # the HSV value channel is index 2.
                        iaa.WithChannels(2, iaa.Add((-20, 20))),
                    ],
                    name='HueSaturationValue'
                ),
                # mimics albumentations.RGBShift
                iaa.Add(value=[-15, 15],
                        per_channel=True,
                        name='RGBShift')
            ])
        return augmentations
class AlbumentationsTransformations(AbstractTransformations):
    """Class containing supporting functions for albumentations transforms."""

    @classmethod
    def add_augmentation_in_start(cls, aug, transforms: albumentations.Compose):
        """Add given transformations to the start of given transforms object."""
        if not isinstance(aug, (albumentations.Compose, albumentations.BasicTransform)):
            raise runml_checksValueError(f'Transforms is of type albumentations, can\'t add to it type '
                                         f'{type(aug).__name__}')
        # Albumentations compose contains preprocessors and another metadata needed, so we can't just create a new one,
        # so we need to copy it.
        album_compose = copy(transforms)
        album_compose.transforms = [aug, *album_compose.transforms]
        return album_compose

    @classmethod
    def get_test_transformation(cls):
        """Get transformation which is affecting both image data and bbox."""
        # Rotation is a spatial (dual) transform: it moves pixels and bboxes alike.
        return albumentations.Rotate(limit=(20, 30), p=1)

    @classmethod
    def get_robustness_augmentations(cls, data_dim: t.Optional[int] = 3) -> t.List[albumentations.BasicTransform]:
        """Get default augmentations to use in robustness report check."""
        augmentations = [
            albumentations.RandomBrightnessContrast(p=1.0),
            albumentations.ShiftScaleRotate(p=1.0),
        ]
        # Color-space augmentations only apply to 3-channel (RGB) data.
        if data_dim == 3:
            augmentations.extend([
                albumentations.HueSaturationValue(p=1.0),
                albumentations.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15, p=1.0)
            ])
        return augmentations
class TorchTransformations(AbstractTransformations):
    """Class containing supporting functions for torch transforms."""

    @classmethod
    def add_augmentation_in_start(cls, aug, transforms: T.Compose):
        """Add given transformations to the start of given transforms object.

        Albumentations augmentations are accepted as well; they are wrapped so
        they can operate on PIL images, tensors or arrays inside a torchvision
        pipeline.
        """
        if isinstance(aug, (albumentations.Compose, albumentations.BasicTransform)):
            alb_aug = aug

            class TorchWrapper:
                # Adapts an albumentations transform to torchvision's
                # callable(image) -> image convention, converting the input
                # to a numpy array and back to its original representation.
                def __call__(self, image):
                    if isinstance(image, Image.Image):
                        return T.ToPILImage()(alb_aug(image=np.array(image))['image'])
                    elif isinstance(image, torch.Tensor):
                        image = image.cpu().detach().numpy()
                        return T.ToTensor()(alb_aug(image=image)['image'])
                    return alb_aug(image=image)['image']

            aug = TorchWrapper()
        elif not isinstance(aug, torch.nn.Module):
            raise runml_checksValueError(f'Transforms is of type torch, can\'t add to it type '
                                         f'{type(aug).__name__}')
        return T.Compose([aug, *transforms.transforms])

    # BUGFIX: the two overrides below are concrete implementations (delegating to
    # the albumentations handler), so the spurious @abc.abstractmethod decorators
    # the original carried were removed.
    @classmethod
    def get_test_transformation(cls):
        """Get transformation which is affecting image data."""
        return AlbumentationsTransformations.get_test_transformation()

    @classmethod
    def get_robustness_augmentations(cls, data_dim: t.Optional[int] = 3) -> t.List[albumentations.BasicTransform]:
        """Get default augmentations to use in robustness report check."""
        return AlbumentationsTransformations.get_robustness_augmentations(data_dim)
class TorchTransformationsBbox(TorchTransformations):
    """Class containing supporting functions for torch transforms (not including image shifting)."""

    # Torch pipelines do not transform bbox labels alongside the image.
    is_transforming_labels = False

    @classmethod
    def get_robustness_augmentations(cls, data_dim: t.Optional[int] = 3) -> t.List[albumentations.BasicTransform]:
        """Get default augmentations to use in robustness report check (without image shift).

        Spatial (DualTransform) augmentations are excluded because labels are
        not transformed together with the image here.
        """
        augs = super().get_robustness_augmentations(data_dim=data_dim)
        # BUGFIX: return a list as the annotation promises, not a single-use
        # `filter` iterator that would be exhausted after one pass.
        return [aug for aug in augs if not isinstance(aug, albumentations.DualTransform)]
def get_transforms_handler(transforms, task_type: TaskType) -> t.Type[AbstractTransformations]:
    """Return the appropriate transforms handler based on type of given transforms."""
    if transforms is None:
        raise runml_checksNotSupportedError('Underlying Dataset instance must have transform field not None')
    if isinstance(transforms, albumentations.Compose):
        return AlbumentationsTransformations
    if isinstance(transforms, T.Compose):
        # Object-detection pipelines get the bbox-aware torch handler.
        return TorchTransformationsBbox if task_type == TaskType.OBJECT_DETECTION else TorchTransformations
    if isinstance(transforms, iaa.Augmenter):
        return ImgaugTransformations
    raise runml_checksNotSupportedError('Currently only imgaug, albumentations and torch are supported')
def un_normalize_batch(tensor: torch.Tensor, mean: t.Sized, std: t.Sized, max_pixel_value: int = 255):
    """Apply un-normalization on a tensor in order to display an image.

    Reverses a per-channel (x - mean) / std normalization and rescales to the
    pixel range, returning a numpy array on the CPU.
    """
    n_channels = len(mean)
    # Broadcast shape for a channels-last batch: (N, H, W, C).
    broadcast_shape = (1, 1, 1, n_channels)
    mean_t = torch.tensor(mean, device=tensor.device).reshape(broadcast_shape)
    std_t = torch.tensor(std, device=tensor.device).reshape(broadcast_shape)
    scale_t = torch.tensor([max_pixel_value] * n_channels, device=tensor.device).reshape(broadcast_shape)
    restored = (tensor * std_t + mean_t) * scale_t
    return restored.cpu().detach().numpy()
import warnings
from typing import Any, Dict, List, Tuple
from ignite.metrics import Metric
from runml_checks.vision import Suite
from runml_checks.vision.checks import (ClassPerformance, ConfusionMatrixReport, FeatureLabelCorrelationChange,
HeatmapComparison, ImageDatasetDrift, ImagePropertyDrift, ImagePropertyOutliers,
ImageSegmentPerformance, LabelPropertyOutliers, MeanAveragePrecisionReport,
MeanAverageRecallReport, ModelErrorAnalysis, NewLabels, SimilarImageLeakage,
SimpleModelComparison, TrainTestLabelDrift, TrainTestPredictionDrift)
__all__ = ['train_test_validation', 'model_evaluation', 'full_suite', 'integrity_validation', 'data_integrity']
def train_test_validation(n_top_show: int = 5,
                          label_properties: List[Dict[str, Any]] = None,
                          image_properties: List[Dict[str, Any]] = None,
                          sample_size: int = None,
                          random_state: int = None,
                          **kwargs) -> Suite:
    """Suite for validating correctness of train-test split, including distribution, \
    integrity and leakage checks.

    Runs NewLabels, SimilarImageLeakage, HeatmapComparison, TrainTestLabelDrift,
    ImagePropertyDrift, ImageDatasetDrift and FeatureLabelCorrelationChange,
    each with its default condition added where one exists.

    Parameters
    ----------
    n_top_show : int, default: 5
        Number of images to show for checks that show images.
    label_properties : List[Dict[str, Any]], default: None
        Custom label properties replacing the runml_checks defaults. Each is a
        dict with keys 'name' (str), 'method' (Callable) and 'output_type'
        ('numeric', 'categorical' or 'class_id'). See the
        :ref:`vision_properties_guide` for details.
    image_properties : List[Dict[str, Any]], default: None
        Custom image properties, same structure as ``label_properties``
        (without 'class_id').
    sample_size : int, default: None
        Number of samples to use for checks that sample data. If None, each
        check keeps its own default sample size.
    random_state : int, default: None
        Random seed for all checks.
    **kwargs : dict
        Additional arguments passed to every check.

    Returns
    -------
    Suite
        A suite for validating correctness of the train-test split.

    Examples
    --------
    >>> from runml_checks.vision.suites import train_test_validation
    >>> suite = train_test_validation(n_top_show=3, sample_size=100)
    >>> result = suite.run()
    >>> result.show()
    """
    # Forward only the explicitly-set (non-None) named arguments to every check,
    # letting any user-supplied **kwargs override them.
    named_args = dict(locals())
    del named_args['kwargs']
    kwargs = {**{k: v for k, v in named_args.items() if v is not None}, **kwargs}
    return Suite(
        'Train Test Validation Suite',
        NewLabels(**kwargs).add_condition_new_label_ratio_less_or_equal(),
        SimilarImageLeakage(**kwargs).add_condition_similar_images_less_or_equal(),
        HeatmapComparison(**kwargs),
        TrainTestLabelDrift(**kwargs).add_condition_drift_score_less_than(),
        ImagePropertyDrift(**kwargs).add_condition_drift_score_less_than(),
        ImageDatasetDrift(**kwargs),
        FeatureLabelCorrelationChange(**kwargs).add_condition_feature_pps_difference_less_than(),
    )
def model_evaluation(alternative_metrics: Dict[str, Metric] = None,
                     area_range: Tuple[float, float] = (32**2, 96**2),
                     image_properties: List[Dict[str, Any]] = None,
                     prediction_properties: List[Dict[str, Any]] = None,
                     random_state: int = 42,
                     **kwargs) -> Suite:
    """Suite for evaluating the model's performance over different metrics, segments, error analysis, \
    comparing to baseline, and more.

    Runs ClassPerformance, MeanAveragePrecisionReport, MeanAverageRecallReport,
    TrainTestPredictionDrift, SimpleModelComparison, ConfusionMatrixReport,
    ImageSegmentPerformance and ModelErrorAnalysis, each with its default
    condition added where one exists.

    Parameters
    ----------
    alternative_metrics : Dict[str, Metric], default: None
        A dictionary of metrics, where the key is the metric name and the value
        is an ignite.Metric object whose score should be used. If None, the
        default metrics are used.
    area_range : tuple, default: (32**2, 96**2)
        Slices for small/medium/large buckets. (For object detection tasks only)
    image_properties : List[Dict[str, Any]], default: None
        Custom image properties replacing the runml_checks defaults. Each is a
        dict with keys 'name' (str), 'method' (Callable) and 'output_type'
        ('numeric' or 'categorical'). See the :ref:`vision_properties_guide`.
    prediction_properties : List[Dict[str, Any]], default: None
        Custom prediction properties, same structure as ``image_properties``
        (plus the 'class_id' output type).
    random_state : int, default: 42
        random seed for all checks.
    **kwargs : dict
        Additional arguments passed to every check.

    Returns
    -------
    Suite
        A suite for evaluating the model's performance.

    Examples
    --------
    >>> from runml_checks.vision.suites import model_evaluation
    >>> suite = model_evaluation()
    >>> result = suite.run()
    >>> result.show()
    """
    # Forward only the explicitly-set (non-None) named arguments to every check,
    # letting any user-supplied **kwargs override them.
    named_args = dict(locals())
    del named_args['kwargs']
    kwargs = {**{k: v for k, v in named_args.items() if v is not None}, **kwargs}
    return Suite(
        'Model Evaluation Suite',
        ClassPerformance(**kwargs).add_condition_train_test_relative_degradation_less_than(),
        MeanAveragePrecisionReport(**kwargs).add_condition_average_mean_average_precision_greater_than(),
        MeanAverageRecallReport(**kwargs),
        TrainTestPredictionDrift(**kwargs).add_condition_drift_score_less_than(),
        SimpleModelComparison(**kwargs).add_condition_gain_greater_than(),
        ConfusionMatrixReport(**kwargs),
        ImageSegmentPerformance(**kwargs).add_condition_score_from_mean_ratio_greater_than(),
        ModelErrorAnalysis(**kwargs)
    )
def integrity_validation(**kwargs) -> Suite:
    """Create a suite that is meant to validate integrity of the data.

    .. deprecated:: 0.7.0
        `integrity_validation` is deprecated and will be removed in runml_checks 0.8 version, it is replaced by
        `data_integrity` suite.
    """
    deprecation_note = ('the integrity_validation suite is deprecated, '
                        'use the data_integrity suite instead')
    warnings.warn(deprecation_note, DeprecationWarning)
    # Delegate to the replacement suite so behavior stays identical.
    return data_integrity(**kwargs)
def data_integrity(image_properties: List[Dict[str, Any]] = None,
                   n_show_top: int = 5,
                   label_properties: List[Dict[str, Any]] = None,
                   **kwargs) -> Suite:
    """Create a suite that includes integrity checks.

    Runs ImagePropertyOutliers and LabelPropertyOutliers.

    Parameters
    ----------
    image_properties : List[Dict[str, Any]], default: None
        Custom image properties replacing the runml_checks defaults. Each is a
        dict with keys 'name' (str), 'method' (Callable) and 'output_type'
        ('numeric' or 'categorical'). See the :ref:`vision_properties_guide`.
    n_show_top : int, default: 5
        number of samples to show from each direction (upper limit and bottom limit)
    label_properties : List[Dict[str, Any]], default: None
        Custom label properties, same structure as ``image_properties``.
    **kwargs : dict
        Additional arguments passed to every check.

    Returns
    -------
    Suite
        A suite that includes integrity checks.

    Examples
    --------
    >>> from runml_checks.vision.suites import data_integrity
    >>> suite = data_integrity()
    >>> result = suite.run()
    >>> result.show()
    """
    # Forward only the explicitly-set (non-None) named arguments to every check,
    # letting any user-supplied **kwargs override them.
    named_args = dict(locals())
    del named_args['kwargs']
    kwargs = {**{k: v for k, v in named_args.items() if v is not None}, **kwargs}
    return Suite(
        'Data Integrity Suite',
        ImagePropertyOutliers(**kwargs),
        LabelPropertyOutliers(**kwargs)
    )
def full_suite(**kwargs) -> Suite:
    """Create a suite that includes many of the implemented checks, for a quick overview of your model and data."""
    # Compose the three domain suites; extra keyword arguments are forwarded
    # unchanged to every sub-suite factory.
    sub_suites = [
        model_evaluation(**kwargs),
        train_test_validation(**kwargs),
        data_integrity(**kwargs),
    ]
    return Suite('Full Suite', *sub_suites)
"""Module containing the base checks."""
# pylint: disable=broad-except
import abc
import enum
from collections import OrderedDict
from typing import Any, Callable, ClassVar, Dict, List, Optional, Type, Union
from typing_extensions import TypedDict
from runml_checks.core import check_result as check_types # pylint: disable=unused-import
from runml_checks.core.condition import Condition, ConditionCategory, ConditionResult
from runml_checks.core.errors import runml_checksValueError
from runml_checks.utils.function import initvars
from runml_checks.utils.strings import get_docs_summary, split_camel_case
__all__ = [
'DatasetKind',
'BaseCheck',
'SingleDatasetBaseCheck',
'TrainTestBaseCheck',
'ModelOnlyBaseCheck',
]
class DatasetKind(enum.Enum):
    """Represents in single dataset checks, which dataset is currently worked on."""

    # Values are display strings used when labelling results per-dataset.
    TRAIN = 'Train'
    TEST = 'Test'
class CheckMetadata(TypedDict):
    """Serializable description of a check: its name, constructor params and doc summary."""

    name: str
    params: Dict[Any, Any]
    summary: str
class ReduceMixin(abc.ABC):
    """Mixin for reduce_output function."""

    # NOTE(review): not decorated with @abstractmethod, so a subclass that
    # forgets to override only fails at call time, not at instantiation.
    def reduce_output(self, check_result: 'check_types.CheckResult') -> Dict[str, float]:
        """Return the check result as a reduced dict. Being Used for monitoring.

        Parameters
        ----------
        check_result : CheckResult
            The check result.

        Returns
        -------
        Dict[str, float]
            reduced dictionary in format {str: float} (i.e {'AUC': 0.1}), based on the check's original returned value
        """
        raise NotImplementedError('Must implement reduce_output function')
class BaseCheck(abc.ABC):
    """Base class for check."""

    # Registered conditions, keyed by the integer index assigned at add time.
    _conditions: OrderedDict
    # Index that will be assigned to the next condition added.
    _conditions_index: int

    def __init__(self, **kwargs):  # pylint: disable=unused-argument
        self._conditions = OrderedDict()
        self._conditions_index = 0

    @abc.abstractmethod
    def run(self, *args, **kwargs) -> 'check_types.CheckResult':
        """Run Check."""
        raise NotImplementedError()

    def conditions_decision(self, result: 'check_types.CheckResult') -> List[ConditionResult]:
        """Run conditions on given result."""
        results = []
        condition: Condition
        for condition in self._conditions.values():
            try:
                output = condition.function(result.value, **condition.params)
            except Exception as e:
                # Deliberately broad: a faulty condition must not abort the
                # whole run - it is surfaced as an ERROR condition result.
                msg = f'Exception in condition: {e.__class__.__name__}: {str(e)}'
                output = ConditionResult(ConditionCategory.ERROR, msg)
            if isinstance(output, bool):
                # Plain booleans are normalized into PASS/FAIL results.
                output = ConditionResult(ConditionCategory.PASS if output else ConditionCategory.FAIL)
            elif not isinstance(output, ConditionResult):
                raise runml_checksValueError(f'Invalid return type from condition {condition.name}, got: {type(output)}')
            output.set_name(condition.name)
            results.append(output)
        return results

    def add_condition(self, name: str, condition_func: Callable[[Any], Union[ConditionResult, bool]], **params):
        """Add new condition function to the check.

        Parameters
        ----------
        name : str
            Name of the condition. should explain the condition action and parameters
        condition_func : Callable[[Any], Union[List[ConditionResult], bool]]
            Function which gets the value of the check and returns object of List[ConditionResult] or boolean.
        params : dict
            Additional parameters to pass when calling the condition function.

        Returns
        -------
        BaseCheck
            This check instance, allowing fluent chaining of add_condition calls.
        """
        cond = Condition(name, condition_func, params)
        self._conditions[self._conditions_index] = cond
        self._conditions_index += 1
        return self

    def clean_conditions(self):
        """Remove all conditions from this check instance."""
        self._conditions.clear()
        self._conditions_index = 0

    def remove_condition(self, index: int):
        """Remove given condition by index.

        Parameters
        ----------
        index : int
            index of condition to remove
        """
        if index not in self._conditions:
            raise runml_checksValueError(f'Index {index} of conditions does not exists')
        self._conditions.pop(index)

    def params(self, show_defaults: bool = False) -> Dict:
        """Return parameters to show when printing the check."""
        return initvars(self, show_defaults)

    @classmethod
    def name(cls) -> str:
        """Name of class in split camel case."""
        return split_camel_case(cls.__name__)

    def metadata(self, with_doc_link: bool = False) -> CheckMetadata:
        """Return check metadata.

        Parameters
        ----------
        with_doc_link : bool, default False
            whether to include doc link in summary or not

        Returns
        -------
        Dict[str, Any]
        """
        return CheckMetadata(
            name=self.name(),
            params=self.params(show_defaults=True),
            summary=get_docs_summary(self, with_doc_link)
        )

    def __repr__(self, tabs=0, prefix=''):
        """Representation of check as string.

        Parameters
        ----------
        tabs : int , default: 0
            number of tabs to shift by the output
        prefix
        """
        tab_chr = '\t'
        params = self.params()
        if params:
            params_str = ', '.join([f'{k}={v}' for k, v in params.items()])
            params_str = f'({params_str})'
        else:
            params_str = ''
        name = prefix + self.__class__.__name__
        check_str = f'{tab_chr * tabs}{name}{params_str}'
        # Conditions (if any) are listed on their own indented lines.
        if self._conditions:
            conditions_str = ''.join([f'\n{tab_chr * (tabs + 2)}{i}: {s.name}' for i, s in self._conditions.items()])
            return f'{check_str}\n{tab_chr * (tabs + 1)}Conditions:{conditions_str}'
        else:
            return check_str
class SingleDatasetBaseCheck(BaseCheck):
    """Parent class for checks that only use one dataset."""

    # Concrete subclasses assign the context class used during a run.
    context_type: ClassVar[Optional[Type[Any]]] = None  # TODO: Base context type

    @abc.abstractmethod
    def run(self, dataset, model=None, **kwargs) -> 'check_types.CheckResult':
        """Run check."""
        raise NotImplementedError()
class TrainTestBaseCheck(BaseCheck):
    """Parent class for checks that compare two datasets.

    The class checks train dataset and test dataset for model training and test.
    """

    # Concrete subclasses assign the context class used during a run.
    context_type: ClassVar[Optional[Type[Any]]] = None  # TODO: Base context type

    @abc.abstractmethod
    def run(self, train_dataset, test_dataset, model=None, **kwargs) -> 'check_types.CheckResult':
        """Run check."""
        raise NotImplementedError()
class ModelOnlyBaseCheck(BaseCheck):
    """Parent class for checks that only use a model and no datasets."""

    # Concrete subclasses assign the context class used during a run.
    context_type: ClassVar[Optional[Type[Any]]] = None  # TODO: Base context type

    @abc.abstractmethod
    def run(self, model, **kwargs) -> 'check_types.CheckResult':
        """Run check."""
        raise NotImplementedError()
"""Module containing the check results classes."""
# pylint: disable=broad-except,import-outside-toplevel,unused-argument
import io
import traceback
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union, cast
import jsonpickle
import jsonpickle.ext.pandas as jsonpickle_pd
import pandas as pd
from ipywidgets import Widget
from pandas.io.formats.style import Styler
from plotly.basedatatypes import BaseFigure
from runml_checks.core.checks import ReduceMixin
from runml_checks.core.condition import ConditionCategory, ConditionResult
from runml_checks.core.display import DisplayableResult, save_as_html
from runml_checks.core.errors import runml_checksValueError
from runml_checks.core.serialization.abc import HTMLFormatter
from runml_checks.core.serialization.check_failure.html import CheckFailureSerializer as CheckFailureHtmlSerializer
from runml_checks.core.serialization.check_failure.ipython import CheckFailureSerializer as CheckFailureIPythonSerializer
from runml_checks.core.serialization.check_failure.json import CheckFailureSerializer as CheckFailureJsonSerializer
from runml_checks.core.serialization.check_failure.widget import CheckFailureSerializer as CheckFailureWidgetSerializer
from runml_checks.core.serialization.check_result.html import CheckResultSection
from runml_checks.core.serialization.check_result.html import CheckResultSerializer as CheckResultHtmlSerializer
from runml_checks.core.serialization.check_result.ipython import CheckResultSerializer as CheckResultIPythonSerializer
from runml_checks.core.serialization.check_result.json import CheckResultSerializer as CheckResultJsonSerializer
from runml_checks.core.serialization.check_result.widget import CheckResultSerializer as CheckResultWidgetSerializer
from runml_checks.utils.logger import get_logger
from runml_checks.utils.strings import widget_to_html_string
from runml_checks.utils.wandb_utils import wandb_run
# registers jsonpickle pandas extension for pandas support in the to_json function
jsonpickle_pd.register_handlers()
if TYPE_CHECKING:
from runml_checks.core.checks import BaseCheck
__all__ = ['CheckResult', 'CheckFailure', 'BaseCheckResult', 'DisplayMap']
class DisplayMap(Dict[str, List['TDisplayItem']]):
    """Class facilitating tabs within check display output.

    Maps a tab name to the list of display items rendered inside that tab.
    """

    pass
# A display item may also be a zero-argument callable invoked at render time.
TDisplayCallable = Callable[[], None]
# Union of everything a check is allowed to place into CheckResult.display.
TDisplayItem = Union[str, pd.DataFrame, Styler, BaseFigure, TDisplayCallable, DisplayMap]
class BaseCheckResult:
    """Generic class for any check output, contains some basic functions."""

    # The check instance that produced this output (may be a stand-in object
    # when reconstructed from JSON).
    check: Optional['BaseCheck']
    # Optional custom display header; falls back to the check class name.
    header: Optional[str]

    @staticmethod
    def from_json(json_dict: Union[str, Dict]) -> 'BaseCheckResult':
        """Convert a json object that was returned from CheckResult.to_json or CheckFailure.to_json.

        Parameters
        ----------
        json_dict: Union[str, Dict]
            Json data

        Returns
        -------
        BaseCheckResult
            A check output object.

        Raises
        ------
        ValueError
            If the serialized 'type' field is neither 'CheckFailure' nor 'CheckResult'.
        """
        from runml_checks.core.check_json import CheckFailureJson, CheckResultJson

        if isinstance(json_dict, str):
            json_dict = jsonpickle.loads(json_dict)

        check_type = cast(dict, json_dict)['type']

        if check_type == 'CheckFailure':
            return CheckFailureJson(json_dict)
        elif check_type == 'CheckResult':
            return CheckResultJson(json_dict)
        else:
            # Typos fixed in the error message ("Excpected"/"recievied").
            raise ValueError(
                'Expected json object to be one of [CheckFailure, CheckResult] '
                f'but received: {check_type}'
            )

    def get_header(self) -> str:
        """Return header for display. if header was defined return it, else extract name of check class."""
        return self.header or self.check.name()

    def get_metadata(self, with_doc_link: bool = False) -> Dict:
        """Return the related check metadata."""
        return {'header': self.get_header(), **self.check.metadata(with_doc_link=with_doc_link)}

    def get_check_id(self, unique_id: str = '') -> str:
        """Return check id (used for href)."""
        # Spaces are stripped so the id is usable as an HTML anchor.
        header = self.get_header().replace(' ', '')
        return f'{header}_{unique_id}'
class CheckResult(BaseCheckResult, DisplayableResult):
    """Class which returns from a check with result that can later be used for automatic pipelines and display value.

    Class containing the result of a check

    The class stores the results and display of the check. Evaluating the result in an IPython console / notebook
    will show the result display output.

    Parameters
    ----------
    value : Any
        Value calculated by check. Can be used to decide if decidable check passed.
    display : List[Union[Callable, str, pd.DataFrame, Styler, BaseFigure]] , default: None
        Dictionary with formatters for display. possible formatters are: 'text/html', 'image/png'
    header : str , default: None
        Header to be displayed in python notebook.
    """

    value: Any
    header: Optional[str]
    display: List[TDisplayItem]
    conditions_results: List[ConditionResult]

    def __init__(
        self,
        value,
        header: Optional[str] = None,
        display: Optional[List[TDisplayItem]] = None,  # pylint: disable=redefined-outer-name
    ):
        self.value = value
        self.header = header
        self.conditions_results = []

        # A single display item may be passed bare; normalize to a list.
        if display is not None and not isinstance(display, List):
            self.display = [display]
        else:
            self.display = display or []

        for item in self.display:
            if not isinstance(item, (str, pd.DataFrame, Styler, Callable, BaseFigure, DisplayMap)):
                raise runml_checksValueError(f'Can\'t display item of type: {type(item)}')

    def process_conditions(self):
        """Process the conditions results from current result and check."""
        self.conditions_results = self.check.conditions_decision(self)

    def have_conditions(self) -> bool:
        """Return if this check has condition results."""
        return bool(self.conditions_results)

    def have_display(self) -> bool:
        """Return if this check has display."""
        return bool(self.display)

    def passed_conditions(self, fail_if_warning=True) -> bool:
        """Return if this check has no passing condition results."""
        return all((r.is_pass(fail_if_warning) for r in self.conditions_results))

    @property
    def priority(self) -> int:
        """Return priority of the current result.

        This value is primarily used to determine suite output order.
        The logic is next:

        * if at least one condition did not pass and is of category 'FAIL', return 1.
        * if at least one condition did not pass and is of category 'WARN', return 2.
        * if at least one condition did not pass and is of category 'ERROR', return 3.
        * if all conditions passed, return 4.
        * if check result do not have assigned conditions, return 5.

        Returns
        -------
        int
            priority of the check result.
        """
        # BUGFIX: 'have_conditions' and 'is_pass' are methods, not properties.
        # Previously they were referenced without calling them, so the bound
        # method object was always truthy - the method could never return 5
        # and every result got priority 4.
        if not self.have_conditions():
            return 5

        for c in self.conditions_results:
            if c.is_pass() is False and c.category == ConditionCategory.FAIL:
                return 1
            if c.is_pass() is False and c.category == ConditionCategory.WARN:
                return 2
            if c.is_pass() is False and c.category == ConditionCategory.ERROR:
                return 3

        return 4

    def reduce_output(self) -> Dict[str, float]:
        """Return the check result as a reduced dict."""
        if isinstance(self.check, ReduceMixin):
            return self.check.reduce_output(self)
        raise runml_checksValueError('Check needs to be an instance of ReduceMixin to use this function')

    @property
    def widget_serializer(self) -> CheckResultWidgetSerializer:
        """Return WidgetSerializer instance."""
        return CheckResultWidgetSerializer(self)

    @property
    def ipython_serializer(self) -> CheckResultIPythonSerializer:
        """Return IPythonSerializer instance."""
        return CheckResultIPythonSerializer(self)

    @property
    def html_serializer(self) -> CheckResultHtmlSerializer:
        """Return HtmlSerializer instance."""
        return CheckResultHtmlSerializer(self)

    def display_check(
        self,
        unique_id: Optional[str] = None,
        as_widget: bool = True,
        show_additional_outputs: bool = True,
        **kwargs
    ):
        """Display the check result or return the display as widget.

        Parameters
        ----------
        unique_id : str
            unique identifier of the result output
        as_widget : bool
            Boolean that controls if to display the check regulary or if to return a widget.
        show_additional_outputs : bool
            Boolean that controls if to show additional outputs.
        """
        self.show(
            as_widget=as_widget,
            unique_id=unique_id,
            show_additional_outputs=show_additional_outputs
        )

    def save_as_html(
        self,
        file: Union[str, io.TextIOWrapper, None] = None,
        unique_id: Optional[str] = None,
        show_additional_outputs: bool = True,
        as_widget: bool = True,
        requirejs: bool = True,
        **kwargs
    ):
        """Save a result to an HTML file.

        Parameters
        ----------
        file : filename or file-like object
            the file to write the HTML output to. If None writes to output.html
        unique_id : Optional[str], default None
            unique identifier of the result output
        show_additional_outputs : bool, default True
            whether to show additional outputs or not
        as_widget : bool, default True
            whether to use ipywidgets or not
        requirejs: bool , default: True
            whether to include requirejs library into output HTML or not

        Returns
        -------
        Optional[str] :
            name of newly create file
        """
        return save_as_html(
            file=file,
            serializer=self.widget_serializer if as_widget else self.html_serializer,
            # next kwargs will be passed to serializer.serialize method
            requirejs=requirejs,
            output_id=unique_id,
            check_sections=detalize_additional_output(show_additional_outputs)
        )

    def show(
        self,
        as_widget: bool = True,
        unique_id: Optional[str] = None,
        show_additional_outputs: bool = True,
        **kwargs
    ) -> Optional[HTMLFormatter]:
        """Display the check result.

        Parameters
        ----------
        as_widget : bool, default True
            whether to use ipywidgets or not
        unique_id : Optional[str], default None
            unique identifier of the result output
        show_additional_outputs : bool, default True
            whether to show additional outputs or not

        Returns
        -------
        Optional[HTMLFormatter] :
            when used by sphinx-gallery
        """
        return super().show(
            as_widget=as_widget,
            unique_id=unique_id,
            check_sections=detalize_additional_output(show_additional_outputs),
            **kwargs
        )

    def to_widget(
        self,
        unique_id: Optional[str] = None,
        show_additional_outputs: bool = True,
        **kwargs
    ) -> Widget:
        """Return CheckResult as a ipywidgets.Widget instance.

        Parameters
        ----------
        unique_id : Optional[str], default None
            unique identifier of the result output
        show_additional_outputs : bool, default True
            whether to show additional outputs or not

        Returns
        -------
        Widget
        """
        return self.widget_serializer.serialize(
            output_id=unique_id,
            check_sections=detalize_additional_output(show_additional_outputs)
        )

    def to_wandb(
        self,
        dedicated_run: Optional[bool] = None,
        **kwargs
    ):
        """Send result to wandb.

        Parameters
        ----------
        dedicated_run : bool, default True
            whether to create a separate wandb run or not
            (deprecated parameter, does not have any effect anymore)
        kwargs: Keyword arguments to pass to wandb.init.
                Default project name is runml_checks.
                Default config is the check metadata (params, train/test/ name etc.).
        """
        # NOTE: Wandb is not a default dependency
        # user should install it manually therefore we are
        # doing import within method to prevent premature ImportError
        assert self.check is not None
        from .serialization.check_result.wandb import CheckResultSerializer as WandbSerializer

        if dedicated_run is not None:
            get_logger().warning(
                '"dedicated_run" parameter is deprecated and does not have effect anymore. '
                'It will be remove in next versions.'
            )

        wandb_kwargs = {'config': {'header': self.get_header(), **self.check.metadata()}}
        wandb_kwargs.update(**kwargs)

        with wandb_run(**wandb_kwargs) as run:
            run.log(WandbSerializer(self).serialize())

    def to_json(self, with_display: bool = True, **kwargs) -> str:
        """Serialize result into a json string.

        Returned JSON string will have next structure:

        >>    class CheckResultMetadata(TypedDict):
        >>        type: str
        >>        check: CheckMetadata
        >>        value: Any
        >>        header: str
        >>        conditions_results: List[Dict[Any, Any]]
        >>        display: List[Dict[str, Any]]

        >>    class CheckMetadata(TypedDict):
        >>        name: str
        >>        params: Dict[Any, Any]
        >>        summary: str

        Parameters
        ----------
        with_display : bool
            whether to include display items or not

        Returns
        -------
        str
        """
        return jsonpickle.dumps(
            CheckResultJsonSerializer(self).serialize(
                with_display=with_display
            ),
            unpicklable=False
        )

    def __repr__(self):
        """Return default __repr__ function uses value."""
        return f'{self.get_header()}: {self.value}'

    def _repr_html_(
        self,
        unique_id: Optional[str] = None,
        show_additional_outputs: bool = True,
        requirejs: bool = False,
        **kwargs
    ) -> str:
        """Return html representation of check result."""
        return widget_to_html_string(
            self.to_widget(
                unique_id=unique_id,
                show_additional_outputs=show_additional_outputs
            ),
            title=self.get_header(),
            requirejs=requirejs
        )

    def _repr_json_(self, **kwargs):
        return CheckResultJsonSerializer(self).serialize()

    def _repr_mimebundle_(self, **kwargs):
        return {
            'text/html': self._repr_html_(),
            'application/json': self._repr_json_()
        }

    def _ipython_display_(
        self,
        unique_id: Optional[str] = None,
        as_widget: bool = True,
        show_additional_outputs: bool = True
    ):
        self.show(
            unique_id=unique_id,
            as_widget=as_widget,
            show_additional_outputs=show_additional_outputs
        )
class CheckFailure(BaseCheckResult, DisplayableResult):
    """Class which holds a check run exception.

    Parameters
    ----------
    check : BaseCheck
    exception : Exception
    header_suffix : str , default ``
    """

    def __init__(
        self,
        check: 'BaseCheck',
        exception: Exception,
        header_suffix: str = ''
    ):
        self.check = check
        self.exception = exception
        self.header = check.name() + header_suffix

    @property
    def widget_serializer(self) -> CheckFailureWidgetSerializer:
        """Return WidgetSerializer instance."""
        return CheckFailureWidgetSerializer(self)

    @property
    def ipython_serializer(self) -> CheckFailureIPythonSerializer:
        """Return IPythonSerializer instance."""
        return CheckFailureIPythonSerializer(self)

    @property
    def html_serializer(self) -> CheckFailureHtmlSerializer:
        """Return HtmlSerializer instance."""
        return CheckFailureHtmlSerializer(self)

    def display_check(self, as_widget: bool = True, **kwargs):
        """Display the check failure or return the display as widget.

        Parameters
        ----------
        as_widget : bool, default True
            whether to use ipywidgets or not
        """
        self.show(as_widget=as_widget)

    def save_as_html(
        self,
        file: Union[str, io.TextIOWrapper, None] = None,
        as_widget: bool = True,
        requirejs: bool = True,
        **kwargs
    ) -> Optional[str]:
        """Save output as html file.

        Parameters
        ----------
        file : filename or file-like object
            The file to write the HTML output to. If None writes to output.html
        as_widget : bool, default True
            whether to use ipywidgets or not
        requirejs: bool , default: True
            whether to include requirejs library into output HTML or not

        Returns
        -------
        Optional[str] :
            name of newly create file
        """
        return save_as_html(
            file=file,
            serializer=self.widget_serializer if as_widget else self.html_serializer,
            requirejs=requirejs,
        )

    def to_widget(self, **kwargs) -> Widget:
        """Return CheckFailure as a ipywidgets.Widget instance."""
        return CheckFailureWidgetSerializer(self).serialize()

    def to_json(self, **kwargs):
        """Serialize CheckFailure into a json string.

        Returned JSON string will have next structure:

        >>    class CheckFailureMetadata(TypedDict):
        >>        check: CheckMetadata
        >>        header: str
        >>        display: List[Dict[str, str]]

        >>    class CheckMetadata(TypedDict):
        >>        type: str
        >>        name: str
        >>        params: Dict[Any, Any]
        >>        summary: str

        Returns
        -------
        str
        """
        return jsonpickle.dumps(
            CheckFailureJsonSerializer(self).serialize(),
            unpicklable=False
        )

    def to_wandb(self, dedicated_run: Optional[bool] = None, **kwargs):
        """Send check result to wandb.

        Parameters
        ----------
        dedicated_run : bool, default True
            whether to create a separate wandb run or not
            (deprecated parameter, does not have any effect anymore)
        kwargs: Keyword arguments to pass to wandb.init.
                Default project name is runml_checks.
                Default config is the check metadata (params, train/test/ name etc.).
        """
        # NOTE: Wandb is not a default dependency
        # user should install it manually therefore we are
        # doing import within method to prevent premature ImportError
        assert self.check is not None
        from .serialization.check_failure.wandb import CheckFailureSerializer as WandbSerializer

        if dedicated_run is not None:
            get_logger().warning(
                '"dedicated_run" parameter is deprecated and does not have effect anymore. '
                'It will be remove in next versions.'
            )

        wandb_kwargs = {'config': {'header': self.get_header(), **self.check.metadata()}}
        wandb_kwargs.update(**kwargs)

        with wandb_run(**wandb_kwargs) as run:
            run.log(WandbSerializer(self).serialize())

    def __repr__(self):
        """Return string representation."""
        return self.get_header() + ': ' + str(self.exception)

    def _repr_html_(self):
        return CheckFailureHtmlSerializer(self).serialize()

    def _repr_json_(self):
        return CheckFailureJsonSerializer(self).serialize()

    def _repr_mimebundle_(self, **kwargs):
        return {
            'text/html': self._repr_html_(),
            'application/json': self._repr_json_()
        }

    def print_traceback(self):
        """Print the traceback of the failure."""
        # BUGFIX: the 'etype' keyword argument was removed from
        # traceback.format_exception in Python 3.10; the positional
        # (type, value, tb) form works on all supported versions.
        print(''.join(traceback.format_exception(
            type(self.exception),
            self.exception,
            self.exception.__traceback__
        )))
def detalize_additional_output(show_additional_outputs: bool) -> List[CheckResultSection]:
    """Return the list of result sections to render.

    The condition table is always included; the additional-output section is
    appended only when requested.
    """
    sections: List[CheckResultSection] = ['condition-table']
    if show_additional_outputs:
        sections.append('additional-output')
    return sections
"""Module with all runml_checks error types."""
__all__ = ['runml_checksValueError', 'runml_checksNotSupportedError', 'runml_checksProcessError',
'NumberOfFeaturesLimitError', 'DatasetValidationError', 'ModelValidationError',
'runml_checksNotImplementedError', 'ValidationError', 'runml_checksBaseError', 'NotEnoughSamplesError',
'runml_checksTimeoutError']
class runml_checksBaseError(Exception):
    """Base exception class for all 'runml_checks' error types.

    Stores the plain-text ``message`` along with an ``html`` variant that
    falls back to the plain message when no HTML version is supplied.
    """

    def __init__(self, message: str, html: str = None):
        super().__init__(message)
        self.message = message
        # Preserve 'html or message' semantics: any falsy html (None, '')
        # falls back to the plain message.
        if html:
            self.html = html
        else:
            self.html = message
class runml_checksValueError(runml_checksBaseError):
    """Exception raised when a faulty parameter value was passed to runml_checks."""

    pass
class runml_checksNotImplementedError(runml_checksBaseError):
    """Exception raised when a required function was not implemented."""

    pass
class runml_checksNotSupportedError(runml_checksBaseError):
    """Exception class that represents an unsupported action in runml_checks."""

    pass
class runml_checksProcessError(runml_checksBaseError):
    """Exception class that represents an issue with a process."""

    pass
class NumberOfFeaturesLimitError(runml_checksBaseError):
    """Represents a situation when a dataset contains too many features to be used for calculation."""

    pass
class runml_checksTimeoutError(runml_checksBaseError):
    """Represents a situation when a computation takes too long and is interrupted."""

    pass
class ValidationError(runml_checksBaseError):
    """Represents a more specific case of the ValueError (runml_checksValueError)."""

    pass
class DatasetValidationError(runml_checksBaseError):
    """Represents an inappropriate Dataset instance.

    Should be used in a situation when a routine (like check instance, utility function, etc)
    expected and received a dataset instance that did not meet routine requirements.
    """

    pass
class ModelValidationError(runml_checksBaseError):
    """Represents an inappropriate model instance.

    Should be used in a situation when a routine (like check instance, utility function, etc)
    expected and received a model instance that did not meet routine requirements.
    """

    pass
class NotEnoughSamplesError(runml_checksBaseError):
    """Represents a failure in calculation due to insufficient amount of samples."""

    pass
"""Module containing the check results classes."""
# pylint: disable=super-init-not-called
import base64
import io
from typing import Any, Dict, List, Union
import jsonpickle
import pandas as pd
import plotly
from runml_checks.core.check_result import CheckFailure, CheckResult, DisplayMap
from runml_checks.core.condition import Condition, ConditionCategory, ConditionResult
from runml_checks.utils.html import imagetag
__all__ = [
'CheckResultJson',
'CheckFailureJson',
]
class FakeCheck:
    """Lightweight stand-in for a check instance, backed by a metadata dict.

    Used when reconstructing results from JSON, where the original check
    object is not available.
    """

    def __init__(self, metadata: Dict):
        self._check_metadata = metadata

    def metadata(self, *args, **kwargs):  # pylint: disable=unused-argument
        """Return the stored metadata dict; any arguments are accepted and ignored."""
        return self._check_metadata

    def name(self):
        """Return the check name recorded in the metadata."""
        return self._check_metadata['name']
class CheckResultJson(CheckResult):
    """Class which returns from a check with result that can later be used for automatic pipelines and display value.

    Class containing the result of a check

    The class stores the results and display of the check. Evaluating the result in an IPython console / notebook
    will show the result display output.

    Parameters
    ----------
    json_data: Union[str, Dict]
        Json data
    """

    def __init__(self, json_dict: Union[str, Dict]):
        if isinstance(json_dict, str):
            json_dict = jsonpickle.loads(json_dict)

        self.value = json_dict.get('value')
        self.header = json_dict.get('header')
        # The original check object is unavailable here; a FakeCheck carries
        # just the serialized metadata.
        self.check = FakeCheck(json_dict.get('check'))

        # Condition results were serialized as dicts; rebuild ConditionResult
        # instances from them.
        conditions_results_json = json_dict.get('conditions_results')
        if conditions_results_json is not None:
            self.conditions_results = []
            for condition in conditions_results_json:
                cond_res = ConditionResult(ConditionCategory[condition['Status']], condition['More Info'])
                cond_res.set_name(condition['Condition'])
                self.conditions_results.append(cond_res)
        else:
            self.conditions_results = None

        json_display = json_dict.get('display', [])
        self.display = self._process_jsonified_display_items(json_display)

    def process_conditions(self) -> List[Condition]:
        """Conditions are already processed it is to prevent errors."""
        # Intentionally a no-op: conditions were reconstructed in __init__.
        pass

    @classmethod
    def _process_jsonified_display_items(cls, display: List[Dict[str, Any]]) -> List[Any]:
        # Rebuild display items from their serialized {'type', 'payload'} form.
        assert isinstance(display, list)
        output = []

        for record in display:
            display_type, payload = record['type'], record['payload']
            if display_type == 'html':
                # Raw HTML string, used as-is.
                output.append(payload)
            elif display_type == 'dataframe':
                df = pd.DataFrame.from_records(payload)
                output.append(df)
            elif display_type == 'plotly':
                plotly_json = io.StringIO(payload)
                output.append(plotly.io.read_json(plotly_json))
            elif display_type == 'plt':
                # Matplotlib figure serialized as a base64 PNG.
                output.append((f'<img src=\'data:image/png;base64,{payload}\'>'))
            elif display_type == 'images':
                assert isinstance(payload, list)
                output.extend(imagetag(base64.b64decode(it)) for it in payload)
            elif display_type == 'displaymap':
                # Nested tabs: recurse into each tab's item list.
                assert isinstance(payload, dict)
                output.append(DisplayMap(**{
                    k: cls._process_jsonified_display_items(v)
                    for k, v in payload.items()
                }))
            else:
                raise ValueError(f'Unexpected type of display received: {display_type}')

        return output
class CheckFailureJson(CheckFailure):
    """Class which holds a check run exception.

    Parameters
    ----------
    json_data: Union[str, Dict]
        Json data
    """

    def __init__(self, json_dict: Union[str, Dict]):
        if isinstance(json_dict, str):
            json_dict = jsonpickle.loads(json_dict)

        self.header = json_dict.get('header')
        # The original check object is unavailable here; a FakeCheck carries
        # just the serialized metadata.
        self.check = FakeCheck(json_dict.get('check'))
        # Serialized as a string - the original traceback object is lost.
        self.exception = json_dict.get('exception')

    def print_traceback(self):
        """Print the traceback of the failure."""
        print(self.exception)
"""Module with check/suite result display strategy in different envs."""
import abc
import html
import io
import sys
import typing as t
import plotly.io as pio
from IPython.core.display import display, display_html
from ipywidgets import Widget
from runml_checks.core.serialization.abc import HTMLFormatter, HtmlSerializer, IPythonSerializer, WidgetSerializer
from runml_checks.utils.ipython import is_colab_env, is_databricks_env, is_kaggle_env, is_sagemaker_env
from runml_checks.utils.logger import get_logger
from runml_checks.utils.strings import create_new_file_name, get_random_string, widget_to_html, widget_to_html_string
if t.TYPE_CHECKING:
from wandb.sdk.data_types.base_types.wb_value import WBValue # pylint: disable=unused-import
__all__ = ['DisplayableResult', 'save_as_html', 'display_in_gui']
T = t.TypeVar('T')


class DisplayableResult(abc.ABC):
    """Display API for the check/suite result objects."""

    @property
    @abc.abstractmethod
    def widget_serializer(self) -> WidgetSerializer[t.Any]:
        """Return WidgetSerializer instance."""
        raise NotImplementedError()

    @property
    @abc.abstractmethod
    def ipython_serializer(self) -> IPythonSerializer[t.Any]:
        """Return IPythonSerializer instance."""
        raise NotImplementedError()

    @property
    @abc.abstractmethod
    def html_serializer(self) -> HtmlSerializer[t.Any]:
        """Return HtmlSerializer instance."""
        raise NotImplementedError()

    def show(
        self,
        as_widget: bool = True,
        unique_id: t.Optional[str] = None,
        **kwargs
    ) -> t.Optional[HTMLFormatter]:
        """Display result.

        The rendering strategy is chosen from the runtime environment:
        sphinx-gallery builds receive an object exposing ``_repr_html_``;
        kaggle/databricks/sagemaker notebooks get an iframe; colab gets
        inlined HTML; everything else goes through IPython's display
        machinery directly.

        Parameters
        ----------
        as_widget : bool, default True
            whether to display result with help of ipywidgets or not
        unique_id : Optional[str], default None
            unique identifier of the result output
        **kwargs :
            other key-value arguments will be passed to the `Serializer.serialize`
            method

        Returns
        -------
        Optional[HTMLFormatter] :
            when used by sphinx-gallery
        """
        if 'sphinx_gallery' in pio.renderers.default:
            # TODO: why we need this? add comments
            # Sphinx-gallery captures output via the `_repr_html_` protocol,
            # so the fully rendered widget HTML is wrapped in a one-off
            # holder object and returned.
            html = widget_to_html_string(  # pylint: disable=redefined-outer-name
                self.widget_serializer.serialize(output_id=unique_id, **kwargs),
                title=get_result_name(self),
                requirejs=True
            )

            class TempSphinx:
                def _repr_html_(self):
                    return html

            return TempSphinx()

        if is_kaggle_env() or is_databricks_env() or is_sagemaker_env():
            # NOTE(review): these notebook environments appear to need the
            # iframe fallback instead of plain ipywidgets — confirm.
            self.show_in_iframe(as_widget=as_widget, unique_id=unique_id, **kwargs)
        elif is_colab_env() and as_widget is True:
            widget = self.widget_serializer.serialize(**kwargs)
            content = widget_to_html_string(widget, title=get_result_name(self))
            display_html(content, raw=True)
        elif is_colab_env() and as_widget is False:
            display(*self.ipython_serializer.serialize(**kwargs))
        elif as_widget is True:
            display_html(self.widget_serializer.serialize(
                output_id=unique_id,
                **kwargs
            ))
        else:
            display(*self.ipython_serializer.serialize(
                output_id=unique_id,
                **kwargs
            ))

    def show_in_iframe(
        self,
        as_widget: bool = True,
        unique_id: t.Optional[str] = None,
        **kwargs
    ):
        """Display result in an iframe.

        Parameters
        ----------
        as_widget : bool, default True
            whether to display result with help of ipywidgets or not
        unique_id : Optional[str], default None
            unique identifier of the result output
        **kwargs :
            other key-value arguments will be passed to the `Serializer.serialize`
            method
        """
        # A stable id is required so anchor links inside the iframe resolve.
        output_id = unique_id or get_random_string(n=25)

        if is_colab_env() and as_widget is True:
            widget = self.widget_serializer.serialize(**kwargs)
            content = widget_to_html_string(widget, title=get_result_name(self))
            display_html(content, raw=True)
        elif is_colab_env() and as_widget is False:
            display(*self.ipython_serializer.serialize(**kwargs))
        elif as_widget is True:
            widget = self.widget_serializer.serialize(output_id=output_id, is_for_iframe_with_srcdoc=True, **kwargs)
            content = widget_to_html_string(widget, title=get_result_name(self))
            display_html(iframe(srcdoc=content), raw=True)
        else:
            # Fully self-contained HTML document inlined via the iframe
            # `srcdoc` attribute.
            display_html(
                iframe(srcdoc=self.html_serializer.serialize(
                    output_id=output_id,
                    full_html=True,
                    include_requirejs=True,
                    include_plotlyjs=True,
                    is_for_iframe_with_srcdoc=True,
                    **kwargs
                )),
                raw=True
            )

    def show_in_window(self, **kwargs):
        """Display result in a separate window."""
        # **kwargs accepted for interface symmetry with the other `show_*`
        # methods but not forwarded.
        display_in_gui(self)

    def show_not_interactive(
        self,
        unique_id: t.Optional[str] = None,
        **kwargs
    ):
        """Display the not interactive version of result output.

        In this case, ipywidgets will not be used and plotly
        figures will be transformed into png images.

        Parameters
        ----------
        unique_id : Optional[str], default None
            unique identifier of the result output
        **kwargs :
            other key-value arguments will be passed to the `Serializer.serialize`
            method
        """
        display(*self.ipython_serializer.serialize(
            output_id=unique_id,
            plotly_to_image=True,
            **kwargs
        ))

    def _ipython_display_(self, **kwargs):
        """Display result."""
        self.show(**kwargs)

    @abc.abstractmethod
    def to_widget(self, **kwargs) -> Widget:
        """Serialize result into a ipywidgets.Widget instance."""
        raise NotImplementedError()

    @abc.abstractmethod
    def to_json(self, **kwargs) -> str:
        """Serialize result into a json string."""
        raise NotImplementedError()

    @abc.abstractmethod
    def to_wandb(self, **kwargs) -> 'WBValue':
        """Send result to the wandb."""
        raise NotImplementedError()

    @abc.abstractmethod
    def save_as_html(
        self,
        file: t.Union[str, io.TextIOWrapper, None] = None,
        **kwargs
    ) -> t.Optional[str]:
        """Save a result to an HTML file.

        Parameters
        ----------
        file : filename or file-like object
            The file to write the HTML output to. If None writes to output.html

        Returns
        -------
        Optional[str] :
            name of newly create file
        """
        raise NotImplementedError()
def display_in_gui(result: DisplayableResult):
    """Display suite result or check result in a new python gui window.

    Renders the result's HTML output inside a PyQt5 web-engine window.
    Logs an error (instead of raising) when PyQt5 is not installed or the
    window cannot be shown.

    Parameters
    ----------
    result : DisplayableResult
        result instance whose HTML output will be rendered.
    """
    try:
        from PyQt5.QtWebEngineWidgets import QWebEngineView  # pylint: disable=import-outside-toplevel
        from PyQt5.QtWidgets import QApplication  # pylint: disable=import-outside-toplevel
    except ImportError:
        # BUGFIX: the pip hint previously contained a comma
        # ("pip install pyqt5, pyqtwebengine") which is not a valid command.
        get_logger().error(
            'Missing packages in order to display result in GUI, '
            'either run "pip install pyqt5 pyqtwebengine" '
            'or use "result.save_as_html()" to save result'
        )
    else:
        try:
            app = QApplication(sys.argv)
            web = QWebEngineView()
            web.setWindowTitle('runml_checks')
            web.setGeometry(0, 0, 1200, 1200)
            # Render the result to an in-memory buffer and hand the HTML
            # to the web view.
            html_out = io.StringIO()
            result.save_as_html(html_out)
            web.setHtml(html_out.getvalue())
            web.show()
            sys.exit(app.exec_())
        except Exception:  # pylint: disable=broad-except
            # BUGFIX: this previously caught BaseException, which swallowed
            # the SystemExit raised by sys.exit() above — logging a spurious
            # error after every successful run and preventing interpreter
            # exit. Exception leaves SystemExit/KeyboardInterrupt alone.
            get_logger().error(
                'Unable to show result, run in an interactive environment '
                'or use "result.save_as_html()" to save result'
            )
def get_result_name(result) -> str:
    """Get Check/Suite result instance name.

    Resolution order: a ``name`` attribute, then a callable ``get_header``
    method, finally the type name of the object itself.
    """
    _missing = object()

    name = getattr(result, 'name', _missing)
    if name is not _missing:
        return name

    header_getter = getattr(result, 'get_header', _missing)
    if header_getter is not _missing and callable(header_getter):
        return header_getter()

    return type(result).__name__
T = t.TypeVar('T')


def save_as_html(
    serializer: t.Union[HtmlSerializer[T], WidgetSerializer[T]],
    file: t.Union[str, io.TextIOWrapper, None] = None,
    requirejs: bool = True,
    **kwargs
) -> t.Optional[str]:
    """Save a result to an HTML file.

    Parameters
    ----------
    serializer : Union[HtmlSerializer[T], WidgetSerializer[T]]
        serializer to prepare an output
    file : filename or file-like object
        The file to write the HTML output to. If None writes to output.html
    requirejs: bool , default: True
        whether to include requirejs library into output HTML or not

    Returns
    -------
    Optional[str] :
        name of newly create file; None when writing to a file-like object

    Raises
    ------
    TypeError
        on an unsupported `file` or `serializer` type.
    """
    if file is None:
        file = 'output.html'
    if isinstance(file, str):
        # Avoid overwriting an existing file by picking a fresh name.
        file = create_new_file_name(file)

    if isinstance(serializer, WidgetSerializer):
        widget_to_html(
            serializer.serialize(**kwargs),
            html_out=file,
            title=get_result_name(serializer.value),
            requirejs=requirejs
        )
    elif isinstance(serializer, HtmlSerializer):
        html = serializer.serialize(  # pylint: disable=redefined-outer-name
            full_html=True,
            include_requirejs=requirejs,
            include_plotlyjs=True,
            **kwargs
        )
        if isinstance(file, str):
            with open(file, 'w', encoding='utf-8') as f:
                f.write(html)
        elif hasattr(file, 'write'):
            # BUGFIX: previously only io.StringIO was accepted here, even
            # though the signature advertises io.TextIOWrapper; accept any
            # writable text stream (StringIO, TextIOWrapper, ...).
            file.write(html)
        else:
            raise TypeError(f'Unsupported type of "file" parameter - {type(file)}')
    else:
        raise TypeError(f'Unsupported serializer type - {type(serializer)}')

    if isinstance(file, str):
        return file
def iframe(
    *,
    id: t.Optional[str] = None,  # pylint: disable=redefined-builtin
    height: str = '600px',
    width: str = '100%',
    allow: str = 'fullscreen',
    frameborder: str = '0',
    with_fullscreen_btn: bool = True,
    **attributes
) -> str:
    """Return html iframe tag.

    Any extra keyword arguments are rendered as additional iframe
    attributes; attributes whose value is None are omitted, and a
    `srcdoc` value is HTML-escaped.
    """
    if id is None:
        id = f'runml_checks-result-iframe-{get_random_string()}'

    tag_attributes = {
        'id': id,
        'height': height,
        'width': width,
        'allow': allow,
        'frameborder': frameborder,
    }
    tag_attributes.update(attributes)

    # Inlined documents must be escaped to survive embedding in an attribute.
    if tag_attributes.get('srcdoc') is not None:
        tag_attributes['srcdoc'] = html.escape(tag_attributes['srcdoc'])

    rendered_attributes = '\n'.join(
        f'{name}="{value}"'
        for name, value in tag_attributes.items()
        if value is not None
    )

    if not with_fullscreen_btn:
        return f'<iframe {rendered_attributes}></iframe>'

    fullscreen_script = (
        f"document.querySelector('#{id}').requestFullscreen();"
    )
    return f"""
    <div style="display: flex; justify-content: flex-end; padding: 1rem 2rem 1rem 2rem;">
        <button onclick="{fullscreen_script}" >
            Full Screen
        </button>
    </div>
    <iframe allowfullscreen {rendered_attributes}></iframe>
    """
"""Main serialization abstractions."""
import abc
import io
import typing as t
import pandas as pd
from ipywidgets.widgets import Widget
from pandas.io.formats.style import Styler
from plotly.basedatatypes import BaseFigure
from typing_extensions import Protocol, runtime_checkable
from runml_checks.core import check_result as check_types # pylint: disable=unused-import
from runml_checks.core.serialization import common
try:
from wandb.sdk.data_types.base_types.wb_value import WBValue # pylint: disable=unused-import
except ImportError:
pass
__all__ = [
'Serializer',
'HtmlSerializer',
'JsonSerializer',
'WidgetSerializer',
'WandbSerializer',
'ABCDisplayItemsHandler'
]
T = t.TypeVar('T')
class Serializer(abc.ABC, t.Generic[T]):
    """Base protocol for all other serializers.

    Parameters
    ----------
    value : T
        the object that the concrete subclass will serialize.
    """

    # Wrapped value, read by the subclass `serialize` implementations.
    value: T

    def __init__(self, value: T, **kwargs):
        # **kwargs accepted (and ignored) so subclasses can share a
        # uniform constructor signature.
        self.value = value
class HtmlSerializer(Serializer[T]):
    """To html serializer protocol."""

    @abc.abstractmethod
    def serialize(self, **kwargs) -> str:
        """Serialize into html.

        Returns
        -------
        str
            HTML representation of ``self.value``.
        """
        raise NotImplementedError()
class JsonSerializer(Serializer[T]):
    """To json serializer protocol."""

    @abc.abstractmethod
    def serialize(self, **kwargs) -> t.Union[t.Dict[t.Any, t.Any], t.List[t.Any]]:
        """Serialize into json.

        Returns
        -------
        Union[Dict[Any, Any], List[Any]]
            json-compatible representation of ``self.value``.
        """
        raise NotImplementedError()
class WidgetSerializer(Serializer[T]):
    """To ipywidget serializer protocol."""

    @abc.abstractmethod
    def serialize(self, **kwargs) -> Widget:
        """Serialize into ipywidgets.Widget instance.

        Returns
        -------
        ipywidgets.Widget
        """
        raise NotImplementedError()
class WandbSerializer(Serializer[T]):
    """To wandb metadata serializer protocol."""

    @abc.abstractmethod
    def serialize(self, **kwargs) -> t.Dict[str, 'WBValue']:
        """Serialize into Wandb media format.

        Returns
        -------
        Dict[str, WBValue]
            mapping of section names to wandb media values.
        """
        raise NotImplementedError()
@runtime_checkable
class HTMLFormatter(Protocol):
    """An HTML formatter.

    Matches objects implementing the IPython ``_repr_html_`` hook.
    """

    def _repr_html_(self) -> t.Any: ...
@runtime_checkable
class MarkdownFormatter(Protocol):
    """A Markdown formatter.

    Matches objects implementing the IPython ``_repr_markdown_`` hook.
    """

    def _repr_markdown_(self) -> t.Any: ...
@runtime_checkable
class JSONFormatter(Protocol):
    """A JSON formatter.

    Matches objects implementing the IPython ``_repr_json_`` hook.
    """

    def _repr_json_(self) -> t.Any: ...
@runtime_checkable
class JPEGFormatter(Protocol):
    """A JPEG formatter.

    Matches objects implementing the IPython ``_repr_jpeg_`` hook.
    """

    def _repr_jpeg_(self) -> t.Any: ...
@runtime_checkable
class PNGFormatter(Protocol):
    """A PNG formatter.

    Matches objects implementing the IPython ``_repr_png_`` hook.
    """

    def _repr_png_(self) -> t.Any: ...
@runtime_checkable
class SVGFormatter(Protocol):
    """An SVG formatter.

    Matches objects implementing the IPython ``_repr_svg_`` hook.
    """

    # BUGFIX: this protocol previously declared `_repr_png_` (copy-paste
    # from PNGFormatter above), which made SVGFormatter a duplicate of
    # PNGFormatter and meant it never matched actual SVG-capable objects.
    def _repr_svg_(self, **kwargs) -> t.Any: ...
@runtime_checkable
class IPythonDisplayFormatter(Protocol):
    """An Formatter for objects that know how to display themselves.

    Matches objects implementing the IPython ``_ipython_display_`` hook.
    """

    def _ipython_display_(self, **kwargs) -> t.Any: ...
@runtime_checkable
class MimeBundleFormatter(Protocol):
    """A Formatter for arbitrary mime-types.

    Matches objects implementing the IPython ``_repr_mimebundle_`` hook.
    """

    def _repr_mimebundle_(self, **kwargs) -> t.Any: ...
# NOTE: For more info about IPython formatters API refer to the next documentation page:
# - https://ipython.readthedocs.io/en/stable/api/generated/IPython.core.formatters.html

# Union of everything IPython's display machinery knows how to render.
IPythonFormatter = t.Union[
    HTMLFormatter,
    MarkdownFormatter,
    JSONFormatter,
    JPEGFormatter,
    PNGFormatter,
    SVGFormatter,
    IPythonDisplayFormatter,
    MimeBundleFormatter
]
class IPythonSerializer(Serializer[T]):
    """To IPython formatters list serializer."""

    @abc.abstractmethod
    def serialize(self, **kwargs) -> t.List[IPythonFormatter]:
        """Serialize into a list of objects that are Ipython displayable.

        Returns
        -------
        List[IPythonFormatter]
            objects implementing at least one IPython ``_repr_*_`` hook.
        """
        raise NotImplementedError()
class ABCDisplayItemsHandler(Protocol):
    """Trait that describes 'CheckResult.display' processing logic.

    Subclasses implement the format-specific ``handle_*`` hooks;
    ``handle_display``/``handle_item``/``handle_callable`` carry shared
    dispatch logic that subclasses are expected to invoke via ``super()``
    even though they are marked abstract.
    """

    @classmethod
    def supported_item_types(cls):
        """Return set of supported types of display items."""
        return frozenset([
            str, pd.DataFrame, Styler, BaseFigure, t.Callable, check_types.DisplayMap
        ])

    @classmethod
    @abc.abstractmethod
    def handle_display(
        cls,
        display: t.List['check_types.TDisplayItem'],
        **kwargs
    ) -> t.List[t.Any]:
        """Serialize list of display items.

        Parameters
        ----------
        display : List[Union[str, DataFrame, Styler, BaseFigure, Callable]]
            list of display items

        Returns
        -------
        List[Any]
        """
        # Shared implementation: delegate each item (with its position) to
        # `handle_item`.
        return [cls.handle_item(it, index, **kwargs) for index, it in enumerate(display)]

    @classmethod
    @abc.abstractmethod
    def handle_item(cls, item: 'check_types.TDisplayItem', index: int, **kwargs) -> t.Any:
        """Serialize display item."""
        # Type-based dispatch to the format-specific handlers below.
        # `callable` is tested last because DisplayMap and figures could
        # otherwise shadow it.
        if isinstance(item, str):
            return cls.handle_string(item, index, **kwargs)
        elif isinstance(item, (pd.DataFrame, Styler)):
            return cls.handle_dataframe(item, index, **kwargs)
        elif isinstance(item, BaseFigure):
            return cls.handle_figure(item, index, **kwargs)
        elif isinstance(item, check_types.DisplayMap):
            return cls.handle_display_map(item, index, **kwargs)
        elif callable(item):
            return cls.handle_callable(item, index, **kwargs)
        else:
            raise TypeError(f'Unable to handle display item of type: {type(item)}')

    @classmethod
    @abc.abstractmethod
    def handle_string(cls, item: str, index: int, **kwargs) -> t.Any:
        """Handle textual item."""
        raise NotImplementedError()

    @classmethod
    @abc.abstractmethod
    def handle_dataframe(cls, item: t.Union[pd.DataFrame, Styler], index: int, **kwargs) -> t.Any:
        """Handle dataframe item."""
        raise NotImplementedError()

    @classmethod
    @abc.abstractmethod
    def handle_callable(cls, item: t.Callable, index: int, **kwargs) -> t.List[io.BytesIO]:
        """Handle callable."""
        # TODO: callable is a special case, add comments
        # The callable is expected to draw matplotlib figures as a side
        # effect; render it on the non-interactive 'agg' backend and
        # collect the produced figures as in-memory image buffers.
        with common.switch_matplot_backend('agg'):
            item()
        return common.read_matplot_figures()

    @classmethod
    @abc.abstractmethod
    def handle_figure(cls, item: BaseFigure, index: int, **kwargs) -> t.Any:
        """Handle plotly figure item."""
        raise NotImplementedError()

    @classmethod
    @abc.abstractmethod
    def handle_display_map(cls, item: 'check_types.DisplayMap', index: int, **kwargs) -> t.Any:
        """Handle display map instance item."""
        raise NotImplementedError()
"""Module containing html serializer for the CheckResult type."""
import textwrap
import typing as t
from plotly.basedatatypes import BaseFigure
from plotly.io import to_html
from typing_extensions import Literal
from runml_checks.core import check_result as check_types
from runml_checks.core.serialization.abc import ABCDisplayItemsHandler, HtmlSerializer
from runml_checks.core.serialization.common import (aggregate_conditions, form_output_anchor, plotlyjs_script,
requirejs_script)
from runml_checks.core.serialization.dataframe.html import DataFrameSerializer as DataFrameHtmlSerializer
from runml_checks.utils.html import imagetag, linktag
__all__ = ['CheckResultSerializer']
CheckResultSection = t.Union[
Literal['condition-table'],
Literal['additional-output'],
]
class CheckResultSerializer(HtmlSerializer['check_types.CheckResult']):
    """Serializes any CheckResult instance into HTML format.

    Parameters
    ----------
    value : CheckResult
        CheckResult instance that needed to be serialized.

    Raises
    ------
    TypeError
        if `value` is not a CheckResult instance.
    """

    def __init__(self, value: 'check_types.CheckResult', **kwargs):
        if not isinstance(value, check_types.CheckResult):
            raise TypeError(
                f'Expected "CheckResult" but got "{type(value).__name__}"'
            )
        super().__init__(value=value)

    def serialize(
        self,
        output_id: t.Optional[str] = None,
        check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,
        full_html: bool = False,
        include_requirejs: bool = False,
        include_plotlyjs: bool = True,
        connected: bool = True,
        plotly_to_image: bool = False,
        is_for_iframe_with_srcdoc: bool = False,
        **kwargs
    ) -> str:
        """Serialize a CheckResult instance into HTML format.

        Parameters
        ----------
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links
        check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None
            sequence of check result sections to include into the output,
            in case of 'None' all sections will be included
        full_html : bool, default False
            whether to return a fully independent HTML document or only CheckResult content
        include_requirejs : bool, default False
            whether to include requirejs library into output or not
        include_plotlyjs : bool, default True
            whether to include plotlyjs library into output or not
        connected : bool, default True
            whether to use CDN to load js libraries or to inject their code into output
        plotly_to_image : bool, default False
            whether to transform Plotly figure instance into static image or not
        is_for_iframe_with_srcdoc : bool, default False
            anchor links, in order to work within iframe require additional prefix
            'about:srcdoc'. This flag tells function whether to add that prefix to
            the anchor links or not

        Returns
        -------
        str
        """
        if full_html is True:
            # A standalone document must be self-sufficient: bundle both JS
            # libraries and inline their code instead of loading from a CDN.
            include_plotlyjs = True
            include_requirejs = True
            connected = False

        sections_to_include = verify_include_parameter(check_sections)
        sections = [self.prepare_header(output_id), self.prepare_summary()]

        if 'condition-table' in sections_to_include:
            # (''.join of a plain string is a no-op; kept for uniformity.)
            sections.append(''.join(self.prepare_conditions_table(
                output_id=output_id
            )))

        if 'additional-output' in sections_to_include:
            sections.append(''.join(self.prepare_additional_output(
                output_id=output_id,
                plotly_to_image=plotly_to_image,
                is_for_iframe_with_srcdoc=is_for_iframe_with_srcdoc
            )))

        plotlyjs = plotlyjs_script(connected) if include_plotlyjs is True else ''
        requirejs = requirejs_script(connected) if include_requirejs is True else ''

        if full_html is False:
            return ''.join([requirejs, plotlyjs, *sections])

        # TODO: use some style to make it pretty
        return textwrap.dedent(f"""
            <html>
            <head><meta charset="utf-8"/></head>
            <body style="background-color: white;">
                {''.join([requirejs, plotlyjs, *sections])}
            </body>
            </html>
        """)

    def prepare_header(self, output_id: t.Optional[str] = None) -> str:
        """Prepare the header section of the html output."""
        header = self.value.get_header()
        header = f'<b>{header}</b>'
        if output_id is not None:
            # Anchor id lets condition-table links jump to this check.
            check_id = self.value.get_check_id(output_id)
            return f'<h4 id="{check_id}">{header}</h4>'
        else:
            return f'<h4>{header}</h4>'

    def prepare_summary(self) -> str:
        """Prepare the summary section of the html output."""
        return f'<p>{self.value.get_metadata()["summary"]}</p>'

    def prepare_conditions_table(
        self,
        max_info_len: int = 3000,
        include_icon: bool = True,
        include_check_name: bool = False,
        output_id: t.Optional[str] = None,
    ) -> str:
        """Prepare the conditions table of the html output.

        Parameters
        ----------
        max_info_len : int, default 3000
            max length of the additional info
        include_icon : bool , default: True
            if to show the html condition result icon or the enum
        include_check_name : bool, default False
            whether to include check name into dataframe or not
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links

        Returns
        -------
        str
            empty string when the check declared no conditions.
        """
        if not self.value.have_conditions():
            return ''
        table = DataFrameHtmlSerializer(aggregate_conditions(
            self.value,
            max_info_len=max_info_len,
            include_icon=include_icon,
            include_check_name=include_check_name,
            output_id=output_id
        )).serialize()
        return f'<h5><b>Conditions Summary</b></h5>{table}'

    def prepare_additional_output(
        self,
        output_id: t.Optional[str] = None,
        plotly_to_image: bool = False,
        is_for_iframe_with_srcdoc: bool = False,
    ) -> t.List[str]:
        """Prepare the display content of the html output.

        Parameters
        ----------
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links
        plotly_to_image : bool, default False
            whether to transform Plotly figure instance into static image or not
        is_for_iframe_with_srcdoc : bool, default False
            anchor links, in order to work within iframe require additional prefix
            'about:srcdoc'. This flag tells function whether to add that prefix to
            the anchor links or not

        Returns
        -------
        List[str]
        """
        return DisplayItemsHandler.handle_display(
            self.value.display,
            output_id=output_id,
            plotly_to_image=plotly_to_image,
            is_for_iframe_with_srcdoc=is_for_iframe_with_srcdoc
        )
class DisplayItemsHandler(ABCDisplayItemsHandler):
    """Auxiliary class to decouple display handling logic from other functionality."""

    @classmethod
    def handle_display(
        cls,
        display: t.List['check_types.TDisplayItem'],
        output_id: t.Optional[str] = None,
        is_for_iframe_with_srcdoc: bool = False,
        include_header: bool = True,
        include_trailing_link: bool = True,
        **kwargs
    ) -> t.List[str]:
        """Serialize CheckResult display items into HTML.

        Parameters
        ----------
        display : List[Union[str, DataFrame, Styler, BaseFigure, Callable, DisplayMap]]
            list of display items
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links
        is_for_iframe_with_srcdoc : bool, default False
            anchor links, in order to work within iframe require additional prefix
            'about:srcdoc'. This flag tells function whether to add that prefix to
            the anchor links or not
        include_header: bool, default True
            whether to include header
        include_trailing_link: bool, default True
            whether to include "go to top" link

        Returns
        -------
        List[str]
        """
        output = [cls.header()] if include_header else []
        output.extend(super().handle_display(display, **{'output_id': output_id, **kwargs}))

        if len(display) == 0:
            output.append(cls.empty_content_placeholder())

        # "Go to top" anchor is only meaningful when an output id exists.
        if output_id is not None and include_trailing_link:
            output.append(cls.go_to_top_link(
                output_id,
                is_for_iframe_with_srcdoc=is_for_iframe_with_srcdoc
            ))

        return output

    @classmethod
    def header(cls) -> str:
        """Return header section."""
        return '<h5><b>Additional Outputs</b></h5>'

    @classmethod
    def empty_content_placeholder(cls) -> str:
        """Return placeholder in case of content absence."""
        return '<p><b>✓</b>Nothing to display</p>'

    @classmethod
    def go_to_top_link(
        cls,
        output_id: str,
        is_for_iframe_with_srcdoc: bool,
    ) -> str:
        """Return 'Go To Top' link."""
        link = linktag(
            text='Go to top',
            style={'font-size': '14px'},
            href=f'#{form_output_anchor(output_id)}',
            is_for_iframe_with_srcdoc=is_for_iframe_with_srcdoc
        )
        return f'<br>{link}'

    @classmethod
    def handle_string(cls, item, index, **kwargs) -> str:
        """Handle textual item."""
        return f'<div>{item}</div>'

    @classmethod
    def handle_dataframe(cls, item, index, **kwargs) -> str:
        """Handle dataframe item."""
        return DataFrameHtmlSerializer(item).serialize()

    @classmethod
    def handle_callable(cls, item, index, **kwargs) -> str:
        """Handle callable.

        The parent implementation renders the callable's matplotlib output
        into in-memory image buffers; each buffer becomes an inline image tag.
        """
        images = super().handle_callable(item, index, **kwargs)
        tags = []

        for buffer in images:
            buffer.seek(0)
            tags.append(imagetag(buffer.read()))
            buffer.close()

        return ''.join(tags)

    @classmethod
    def handle_figure(
        cls,
        item: BaseFigure,
        index: int,
        plotly_to_image: bool = False,
        **kwargs
    ) -> str:
        """Handle plotly figure item."""
        if plotly_to_image is True:
            img = item.to_image(format='jpeg', engine='auto')
            return imagetag(img)

        # Cleanup script that purges the plot when its notebook cell is
        # removed or cleared, preventing leaked WebGL/DOM resources.
        # NOTE(review): plotly substitutes '{plot_id}' via string replacement
        # in `to_html`, so the mixed single/double braces below are
        # intentional (doubled braces are literal JS blocks) — confirm
        # against the plotly.io.to_html docs before touching this string.
        post_script = textwrap.dedent("""
            var gd = document.getElementById('{plot_id}');
            var x = new MutationObserver(function (mutations, observer) {{
                var display = window.getComputedStyle(gd).display;
                if (!display || display === 'none') {{
                    console.log([gd, 'removed!']);
                    Plotly.purge(gd);
                    observer.disconnect();
                }}
            }});

            // Listen for the removal of the full notebook cells
            var notebookContainer = gd.closest('#notebook-container');
            if (notebookContainer) {{
                x.observe(notebookContainer, {childList: true});
            }}

            // Listen for the clearing of the current output cell
            var outputEl = gd.closest('.output');
            if (outputEl) {{
                x.observe(outputEl, {childList: true});
            }}
        """)
        return to_html(
            item,
            auto_play=False,
            include_plotlyjs='require',
            post_script=post_script,
            full_html=False,
            default_width='100%',
            default_height=525,
            validate=True,
        )

    @classmethod
    def handle_display_map(cls, item: 'check_types.DisplayMap', index: int, **kwargs) -> str:
        """Handle display map instance item."""
        # Each map entry becomes a collapsible <details> section whose body
        # is produced by a recursive handle_display call (header and
        # trailing link suppressed for nested content).
        template = textwrap.dedent("""
            <details>
                <summary><strong>{name}</strong></summary>
                <div style="
                    display: flex;
                    flex-direction: column;
                    width: 100%;
                    padding: 1.5rem;
                ">
                    {content}
                </div>
            </details>
        """)
        return ''.join([
            template.format(
                name=k,
                content=''.join(cls.handle_display(
                    v,
                    include_header=False,
                    include_trailing_link=False,
                    **kwargs
                ))
            )
            for k, v in item.items()
        ])
def verify_include_parameter(
    include: t.Optional[t.Sequence[CheckResultSection]] = None
) -> t.Set[CheckResultSection]:
    """Verify CheckResultSection sequence.

    Returns the set of sections to render; ``None`` means all known
    sections. Raises ValueError on an empty sequence or unknown names.
    """
    known_sections = t.cast(
        t.Set[CheckResultSection],
        {'condition-table', 'additional-output'}
    )

    if include is None:
        return known_sections
    if len(include) == 0:
        raise ValueError('include parameter cannot be empty')

    requested_sections = set(include)
    if requested_sections - known_sections:
        raise ValueError(
            'include parameter must contain '
            'Union[Literal["condition-table"], Literal["additional-output"]]'
        )
    return requested_sections
"""Module containing Wandb serializer for the CheckResult type."""
import typing as t
from collections import OrderedDict
import pandas as pd
from pandas.io.formats.style import Styler
from plotly.basedatatypes import BaseFigure
from runml_checks.core import check_result as check_types
from runml_checks.core.serialization.abc import ABCDisplayItemsHandler, WandbSerializer
from runml_checks.core.serialization.common import (aggregate_conditions, concatv_images, flatten, normalize_value,
prettify)
from runml_checks.utils.wandb_utils import WANDB_INSTALLATION_CMD
try:
import wandb
except ImportError as e:
raise ImportError(
'Wandb serializer requires the wandb python package. '
f'To get it, run - {WANDB_INSTALLATION_CMD}.'
) from e
if t.TYPE_CHECKING:
from wandb.sdk.data_types.base_types.wb_value import WBValue # pylint: disable=unused-import
__all__ = ['CheckResultSerializer']
class CheckResultSerializer(WandbSerializer['check_types.CheckResult']):
    """Serializes any CheckResult instance into Wandb media metadata.

    Parameters
    ----------
    value : CheckResult
        CheckResult instance that needed to be serialized.

    Raises
    ------
    TypeError
        if `value` is not a CheckResult instance.
    """

    def __init__(self, value: 'check_types.CheckResult', **kwargs):
        if not isinstance(value, check_types.CheckResult):
            raise TypeError(
                f'Expected "CheckResult" but got "{type(value).__name__}"'
            )
        super().__init__(value=value)

    def serialize(self, **kwargs) -> t.Dict[str, 'WBValue']:
        """Serialize a CheckResult instance into Wandb media metadata.

        Returns
        -------
        Dict[str, WBValue]
            mapping of '<check header>/<section name>' keys to wandb values.
        """
        header = self.value.header
        output = OrderedDict()

        # The conditions table exists only if the check declared conditions.
        conditions_table = self.prepare_conditions_table()
        if conditions_table is not None:
            output[f'{header}/conditions table'] = conditions_table

        for section_name, wbvalue in self.prepare_display():
            output[f'{header}/{section_name}'] = wbvalue

        output[f'{header}/results'] = self.prepare_summary_table()
        return output

    def prepare_summary_table(self) -> wandb.Table:
        """Prepare summary table."""
        check_result = self.value
        metadata = check_result.get_metadata()
        # Single-row table: header, params, summary and the (normalized,
        # pretty-printed) check value.
        return wandb.Table(
            columns=['header', 'params', 'summary', 'value'],
            data=[[
                check_result.header,
                prettify(metadata['params']),
                metadata['summary'],
                prettify(normalize_value(check_result.value))
            ]],
        )

    def prepare_conditions_table(self) -> t.Optional[wandb.Table]:
        """Prepare conditions table.

        Returns
        -------
        Optional[wandb.Table]
            ``None`` when the check result has no condition results.
        """
        if self.value.conditions_results:
            df = aggregate_conditions(self.value, include_icon=False)
            return wandb.Table(dataframe=df.data, allow_mixed_types=True)

    def prepare_display(self) -> t.List[t.Tuple[str, 'WBValue']]:
        """Serialize display items into Wandb media format."""
        return DisplayItemsHandler.handle_display(self.value.display)
class DisplayItemsHandler(ABCDisplayItemsHandler):
    """Auxiliary class to decouple display handling logic from other functionality."""

    @classmethod
    def handle_display(
        cls,
        display: t.List['check_types.TDisplayItem'],
        **kwargs
    ) -> t.List[t.Tuple[str, 'WBValue']]:
        """Serialize list of display items to wandb data types.

        Parameters
        ----------
        display : List[Union[str, DataFrame, Styler, BaseFigure, Callable, DisplayMap]]
            list of display items

        Returns
        -------
        List[Tuple[str, 'WBValue']]
        """
        # `handle_display_map` yields nested lists of pairs; flatten the
        # whole result into a flat list of (section name, wandb value)
        # tuples, stopping the descent at 2-tuples.
        return list(flatten(
            l=super().handle_display(display, **kwargs),
            stop=lambda it: isinstance(it, tuple) and len(it) == 2
        ))

    @classmethod
    def handle_string(cls, item: str, index: int, **kwargs) -> t.Tuple[str, 'WBValue']:
        """Handle textual item."""
        return (f'item-{index}-html', wandb.Html(data=item))

    @classmethod
    def handle_dataframe(
        cls,
        item: t.Union[pd.DataFrame, Styler],
        index: int,
        **kwargs
    ) -> t.Tuple[str, 'WBValue']:
        """Handle dataframe item."""
        # A Styler wraps its dataframe in `.data`; plain frames are used
        # as-is. reset_index keeps the index visible in the wandb table.
        if isinstance(item, Styler):
            return (
                f'item-{index}-table',
                wandb.Table(dataframe=item.data.reset_index(), allow_mixed_types=True)
            )
        else:
            return (
                f'item-{index}-table',
                wandb.Table(dataframe=item.reset_index(), allow_mixed_types=True)
            )

    @classmethod
    def handle_callable(cls, item: t.Callable, index: int, **kwargs) -> t.Tuple[str, 'WBValue']:
        """Handle callable.

        Raises
        ------
        ImportError
            if the pillow package is not installed.
        """
        try:
            import PIL.Image as pilimage
        except ImportError as error:
            raise ImportError(
                'Wandb CheckResultSerializer requires the PIL package '
                'to process matplot figures. To get it, run "pip install pillow".'
            ) from error
        else:
            # The parent renders the callable's matplotlib output into image
            # buffers; stack them vertically into a single wandb image.
            images = super().handle_callable(item, index, **kwargs)
            image = concatv_images([pilimage.open(buffer) for buffer in images])
            return (f'item-{index}-figure', wandb.Image(image))

    @classmethod
    def handle_figure(cls, item: BaseFigure, index: int, **kwargs) -> t.Tuple[str, 'WBValue']:
        """Handle plotly figure item."""
        return f'item-{index}-plot', wandb.Plotly(item)

    @classmethod
    def handle_display_map(
        cls,
        item: 'check_types.DisplayMap',
        index: int,
        **kwargs
    ) -> t.List[t.Tuple[str, 'WBValue']]:
        """Handle display map instance item."""
        # Recursively serialize each named sub-display and namespace its
        # section keys under 'item-<index>-displaymap/<name>/'.
        return [
            (
                f'item-{index}-displaymap/{name}/{section_name}',
                wbvalue
            )
            for name, display_items in item.items()
            for section_name, wbvalue in cls.handle_display(display_items, **kwargs)
        ]
"""Module containing ipywidget serializer for the CheckResult type."""
import typing as t
import pandas as pd
import plotly.graph_objects as go
from ipywidgets import HTML, Tab, VBox, Widget
from plotly.basedatatypes import BaseFigure
from runml_checks.core import check_result as check_types
from runml_checks.core.serialization.abc import WidgetSerializer
from runml_checks.core.serialization.common import normalize_widget_style
from . import html
__all__ = ['CheckResultSerializer']
class CheckResultSerializer(WidgetSerializer['check_types.CheckResult']):
    """Serializes any CheckResult instance into ipywidgets.Widget instance.

    Parameters
    ----------
    value : CheckResult
        CheckResult instance that needed to be serialized.

    Raises
    ------
    TypeError
        if `value` is not a CheckResult instance.
    """

    def __init__(self, value: 'check_types.CheckResult', **kwargs):
        if not isinstance(value, check_types.CheckResult):
            raise TypeError(
                f'Expected "CheckResult" but got "{type(value).__name__}"'
            )
        super().__init__(value=value)
        # The HTML serializer produces the markup that the HTML widgets
        # below wrap.
        self._html_serializer = html.CheckResultSerializer(self.value)

    def serialize(
        self,
        output_id: t.Optional[str] = None,
        check_sections: t.Optional[t.Sequence[html.CheckResultSection]] = None,
        plotly_to_image: bool = False,
        is_for_iframe_with_srcdoc: bool = False,
        **kwargs
    ) -> VBox:
        """Serialize a CheckResult instance into ipywidgets.Widget instance.

        Parameters
        ----------
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links
        check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None
            sequence of check result sections to include into the output,
            in case of 'None' all sections will be included
        plotly_to_image : bool, default False
            whether to transform Plotly figure instance into static image or not
        is_for_iframe_with_srcdoc : bool, default False
            anchor links, in order to work within iframe require additional prefix
            'about:srcdoc'. This flag tells function whether to add that prefix to
            the anchor links or not

        Returns
        -------
        ipywidgets.VBox
        """
        sections_to_include = html.verify_include_parameter(check_sections)
        sections: t.List[Widget] = [self.prepare_header(output_id), self.prepare_summary()]

        if 'condition-table' in sections_to_include:
            sections.append(self.prepare_conditions_table(
                output_id=output_id
            ))

        if 'additional-output' in sections_to_include:
            sections.append(self.prepare_additional_output(
                output_id=output_id,
                plotly_to_image=plotly_to_image,
                is_for_iframe_with_srcdoc=is_for_iframe_with_srcdoc
            ))

        return normalize_widget_style(VBox(children=sections))

    def prepare_header(self, output_id: t.Optional[str] = None) -> HTML:
        """Prepare header widget."""
        return HTML(value=self._html_serializer.prepare_header(output_id))

    def prepare_summary(self) -> HTML:
        """Prepare summary widget."""
        return HTML(value=self._html_serializer.prepare_summary())

    def prepare_conditions_table(
        self,
        max_info_len: int = 3000,
        include_icon: bool = True,
        include_check_name: bool = False,
        output_id: t.Optional[str] = None,
    ) -> HTML:
        """Prepare conditions table widget.

        Parameters
        ----------
        max_info_len : int, default 3000
            max length of the additional info
        include_icon : bool , default: True
            if to show the html condition result icon or the enum
        include_check_name : bool, default False
            whether to include check name into dataframe or not
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links

        Returns
        -------
        ipywidgets.HTML
        """
        widget = HTML(value=self._html_serializer.prepare_conditions_table(
            max_info_len=max_info_len,
            include_icon=include_icon,
            include_check_name=include_check_name,
            output_id=output_id
        ))
        return widget

    def prepare_additional_output(
        self,
        output_id: t.Optional[str] = None,
        plotly_to_image: bool = False,
        is_for_iframe_with_srcdoc: bool = False
    ) -> VBox:
        """Prepare additional output widget.

        Parameters
        ----------
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links
        plotly_to_image : bool, default False
            whether to transform Plotly figure instance into static image or not
        is_for_iframe_with_srcdoc : bool, default False
            anchor links, in order to work within iframe require additional prefix
            'about:srcdoc'. This flag tells function whether to add that prefix to
            the anchor links or not

        Returns
        -------
        ipywidgets.VBox
        """
        return VBox(children=DisplayItemsHandler.handle_display(
            display=self.value.display,
            output_id=output_id,
            plotly_to_image=plotly_to_image,
            is_for_iframe_with_srcdoc=is_for_iframe_with_srcdoc
        ))
class DisplayItemsHandler(html.DisplayItemsHandler):
    """Auxiliary class to decouple display handling logic from other functionality."""
    @classmethod
    def handle_display(
        cls,
        display: t.List['check_types.TDisplayItem'],
        output_id: t.Optional[str] = None,
        include_header: bool = True,
        include_trailing_link: bool = True,
        **kwargs
    ) -> t.List[Widget]:
        """Serialize CheckResult display items into list of Widget instances.
        Parameters
        ----------
        display : List[Union[str, DataFrame, Styler, BaseFigure, Callable, DisplayMap]]
            list of display items
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links
        include_header: bool, default True
            whether to include header
        include_trailing_link: bool, default True
            whether to include "go to top" link
        Returns
        -------
        List[Widget]
        """
        # The base (html) handler dispatches per item type; the per-type
        # overrides below wrap each serialized item into a widget.
        return t.cast(t.List[Widget], super().handle_display(
            display=display,
            output_id=output_id,
            include_header=include_header,
            include_trailing_link=include_trailing_link,
            **kwargs
        ))
    @classmethod
    def header(cls) -> HTML:
        """Return header section."""
        return HTML(value=super().header())
    @classmethod
    def empty_content_placeholder(cls) -> HTML:
        """Return placeholder in case of content absence."""
        return HTML(value=super().empty_content_placeholder())
    @classmethod
    def go_to_top_link(cls, output_id: str, is_for_iframe_with_srcdoc: bool) -> HTML:
        """Return 'Go To Top' link."""
        return HTML(value=super().go_to_top_link(output_id, is_for_iframe_with_srcdoc))
    @classmethod
    def handle_figure(
        cls,
        item: BaseFigure,
        index: int,
        plotly_to_image: bool = False,
        **kwargs
    ) -> Widget:
        """Handle plotly figure item.

        Returns an interactive FigureWidget, or - when ``plotly_to_image``
        is set - the static-image markup produced by the html handler.
        """
        return (
            go.FigureWidget(data=item)
            if not plotly_to_image
            else HTML(value=super().handle_figure(
                item, index, plotly_to_image, **kwargs
            ))
        )
    @classmethod
    def handle_string(cls, item: str, index: int, **kwargs) -> HTML:
        """Handle textual item."""
        return HTML(value=super().handle_string(item, index, **kwargs))
    @classmethod
    def handle_dataframe(cls, item: pd.DataFrame, index: int, **kwargs) -> HTML:
        """Handle dataframe item."""
        return HTML(value=super().handle_dataframe(item, index, **kwargs))
    @classmethod
    def handle_callable(cls, item: t.Callable, index: int, **kwargs) -> HTML:
        """Handle callable."""
        return HTML(value=super().handle_callable(item, index, **kwargs))
    @classmethod
    def handle_display_map(cls, item: 'check_types.DisplayMap', index: int, **kwargs) -> VBox:
        """Handle display map instance item."""
        # Each map entry becomes one tab; its display list is handled
        # recursively (without header/trailing link, which belong to the
        # top-level output only).
        tab = Tab()
        children = []
        for i, (name, display) in enumerate(item.items()):
            tab.set_title(i, name)
            children.append(VBox(children=cls.handle_display(
                display,
                include_header=False,
                include_trailing_link=False,
                **kwargs
            )))
        tab.children = children
        # CSS lets tab headers grow to fit their titles instead of truncating.
        style = '<style>.jupyter-widgets.widget-tab > .p-TabBar .p-TabBar-tab {min-width: fit-content;}</style>'
        return VBox(children=[
            HTML(value=style),
            tab
        ]) | /runml_checks-1.0.0-py3-none-any.whl/runml_checks/core/serialization/check_result/widget.py | 0.932806 | 0.200108 | widget.py | pypi
"""Module containing json serializer for the CheckResult type."""
import base64
import typing as t
import pandas as pd
from pandas.io.formats.style import Styler
from plotly.basedatatypes import BaseFigure
from typing_extensions import TypedDict
from runml_checks.core import check_result as check_types
from runml_checks.core import checks # pylint: disable=unused-import
from runml_checks.core.serialization.abc import ABCDisplayItemsHandler, JsonSerializer
from runml_checks.core.serialization.common import aggregate_conditions, normalize_value
__all__ = ['CheckResultSerializer']
class CheckResultMetadata(TypedDict):
    """JSON layout of a serialized CheckResult."""

    # always the literal string 'CheckResult'
    type: str
    # metadata of the check instance that produced this result
    check: 'checks.CheckMetadata'
    # normalized CheckResult.value payload
    value: t.Any
    header: str
    conditions_results: t.List[t.Dict[t.Any, t.Any]]
    # None when serialization was requested without display items
    display: t.Optional[t.List[t.Any]]
class CheckResultSerializer(JsonSerializer['check_types.CheckResult']):
    """Serializes any CheckResult instance into JSON format.

    Parameters
    ----------
    value : CheckResult
        CheckResult instance that needed to be serialized.
    """

    def __init__(self, value: 'check_types.CheckResult', **kwargs):
        if not isinstance(value, check_types.CheckResult):
            raise TypeError(
                f'Expected "CheckResult" but got "{type(value).__name__}"'
            )
        super().__init__(value=value)

    def serialize(self, with_display: bool = True, **kwargs) -> CheckResultMetadata:
        """Serialize a CheckResult instance into JSON format.

        Parameters
        ----------
        with_display : bool
            controls if to serialize display or not

        Returns
        -------
        CheckResultMetadata
        """
        if with_display:
            display = self.prepare_display()
        else:
            display = None
        return CheckResultMetadata(
            type='CheckResult',
            check=self.prepare_check_metadata(),
            header=self.value.get_header(),
            value=self.prepare_value(),
            conditions_results=self.prepare_condition_results(),
            display=display
        )

    def prepare_check_metadata(self) -> 'checks.CheckMetadata':
        """Prepare Check instance metadata dictionary."""
        assert self.value.check is not None
        return self.value.check.metadata(with_doc_link=True)

    def prepare_condition_results(self) -> t.List[t.Dict[t.Any, t.Any]]:
        """Serialize condition results into json."""
        if not self.value.have_conditions:
            return []
        # 'aggregate_conditions' returns a Styler; the underlying frame is
        # what gets serialized.
        conditions_df = aggregate_conditions(self.value, include_icon=False)
        return conditions_df.data.to_dict(orient='records')

    def prepare_value(self) -> t.Any:
        """Serialize CheckResult value var into JSON."""
        return normalize_value(self.value.value)

    def prepare_display(self) -> t.List[t.Dict[str, t.Any]]:
        """Serialize CheckResult display items into JSON."""
        return DisplayItemsHandler.handle_display(self.value.display)
class DisplayItemsHandler(ABCDisplayItemsHandler):
    """Auxiliary class to decouple display handling logic from other functionality."""
    @classmethod
    def handle_string(cls, item: str, index: int, **kwargs) -> t.Dict[str, str]:
        """Handle textual item."""
        return {'type': 'html', 'payload': item}
    @classmethod
    def handle_dataframe(
        cls,
        item: t.Union[pd.DataFrame, Styler],
        index: int,
        **kwargs
    ) -> t.Dict[str, t.Any]:
        """Handle dataframe item."""
        # A Styler wraps its frame in '.data'; both branches serialize the
        # underlying frame as a list of row dictionaries.
        if isinstance(item, Styler):
            return {
                'type': 'dataframe',
                'payload': item.data.to_dict(orient='records')
            }
        else:
            return {
                'type': 'dataframe',
                'payload': item.to_dict(orient='records')
            }
    @classmethod
    def handle_callable(cls, item: t.Callable, index: int, **kwargs) -> t.Dict[str, t.Any]:
        """Handle callable."""
        # The base handler renders the callable's output into image buffers;
        # each buffer is base64-encoded so it can be carried in JSON.
        return {
            'type': 'images',
            'payload': [
                base64.b64encode(buffer.read()).decode('ascii')
                for buffer in super().handle_callable(item, index, **kwargs)
            ]
        }
    @classmethod
    def handle_figure(cls, item: BaseFigure, index: int, **kwargs) -> t.Dict[str, t.Any]:
        """Handle plotly figure item."""
        return {'type': 'plotly', 'payload': item.to_json()}
    @classmethod
    def handle_display_map(cls, item: 'check_types.DisplayMap', index: int, **kwargs) -> t.Dict[str, t.Any]:
        """Handle display map instance item."""
        # Each entry's display list is serialized recursively.
        return {
            'type': 'displaymap',
            'payload': {
                k: cls.handle_display(v, **kwargs)
                for k, v in item.items()
            }
        } | /runml_checks-1.0.0-py3-none-any.whl/runml_checks/core/serialization/check_result/json.py | 0.933363 | 0.236527 | json.py | pypi
"""Module containing html serializer for the pandas.DataFrame type."""
import typing as t
import warnings
import pandas as pd
from pandas.io.formats.style import Styler
from runml_checks.core.serialization.abc import HtmlSerializer
__all__ = ['DataFrameSerializer']
DataFrameOrStyler = t.Union[pd.DataFrame, Styler]
class DataFrameSerializer(HtmlSerializer[DataFrameOrStyler]):
"""Serializes pandas.DataFrame instance into HTML format.
Parameters
----------
value : Union[pandas.DataFrame, Styler]
DataFrame instance that needed to be serialized.
"""
def __init__(self, value: DataFrameOrStyler, **kwargs):
if not isinstance(value, (pd.DataFrame, Styler)):
raise TypeError(
f'Expected "Union[DataFrame, Styler]" but got "{type(value).__name__}"'
)
super().__init__(value=value)
def serialize(self, **kwargs) -> str:
"""Serialize pandas.DataFrame instance into HTML format."""
try:
if isinstance(self.value, pd.DataFrame):
df_styler = self.value.style
else:
df_styler = self.value
# Using deprecated pandas method so hiding the warning
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
df_styler.set_precision(2)
table_css_props = [
('text-align', 'left'), # Align everything to the left
('white-space', 'pre-wrap') # Define how to handle white space characters (like \n)
]
df_styler.set_table_styles([dict(selector='table,thead,tbody,th,td', props=table_css_props)])
return df_styler.render()
# Because of MLC-154. Dataframe with Multi-index or non unique indices does not have a style
# attribute, hence we need to display as a regular pd html format.
except ValueError:
return self.value.to_html() | /runml_checks-1.0.0-py3-none-any.whl/runml_checks/core/serialization/dataframe/html.py | 0.889517 | 0.227587 | html.py | pypi |
"""Module containing Wandb serializer for the SuiteResult type."""
import typing as t
from collections import OrderedDict
from runml_checks.core import check_result as check_types
from runml_checks.core import suite
from runml_checks.core.serialization.abc import WandbSerializer
from runml_checks.core.serialization.check_failure.wandb import CheckFailureSerializer
from runml_checks.core.serialization.check_result.wandb import CheckResultSerializer
if t.TYPE_CHECKING:
from wandb.sdk.data_types.base_types.wb_value import WBValue # pylint: disable=unused-import
class SuiteResultSerializer(WandbSerializer['suite.SuiteResult']):
    """Serializes any SuiteResult instance into Wandb media format.
    Parameters
    ----------
    value : SuiteResult
        SuiteResult instance that needed to be serialized.
    """
    def __init__(self, value: 'suite.SuiteResult', **kwargs):
        if not isinstance(value, suite.SuiteResult):
            raise TypeError(
                f'Expected "SuiteResult" but got "{type(value).__name__}"'
            )
        super().__init__(value=value)
    def serialize(self, **kwargs) -> t.Dict[str, 'WBValue']:
        """Serialize a SuiteResult instance into Wandb media format.
        Parameters
        ----------
        **kwargs :
            all key-value arguments will be passed to the CheckResult/CheckFailure
            serializers
        Returns
        -------
        Dict[str, WBValue]
        """
        suite_name = self.value.name
        results: t.List[t.Tuple[str, 'WBValue']] = []
        # Prefix every media key with the suite name so that entries coming
        # from different suites do not collide within the same Wandb run.
        for result in self.value.results:
            if isinstance(result, check_types.CheckResult):
                results.extend([
                    (f'{suite_name}/{k}', v)
                    for k, v in CheckResultSerializer(result).serialize(**kwargs).items()
                ])
            elif isinstance(result, check_types.CheckFailure):
                results.extend([
                    (f'{suite_name}/{k}', v)
                    for k, v in CheckFailureSerializer(result).serialize(**kwargs).items()
                ])
            else:
                raise TypeError(f'Unknown result type - {type(result)}')
        # OrderedDict keeps the suite's original result order.
        return OrderedDict(results) | /runml_checks-1.0.0-py3-none-any.whl/runml_checks/core/serialization/suite_result/wandb.py | 0.889265 | 0.252137 | wandb.py | pypi
"""Module containing SuiteResult serialization logic."""
import typing as t
from IPython.display import HTML
from runml_checks.core import check_result as check_types
from runml_checks.core import suite
from runml_checks.core.serialization.abc import IPythonFormatter, IPythonSerializer
from runml_checks.core.serialization.check_result.html import CheckResultSection
from runml_checks.core.serialization.check_result.ipython import CheckResultSerializer
from runml_checks.core.serialization.common import Html, flatten, form_output_anchor, join
from runml_checks.utils.html import linktag
from . import html
__all__ = ['SuiteResultSerializer']
class SuiteResultSerializer(IPythonSerializer['suite.SuiteResult']):
    """Serializes any SuiteResult instance into a list of IPython formatters.
    Parameters
    ----------
    value : SuiteResult
        SuiteResult instance that needed to be serialized.
    """
    def __init__(self, value: 'suite.SuiteResult', **kwargs):
        if not isinstance(value, suite.SuiteResult):
            raise TypeError(
                f'Expected "SuiteResult" but got "{type(value).__name__}"'
            )
        super().__init__(value=value)
        # Heavy HTML generation is delegated to the html serializer; this
        # class wraps its output into IPython display objects.
        self._html_serializer = html.SuiteResultSerializer(value)
    def serialize(
        self,
        output_id: t.Optional[str] = None,
        is_for_iframe_with_srcdoc: bool = False,
        **kwargs,
    ) -> t.List[IPythonFormatter]:
        """Serialize a SuiteResult instance into a list of IPython formatters.
        Parameters
        ----------
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links
        is_for_iframe_with_srcdoc : bool, default False
            anchor links, in order to work within iframe require additional prefix
            'about:srcdoc'. This flag tells function whether to add that prefix to
            the anchor link or not
        **kwargs :
            all other key-value arguments will be passed to the CheckResult/CheckFailure
            serializers
        Returns
        -------
        List[IPythonFormatter]
        """
        summary = self.prepare_summary(output_id=output_id, **kwargs)
        conditions_table = self.prepare_conditions_table(output_id=output_id, **kwargs)
        failures = self.prepare_failures_list()
        results_with_conditions = self.prepare_results_with_condition_and_display(
            output_id=output_id,
            check_sections=['condition-table', 'additional-output'],
            **kwargs
        )
        results_without_conditions = self.prepare_results_without_condition(
            output_id=output_id,
            check_sections=['additional-output'],
            **kwargs
        )
        # Sections are stacked vertically, separated by bold horizontal rules.
        sections = [
            summary,
            HTML(Html.bold_hr),
            conditions_table,
            HTML(Html.bold_hr),
            results_with_conditions,
            HTML(Html.bold_hr),
            results_without_conditions,
        ]
        if failures:
            sections.extend([HTML(Html.bold_hr), failures])
        # A trailing "Go to top" link is only meaningful when an anchor id exists.
        if output_id:
            link = linktag(
                text='Go to top',
                href=f'#{form_output_anchor(output_id)}',
                style={'font-size': '14px'},
                is_for_iframe_with_srcdoc=is_for_iframe_with_srcdoc
            )
            sections.append(HTML(f'<br>{link}'))
        return list(flatten(sections))
    def prepare_summary(self, output_id: t.Optional[str] = None, **kwargs) -> HTML:
        """Prepare summary section.
        Parameters
        ----------
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links
        Returns
        -------
        HTML
        """
        return HTML(self._html_serializer.prepare_summary(
            output_id=output_id,
            **kwargs
        ))
    def prepare_conditions_table(
        self,
        output_id: t.Optional[str] = None,
        include_check_name: bool = True,
        **kwargs
    ) -> HTML:
        """Prepare conditions table section.
        Parameters
        ----------
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links
        include_check_name : bool, default True
            whether to include check name into table or not
        Returns
        -------
        HTML
        """
        return HTML(self._html_serializer.prepare_conditions_table(
            output_id=output_id,
            include_check_name=include_check_name,
            **kwargs
        ))
    def prepare_results_with_condition_and_display(
        self,
        output_id: t.Optional[str] = None,
        check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,
        **kwargs
    ) -> t.List[IPythonFormatter]:
        """Prepare subsection of the content that shows results with conditions.
        Parameters
        ----------
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links
        check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None
            sequence of check result sections to include into the output,
            in case of 'None' all sections will be included
        Returns
        -------
        List[IPythonFormatter]
        """
        results = t.cast(
            t.List[check_types.CheckResult],
            self.value.select_results(
                self.value.results_with_conditions & self.value.results_with_display
            )
        )
        results_with_condition_and_display = [
            CheckResultSerializer(it).serialize(
                output_id=output_id,
                check_sections=check_sections,
                **kwargs
            )
            for it in results
        ]
        # Interleave the per-check outputs with light horizontal rules.
        content = join(
            results_with_condition_and_display,
            HTML(Html.light_hr)
        )
        return list(flatten([
            HTML('<h2>Check With Conditions Output</h2>'),
            content
        ]))
    def prepare_results_without_condition(
        self,
        output_id: t.Optional[str] = None,
        check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,
        **kwargs
    ) -> t.List[IPythonFormatter]:
        """Prepare subsection of the content that shows results without conditions.
        Parameters
        ----------
        output_id : Optional[str], default None
            unique output identifier that will be used to form anchor links
        check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None
            sequence of check result sections to include into the output,
            in case of 'None' all sections will be included
        Returns
        -------
        List[IPythonFormatter]
        """
        results = t.cast(
            t.List[check_types.CheckResult],
            self.value.select_results(
                self.value.results_without_conditions & self.value.results_with_display,
            )
        )
        # NOTE(review): the sibling method above passes 'check_sections=' to the
        # same serializer, while here 'include=' plus 'include_plotlyjs'/'include_requirejs'
        # are used - these look copied from the html serializer's signature.
        # Confirm that CheckResultSerializer.serialize accepts these keywords.
        results_without_conditions = [
            CheckResultSerializer(it).serialize(
                output_id=output_id,
                include=check_sections,
                include_plotlyjs=False,
                include_requirejs=False,
                **kwargs
            )
            for it in results
        ]
        content = join(
            results_without_conditions,
            HTML(Html.light_hr)
        )
        return list(flatten([
            HTML('<h2>Check Without Conditions Output</h2>'),
            content
        ]))
    def prepare_failures_list(self) -> HTML:
        """Prepare subsection of the content that shows list of failures."""
        return HTML(self._html_serializer.prepare_failures_list()) | /runml_checks-1.0.0-py3-none-any.whl/runml_checks/core/serialization/suite_result/ipython.py | 0.881475 | 0.172137 | ipython.py | pypi
"""Module containing JSON serializer for the SuiteResult type."""
import typing as t
from runml_checks.core import check_result as check_types
from runml_checks.core import suite
from runml_checks.core.serialization.abc import JsonSerializer
from runml_checks.core.serialization.check_failure.json import CheckFailureSerializer
from runml_checks.core.serialization.check_result.json import CheckResultSerializer
__all__ = ['SuiteResultSerializer']
class SuiteResultSerializer(JsonSerializer['suite.SuiteResult']):
    """Serializes any SuiteResult instance into JSON format.
    Parameters
    ----------
    value : SuiteResult
        SuiteResult instance that needed to be serialized.
    """
    def __init__(self, value: 'suite.SuiteResult', **kwargs):
        if not isinstance(value, suite.SuiteResult):
            raise TypeError(
                f'Expected "SuiteResult" but got "{type(value).__name__}"'
            )
        super().__init__(value=value)
    def serialize(
        self,
        with_display: bool = True,
        **kwargs
    ) -> t.Union[t.Dict[t.Any, t.Any], t.List[t.Any]]:
        """Serialize a SuiteResult instance into JSON format.
        Parameters
        ----------
        with_display : bool, default True
            whether to include serialized `CheckResult.display` items into
            the output or not
        **kwargs :
            all other key-value arguments will be passed to the CheckResult/CheckFailure
            serializers
        Returns
        -------
        Union[Dict[Any, Any], List[Any]]
        """
        results = []
        # Each result is serialized by its own serializer; note CheckFailure
        # serialization takes no 'with_display' flag (it has no display payload).
        for it in self.value.results:
            if isinstance(it, check_types.CheckResult):
                results.append(CheckResultSerializer(it).serialize(with_display=with_display))
            elif isinstance(it, check_types.CheckFailure):
                results.append(CheckFailureSerializer(it).serialize())
            else:
                raise TypeError(f'Unknown result type - {type(it)}')
        return {'name': self.value.name, 'results': results, 'type' : 'SuiteResult'} | /runml_checks-1.0.0-py3-none-any.whl/runml_checks/core/serialization/suite_result/json.py | 0.890836 | 0.171737 | json.py | pypi
"""Module containing html serializer for the CheckFailuer type."""
from typing import Optional
from runml_checks.core import check_result as check_types
from runml_checks.core.serialization.abc import HtmlSerializer
__all__ = ['CheckFailureSerializer']
class CheckFailureSerializer(HtmlSerializer['check_types.CheckFailure']):
    """Serializes any CheckFailure instance into html format.
    Parameters
    ----------
    value : CheckFailure
        CheckFailure instance that needed to be serialized.
    """
    def __init__(self, value: 'check_types.CheckFailure', **kwargs):
        if not isinstance(value, check_types.CheckFailure):
            raise TypeError(
                f'Expected "CheckFailure" but got "{type(value).__name__}"'
            )
        super().__init__(value=value)
    def serialize(
        self,
        full_html: bool = False,
        **kwargs
    ) -> str:
        """Serialize a CheckFailure instance into html format.
        Parameters
        ----------
        full_html : bool, default False
            whether to wrap the output in a complete html page or not
        Returns
        -------
        str
        """
        header = self.prepare_header()
        # Output is header, then check summary, then the error itself.
        content = ''.join([header, self.prepare_summary(), self.prepare_error_message()])
        if full_html is True:
            return (
                '<html>'
                f'<head><title>{header}</title></head>'
                f'<body style="background-color: white;">{content}</body>'
                '</html>'
            )
        else:
            return content
    def prepare_header(self, output_id: Optional[str] = None) -> str:
        """Prepare the header section of the html output."""
        header = self.value.get_header()
        header = f'<b>{header}</b>'
        # When an output id is given, the header carries an anchor id so it
        # can be targeted by links.
        if output_id is not None:
            check_id = self.value.get_check_id(output_id)
            return f'<h4 id="{check_id}">{header}</h4>'
        else:
            return f'<h4>{header}</h4>'
    def prepare_summary(self) -> str:
        """Prepare the summary section of the html output."""
        return f'<p>{self.value.get_metadata()["summary"]}</p>'
    def prepare_error_message(self) -> str:
        """Prepare the error message of the html output."""
        return f'<p style="color:red">{self.value.exception}</p>' | /runml_checks-1.0.0-py3-none-any.whl/runml_checks/core/serialization/check_failure/html.py | 0.936037 | 0.150778 | html.py | pypi
"""Module containing class performance condition utils."""
import typing as t
import numpy as np
import pandas as pd
from runml_checks.core import ConditionResult
from runml_checks.core.condition import ConditionCategory
from runml_checks.core.errors import runml_checksValueError
from runml_checks.utils.dict_funcs import get_dict_entry_by_value
from runml_checks.utils.strings import format_number, format_percent
__all__ = ['get_condition_test_performance_greater_than',
'get_condition_train_test_relative_degradation_less_than',
'get_condition_class_performance_imbalance_ratio_less_than']
def get_condition_test_performance_greater_than(min_score: float) -> \
        t.Callable[[pd.DataFrame], ConditionResult]:
    """Add condition - test metric scores are greater than the threshold.

    Parameters
    ----------
    min_score : float
        Minimum score to pass the check.
    Returns
    -------
    Callable
        the condition function
    """
    def condition(check_result: pd.DataFrame):
        test_rows = check_result.loc[check_result['Dataset'] == 'Test']
        failed_rows = test_rows.loc[test_rows['Value'] <= min_score]
        per_class = check_result.get('Class') is not None
        details = ''
        if len(failed_rows) > 0:
            # Report how many scores failed and which test score was the worst.
            worst = test_rows.loc[test_rows['Value'].idxmin()]
            details = f'Found {len(failed_rows)} scores below threshold.\n'
            details += f'Found minimum score for {worst["Metric"]} metric of value ' \
                       f'{format_number(worst["Value"])}'
            if per_class:
                details += f' for class {worst.get("Class Name", worst["Class"])}.'
            return ConditionResult(ConditionCategory.FAIL, details)
        return ConditionResult(ConditionCategory.PASS, details)
    return condition
def get_condition_train_test_relative_degradation_less_than(threshold: float) -> \
        t.Callable[[pd.DataFrame], ConditionResult]:
    """Add condition - test performance is not degraded by more than given percentage in train.
    Parameters
    ----------
    threshold : float
        maximum degradation ratio allowed (value between 0 and 1)
    Returns
    -------
    Callable
        the condition function
    """
    # Relative change from the train score (score_1) to the test score
    # (score_2). When train is 0 but test is not, 'threshold + 1' is returned
    # as a sentinel that is guaranteed to count as a failure.
    def _ratio_of_change_calc(score_1, score_2):
        if score_1 == 0:
            if score_2 == 0:
                return 0
            return threshold + 1
        return (score_1 - score_2) / abs(score_1)
    def condition(check_result: pd.DataFrame) -> ConditionResult:
        test_scores = check_result.loc[check_result['Dataset'] == 'Test']
        train_scores = check_result.loc[check_result['Dataset'] == 'Train']
        # (message, value) pair of the single worst degradation seen so far.
        max_degradation = ('', -np.inf)
        num_failures = 0
        def update_max_degradation(diffs, class_name):
            nonlocal max_degradation
            max_scorer, max_diff = get_dict_entry_by_value(diffs)
            if max_diff > max_degradation[1]:
                max_degradation_details = f'Found max degradation of {format_percent(max_diff)} for metric {max_scorer}'
                if class_name is not None:
                    max_degradation_details += f' and class {class_name}.'
                max_degradation = max_degradation_details, max_diff
        # Prefer the human-readable 'Class Name' column when present.
        if check_result.get('Class') is not None:
            if check_result.get('Class Name') is not None:
                class_column = 'Class Name'
            else:
                class_column = 'Class'
            classes = check_result[class_column].unique()
        else:
            classes = None
        if classes is not None:
            # Per-class results: evaluate degradation separately per class.
            for class_name in classes:
                test_scores_class = test_scores.loc[test_scores[class_column] == class_name]
                train_scores_class = train_scores.loc[train_scores[class_column] == class_name]
                test_scores_dict = dict(zip(test_scores_class['Metric'], test_scores_class['Value']))
                train_scores_dict = dict(zip(train_scores_class['Metric'], train_scores_class['Value']))
                if len(test_scores_dict) == 0 or len(train_scores_dict) == 0:
                    continue
                # Calculate percentage of change from train to test
                diff = {score_name: _ratio_of_change_calc(score, test_scores_dict[score_name])
                        for score_name, score in train_scores_dict.items()}
                update_max_degradation(diff, class_name)
                num_failures += len([v for v in diff.values() if v >= threshold])
        else:
            # Aggregate results: one score per metric.
            test_scores_dict = dict(zip(test_scores['Metric'], test_scores['Value']))
            train_scores_dict = dict(zip(train_scores['Metric'], train_scores['Value']))
            if not (len(test_scores_dict) == 0 or len(train_scores_dict) == 0):
                # Calculate percentage of change from train to test
                diff = {score_name: _ratio_of_change_calc(score, test_scores_dict[score_name])
                        for score_name, score in train_scores_dict.items()}
                update_max_degradation(diff, None)
                num_failures += len([v for v in diff.values() if v >= threshold])
        if num_failures > 0:
            message = f'{num_failures} scores failed. ' + max_degradation[0]
            return ConditionResult(ConditionCategory.FAIL, message)
        else:
            message = max_degradation[0]
            return ConditionResult(ConditionCategory.PASS, message)
    return condition
def get_condition_class_performance_imbalance_ratio_less_than(threshold: float, score: str) -> \
t.Callable[[pd.DataFrame], ConditionResult]:
"""Add condition - relative ratio difference between highest-class and lowest-class is less than threshold.
Parameters
----------
threshold : float
ratio difference threshold
score : str
limit score for condition
Returns
-------
Callable
the condition function
"""
def condition(check_result: pd.DataFrame) -> ConditionResult:
if score not in set(check_result['Metric']):
raise runml_checksValueError(f'Data was not calculated using the scoring function: {score}')
condition_passed = True
datasets_details = []
for dataset in ['Test', 'Train']:
data = check_result.loc[(check_result['Dataset'] == dataset) & (check_result['Metric'] == score)]
min_value_index = data['Value'].idxmin()
min_row = data.loc[min_value_index]
min_class_name = min_row.get('Class Name', min_row['Class'])
min_value = min_row['Value']
max_value_index = data['Value'].idxmax()
max_row = data.loc[max_value_index]
max_class_name = max_row.get('Class Name', max_row['Class'])
max_value = max_row['Value']
relative_difference = abs((min_value - max_value) / max_value)
condition_passed &= relative_difference < threshold
details = (
f'Relative ratio difference between highest and lowest in {dataset} dataset '
f'classes is {format_percent(relative_difference)}, using {score} metric. '
f'Lowest class - {min_class_name}: {format_number(min_value)}; '
f'Highest class - {max_class_name}: {format_number(max_value)}'
)
datasets_details.append(details)
category = ConditionCategory.PASS if condition_passed else ConditionCategory.FAIL
return ConditionResult(category, details='\n'.join(datasets_details))
return condition | /runml_checks-1.0.0-py3-none-any.whl/runml_checks/core/check_utils/class_performance_utils.py | 0.936227 | 0.401864 | class_performance_utils.py | pypi |
"""Module containing common feature label correlation (PPS) utils."""
from typing import Optional
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import runml_checks.ppscore as pps
from runml_checks.utils.plot import colors
from runml_checks.utils.strings import format_percent
from runml_checks.utils.typing import Hashable
def get_pps_figure(per_class: bool, n_of_features: int):
    """If per_class is True, then no title is defined on the figure."""
    fig = go.Figure()
    layout = dict(
        yaxis_title='Predictive Power Score (PPS)',
        yaxis_range=(0, 1.05),
        # The fixed x-axis range avoids too-wide bars when only one or two
        # bars are plotted, and keeps them centered; the lower bound is below
        # zero so the bars do not hug the left edge of the plot.
        xaxis_range=(-3, n_of_features + 2),
        legend=dict(x=1.0, y=1.0),
        barmode='group',
        height=500,
        # Treat the x-axis as categorical: numeric column names would
        # otherwise be placed on a numerical axis, very far from each other.
        xaxis_type='category'
    )
    if per_class:
        layout['xaxis_title'] = 'Class'
    else:
        layout['title'] = 'Predictive Power Score (PPS) - Can a feature predict the label by itself?'
        layout['xaxis_title'] = 'Column'
    fig.update_layout(**layout)
    return fig
def pd_series_to_trace(s_pps: pd.Series, name: str):
    """Create bar plotly bar trace out of pandas Series."""
    display_name = name.capitalize() if name else None
    # Bold per-bar labels showing the PPS rounded to two decimals.
    labels = '<b>' + s_pps.round(2).astype(str) + '</b>'
    return go.Bar(
        x=s_pps.index,
        y=s_pps,
        name=display_name,
        marker_color=colors.get(display_name),
        text=labels,
        textposition='outside'
    )
def pd_series_to_trace_with_diff(s_pps: pd.Series, name: str, diffs: pd.Series):
    """Create bar plotly bar trace out of pandas Series, with difference shown in percentages."""
    # Label layout: "(+X%)" on the first line, the rounded PPS below it.
    formatted_diffs = '(' + diffs.apply(format_percent, floating_point=0, add_positive_prefix=True) + ')'
    labels = formatted_diffs + '<br>' + s_pps.round(2).astype(str)
    display_name = name.capitalize() if name else None
    return go.Bar(
        x=s_pps.index,
        y=s_pps,
        name=display_name,
        marker_color=colors.get(display_name),
        text='<b>' + labels + '</b>',
        textposition='outside'
    )
def get_feature_label_correlation(train_df: pd.DataFrame, train_label_name: Optional[Hashable],
                                  test_df: pd.DataFrame,
                                  test_label_name: Optional[Hashable], ppscore_params: dict,
                                  n_show_top: int,
                                  min_pps_to_show: float = 0.05,
                                  random_state: Optional[int] = None,
                                  with_display: bool = True):
    """
    Calculate the PPS for train, test and difference for feature label correlation checks.
    The PPS represents the ability of a feature to single-handedly predict another feature or label.
    This function calculates the PPS per feature for both train and test, and returns the data and display graph.
    Uses the ppscore package - for more info, see https://github.com/8080labs/ppscore
    Args:
        train_df: pd.DataFrame
            DataFrame of all train features and label
        train_label_name:: str
            name of label column in train dataframe
        test_df:
            DataFrame of all test features and label
        test_label_name: str
            name of label column in test dataframe
        ppscore_params: dict
            dictionary of additional parameters for the ppscore predictor function
        n_show_top: int
            Number of features to show, sorted by the magnitude of difference in PPS
        min_pps_to_show: float, default 0.05
            Minimum PPS to show a class in the graph
        random_state: int, default None
            Random state for the ppscore.predictors function
    Returns:
        CheckResult
            value: dictionaries of PPS values for train, test and train-test difference.
            display: bar graph of the PPS of each feature.
    """
    df_pps_train = pps.predictors(df=train_df, y=train_label_name,
                                  random_seed=random_state,
                                  **ppscore_params)
    df_pps_test = pps.predictors(df=test_df,
                                 y=test_label_name,
                                 random_seed=random_state, **ppscore_params)
    # 'x' is the feature column name, 'ppscore' its predictive power score.
    s_pps_train = df_pps_train.set_index('x', drop=True)['ppscore']
    s_pps_test = df_pps_test.set_index('x', drop=True)['ppscore']
    # Positive difference => the feature is more predictive on train than on test.
    s_difference = s_pps_train - s_pps_test
    ret_value = {'train': s_pps_train.to_dict(), 'test': s_pps_test.to_dict(),
                 'train-test difference': s_difference.to_dict()}
    if not with_display:
        return ret_value, None
    # Show only the n_show_top features with the largest absolute train-test PPS gap.
    sorted_order_for_display = np.abs(s_difference).sort_values(ascending=False).head(n_show_top).index
    s_pps_train_to_display = s_pps_train[sorted_order_for_display]
    s_pps_test_to_display = s_pps_test[sorted_order_for_display]
    s_difference_to_display = s_difference[sorted_order_for_display]
    fig = get_pps_figure(per_class=False, n_of_features=len(sorted_order_for_display))
    fig.add_trace(pd_series_to_trace(s_pps_train_to_display, 'train'))
    fig.add_trace(pd_series_to_trace_with_diff(s_pps_test_to_display, 'test', -s_difference_to_display))
    # Display the figure only if at least one PPS exceeds min_pps_to_show.
    display = [fig] if any(s_pps_train > min_pps_to_show) or any(s_pps_test > min_pps_to_show) else None
    return ret_value, display
def get_feature_label_correlation_per_class(train_df: pd.DataFrame, train_label_name: Optional[Hashable],
                                            test_df: pd.DataFrame,
                                            test_label_name: Optional[Hashable], ppscore_params: dict,
                                            n_show_top: int,
                                            min_pps_to_show: float = 0.05,
                                            random_state: int = None,
                                            with_display: bool = True):
    """
    Calculate the PPS for train, test and difference for feature label correlation checks per class.

    The PPS represents the ability of a feature to single-handedly predict another feature or label.
    This function calculates the PPS per feature for both train and test in a one-vs-all fashion for
    each class value of the label, and returns the data and display graphs.
    Uses the ppscore package - for more info, see https://github.com/8080labs/ppscore

    Args:
        train_df: pd.DataFrame
            DataFrame of all train features and label
        train_label_name: str
            name of label column in train dataframe
        test_df: pd.DataFrame
            DataFrame of all test features and label
        test_label_name: str
            name of label column in test dataframe
        ppscore_params: dict
            dictionary of additional parameters for the ppscore predictor function
        n_show_top: int
            Number of features to show, sorted by the magnitude of difference in PPS
        min_pps_to_show: float, default 0.05
            Minimum PPS to show a class in the graph
        random_state: int, default None
            Random state for the ppscore.predictors function
        with_display: bool, default True
            Whether to build display figures in addition to the raw values

    Returns:
        CheckResult
            value: dictionaries of features, each value is 3 dictionaries of PPS values for train, test and
            train-test difference.
            display: bar graphs of the PPS for each feature.
    """
    df_pps_train_all = pd.DataFrame()
    df_pps_test_all = pd.DataFrame()
    df_pps_difference_all = pd.DataFrame()
    display = []
    ret_value = {}

    # Compute PPS per class value in a one-vs-all setup: the label is binarized per class.
    for c in train_df[train_label_name].unique():
        train_df_all_vs_one = train_df.copy()
        test_df_all_vs_one = test_df.copy()

        train_df_all_vs_one[train_label_name] = train_df_all_vs_one[train_label_name].apply(
            lambda x: 1 if x == c else 0)  # pylint: disable=cell-var-from-loop
        test_df_all_vs_one[test_label_name] = test_df_all_vs_one[test_label_name].apply(
            lambda x: 1 if x == c else 0)  # pylint: disable=cell-var-from-loop

        df_pps_train = pps.predictors(df=train_df_all_vs_one, y=train_label_name,
                                      random_seed=random_state,
                                      **ppscore_params)
        df_pps_test = pps.predictors(df=test_df_all_vs_one,
                                     y=test_label_name,
                                     random_seed=random_state, **ppscore_params)

        s_pps_train = df_pps_train.set_index('x', drop=True)['ppscore']
        s_pps_test = df_pps_test.set_index('x', drop=True)['ppscore']
        s_difference = s_pps_train - s_pps_test

        df_pps_train_all[c] = s_pps_train
        df_pps_test_all[c] = s_pps_test
        df_pps_difference_all[c] = s_difference

    for feature in df_pps_train_all.index:
        s_train = df_pps_train_all.loc[feature]
        s_test = df_pps_test_all.loc[feature]
        s_difference = df_pps_difference_all.loc[feature]

        ret_value[feature] = {'train': s_train.to_dict(), 'test': s_test.to_dict(),
                              'train-test difference': s_difference.to_dict()}

        # BUGFIX: the condition previously read `with_display and any(...) or any(...)`,
        # which parses as `(with_display and A) or B` and therefore built figures even
        # when with_display was False. Parenthesize so with_display gates the whole check.
        # Display only if some class's score is above min_pps_to_show.
        if with_display and (any(s_train > min_pps_to_show) or any(s_test > min_pps_to_show)):
            sorted_order_for_display = np.abs(s_difference).sort_values(ascending=False).head(n_show_top).index
            s_train_to_display = s_train[sorted_order_for_display]
            s_test_to_display = s_test[sorted_order_for_display]
            s_difference_to_display = s_difference[sorted_order_for_display]

            fig = get_pps_figure(per_class=True, n_of_features=len(sorted_order_for_display))
            fig.update_layout(title=f'{feature}: Predictive Power Score (PPS) Per Class')
            fig.add_trace(pd_series_to_trace(s_train_to_display, 'train'))
            fig.add_trace(pd_series_to_trace_with_diff(s_test_to_display, 'test', -s_difference_to_display))
            display.append(fig)

    return ret_value, display
"""Module containing common WholeDatasetDriftCheck (domain classifier drift) utils."""
import warnings
from typing import Container, List
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from sklearn.compose import ColumnTransformer
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.experimental import enable_hist_gradient_boosting # noqa # pylint: disable=unused-import
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
from runml_checks.tabular import Dataset
from runml_checks.utils.dataframes import floatify_dataframe, floatify_series
from runml_checks.utils.distribution.plot import drift_score_bar_traces, feature_distribution_traces
from runml_checks.utils.distribution.rare_category_encoder import RareCategoryEncoder
from runml_checks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none
from runml_checks.utils.function import run_available_kwargs
from runml_checks.utils.strings import format_percent
from runml_checks.utils.typing import Hashable
def run_whole_dataset_drift(train_dataframe: pd.DataFrame, test_dataframe: pd.DataFrame,
                            numerical_features: List[Hashable], cat_features: List[Hashable], sample_size: int,
                            random_state: int, test_size: float, n_top_columns: int, min_feature_importance: float,
                            max_num_categories_for_display: int, show_categories_by: str,
                            min_meaningful_drift_score: float,
                            with_display: bool):
    """Calculate whole dataset drift.

    Trains a Domain Classifier to distinguish between samples drawn from the train
    and test dataframes. Its ROC AUC (converted to a drift score via
    ``auc_to_drift_score``) measures how separable the two datasets are, and its
    feature importance indicates which features drive the difference.

    Returns a tuple ``(values_dict, displays)`` where ``values_dict`` contains the
    classifier AUC, the drift score and the feature importance mapping, and
    ``displays`` is a list of display items (or None when ``with_display`` is False,
    feature importance is unavailable, or the drift score is not meaningful).
    """
    domain_classifier = generate_model(numerical_features, cat_features, random_state)

    # Sample equally-sized subsets from both dataframes so the classifier task is balanced.
    train_sample_df = train_dataframe.sample(sample_size, random_state=random_state)
    test_sample_df = test_dataframe.sample(sample_size, random_state=random_state)

    # create new dataset, with label denoting whether sample belongs to test dataset
    domain_class_df = pd.concat([train_sample_df, test_sample_df])
    domain_class_labels = pd.Series([0] * len(train_sample_df) + [1] * len(test_sample_df))

    x_train, x_test, y_train, y_test = train_test_split(domain_class_df, domain_class_labels,
                                                        stratify=domain_class_labels,
                                                        random_state=random_state,
                                                        test_size=test_size)

    # domain_classifier has problems with nullable int series
    x_train = floatify_dataframe(x_train)
    x_test = floatify_dataframe(x_test)
    y_train = floatify_series(y_train)
    y_test = floatify_series(y_test)

    domain_classifier = domain_classifier.fit(x_train, y_train)

    y_test.name = 'belongs_to_test'
    domain_test_dataset = Dataset(pd.concat([x_test.reset_index(drop=True), y_test.reset_index(drop=True)], axis=1),
                                  cat_features=cat_features, label='belongs_to_test')

    # calculate feature importance of domain_classifier, containing the information which features separate
    # the dataset best.
    fi, importance_type = calculate_feature_importance_or_none(
        domain_classifier,
        domain_test_dataset,
        force_permutation=True,
        permutation_kwargs={'n_repeats': 10,
                            'random_state': random_state,
                            'timeout': 120,
                            'skip_messages': True}
    )

    fi = fi.sort_values(ascending=False) if fi is not None else None

    domain_classifier_auc = roc_auc_score(y_test, domain_classifier.predict_proba(x_test)[:, 1])
    drift_score = auc_to_drift_score(domain_classifier_auc)

    values_dict = {
        'domain_classifier_auc': domain_classifier_auc,
        'domain_classifier_drift_score': drift_score,
        'domain_classifier_feature_importance': fi.to_dict() if fi is not None else {},
    }

    feature_importance_note = f"""
    <span>
    The percents of explained dataset difference are the importance values for the feature calculated
    using `{importance_type}`.
    </span><br><br>
    """

    # Only show top features when display is requested, importance exists and drift is meaningful.
    if with_display and fi is not None and drift_score > min_meaningful_drift_score:
        top_fi = fi.head(n_top_columns)
        top_fi = top_fi.loc[top_fi > min_feature_importance]
    else:
        top_fi = None

    if top_fi is not None and len(top_fi):
        score = values_dict['domain_classifier_drift_score']

        displays = [
            feature_importance_note,
            build_drift_plot(score),
            '<h3>Main features contributing to drift</h3>',
            N_TOP_MESSAGE % n_top_columns,
            *(
                display_dist(
                    train_sample_df[feature],
                    test_sample_df[feature],
                    top_fi,
                    cat_features,
                    max_num_categories_for_display,
                    show_categories_by)
                for feature in top_fi.index
            )
        ]
    else:
        displays = None

    return values_dict, displays
def generate_model(numerical_columns: List[Hashable], categorical_columns: List[Hashable],
                   random_state: int = 42) -> Pipeline:
    """Build the unfitted Domain Classifier pipeline.

    Numerical columns pass through untouched; categorical columns are rare-category
    encoded and then ordinal encoded before feeding a shallow histogram gradient
    boosting classifier.
    """
    categorical_transformer = Pipeline(steps=[
        ('rare', RareCategoryEncoder(254)),
        ('encoder', run_available_kwargs(OrdinalEncoder,
                                         handle_unknown='use_encoded_value',
                                         unknown_value=np.nan,
                                         dtype=np.float64)),
    ])

    preprocessor = ColumnTransformer(transformers=[
        ('num', 'passthrough', numerical_columns),
        ('cat', categorical_transformer, categorical_columns),
    ])

    # Flags telling the classifier which (post-transform) columns are categorical.
    categorical_flags = [False] * len(numerical_columns) + [True] * len(categorical_columns)
    classifier = run_available_kwargs(HistGradientBoostingClassifier,
                                      max_depth=2, max_iter=10,
                                      random_state=random_state,
                                      categorical_features=categorical_flags)

    return Pipeline(steps=[('preprocessing', preprocessor), ('model', classifier)])
def auc_to_drift_score(auc: float) -> float:
    """Convert a Domain Classifier AUC into a drift score (2*auc - 1, floored at 0).

    Parameters
    ----------
    auc : float
        auc of the Domain Classifier
    """
    score = 2 * auc - 1
    return score if score > 0 else 0
def build_drift_plot(score):
    """Build the traffic-light drift score bar figure for the given score."""
    bar_traces, x_axis, y_axis = drift_score_bar_traces(score)
    x_axis['title'] = 'Drift score'
    layout = go.Layout(
        title='Drift Score - Whole Dataset Total',
        xaxis=x_axis,
        yaxis=y_axis,
        height=200,
    )
    figure = go.Figure(layout=layout)
    figure.add_traces(bar_traces)
    return figure
def display_dist(
    train_column: pd.Series,
    test_column: pd.Series,
    fi: pd.Series,
    cat_features: Container[str],
    max_num_categories: int,
    show_categories_by: str
):
    """Create a distribution comparison plot for the given columns.

    Builds train/test distribution traces for one feature and titles the figure
    with the share of the dataset difference that this feature explains (taken
    from the feature importance series ``fi``). For categorical features with
    more categories than ``max_num_categories``, a footnote explains which
    categories are shown and how much of the data they cover.
    """
    column_name = train_column.name or ''
    column_fi = fi.loc[column_name]
    title = f'Feature: {column_name} - Explains {format_percent(column_fi)} of dataset difference'
    dist_traces, xaxis_layout, yaxis_layout = feature_distribution_traces(
        train_column.dropna(),
        test_column.dropna(),
        column_name,
        is_categorical=column_name in cat_features,
        max_num_categories=max_num_categories,
        show_categories_by=show_categories_by
    )
    # Footnote is needed only when categories were truncated for display.
    all_categories = list(set(train_column).union(set(test_column)))
    add_footnote = column_name in cat_features and len(all_categories) > max_num_categories
    if not add_footnote:
        fig = go.Figure()
        fig.add_traces(dist_traces)
    else:
        # Describe the truncation policy used by feature_distribution_traces.
        if show_categories_by == 'train_largest':
            categories_order_description = 'largest categories (by train)'
        elif show_categories_by == 'test_largest':
            categories_order_description = 'largest categories (by test)'
        elif show_categories_by == 'largest_difference':
            categories_order_description = 'largest difference between categories'
        else:
            raise ValueError(f'Unsupported "show_categories_by" value - {show_categories_by}')
        # Fraction of each dataset covered by the displayed categories.
        train_data_percents = dist_traces[0].y.sum()
        test_data_percents = dist_traces[1].y.sum()
        annotation = (
            f'* Showing the top {max_num_categories} {categories_order_description} out of '
            f'total {len(all_categories)} categories.'
            f'<br>Shown data is {format_percent(train_data_percents)} of train data and '
            f'{format_percent(test_data_percents)} of test data.'
        )
        fig = (
            go.Figure()
            .add_traces(dist_traces)
            .add_annotation(
                x=0, y=-0.4,
                showarrow=False,
                xref='paper',
                yref='paper',
                xanchor='left',
                text=annotation)
        )
    return fig.update_layout(go.Layout(
        title=title,
        xaxis=xaxis_layout,
        yaxis=yaxis_layout,
        legend=dict(
            title='Dataset',
            yanchor='top',
            y=0.9,
            xanchor='left'),
        height=300
    ))
"""Module containing methods for calculating correlation between variables."""
import math
from collections import Counter
from typing import List, Union
import numpy as np
import pandas as pd
from scipy.stats import entropy
from runml_checks.utils.distribution.preprocessing import value_frequency
def conditional_entropy(x: Union[List, np.ndarray, pd.Series], y: Union[List, np.ndarray, pd.Series]) -> float:
    """
    Calculate the conditional entropy of x given y: S(x|y).

    Wikipedia: https://en.wikipedia.org/wiki/Conditional_entropy

    Parameters:
    -----------
    x: Union[List, np.ndarray, pd.Series]
        A sequence of numerical_variable without nulls
    y: Union[List, np.ndarray, pd.Series]
        A sequence of numerical_variable without nulls

    Returns:
    --------
    float
        Representing the conditional entropy
    """
    y_counts = Counter(y)
    pair_counts = Counter(zip(x, y))
    total = sum(y_counts.values())

    result = 0.0
    for pair, pair_count in pair_counts.items():
        p_pair = pair_count / total
        p_y = y_counts[pair[1]] / total
        # Natural-log term of -p(x,y) * log(p(x,y)/p(y)).
        result += p_pair * math.log(p_y / p_pair)
    return result
def theil_u_correlation(x: Union[List, np.ndarray, pd.Series], y: Union[List, np.ndarray, pd.Series]) -> float:
    """
    Calculate the Theil's U correlation of y to x.

    Theil's U is an asymmetric measure ranging in [0,1], based on entropy, which answers the question:
    how well does variable y explain variable x?
    For more information see https://en.wikipedia.org/wiki/Uncertainty_coefficient

    Parameters:
    -----------
    x: Union[List, np.ndarray, pd.Series]
        A sequence of a categorical variable values without nulls
    y: Union[List, np.ndarray, pd.Series]
        A sequence of a categorical variable values without nulls

    Returns:
    --------
    float
        Representing the Theil U correlation between y and x
    """
    h_x_given_y = conditional_entropy(x, y)
    h_x = entropy(value_frequency(x))
    # When x carries no entropy it is trivially fully explained.
    if h_x == 0:
        return 1
    return (h_x - h_x_given_y) / h_x
def symmetric_theil_u_correlation(x: Union[List, np.ndarray, pd.Series], y: Union[List, np.ndarray, pd.Series]) -> \
        float:
    """
    Calculate the symmetric Theil's U correlation of y to x.

    The symmetric version averages U(x|y) and U(y|x), weighting each direction by the
    entropy of the explained variable.

    Parameters:
    -----------
    x: Union[List, np.ndarray, pd.Series]
        A sequence of a categorical variable values without nulls
    y: Union[List, np.ndarray, pd.Series]
        A sequence of a categorical variable values without nulls

    Returns:
    --------
    float
        Representing the symmetric Theil U correlation between y and x
    """
    h_x = entropy(value_frequency(x))
    h_y = entropy(value_frequency(y))
    weighted_sum = h_x * theil_u_correlation(x, y) + h_y * theil_u_correlation(y, x)  # pylint: disable=arguments-out-of-order
    return weighted_sum / (h_x + h_y)
def correlation_ratio(categorical_data: Union[List, np.ndarray, pd.Series],
                      numerical_data: Union[List, np.ndarray, pd.Series],
                      ignore_mask: Union[List[bool], np.ndarray] = None) -> float:
    """
    Calculate the correlation ratio of numerical_variable to categorical_variable.

    Correlation ratio is a symmetric grouping based method that describes the level of correlation between
    a numeric variable and a categorical variable. Returns a value in [0,1].
    For more information see https://en.wikipedia.org/wiki/Correlation_ratio

    Parameters:
    -----------
    categorical_data: Union[List, np.ndarray, pd.Series]
        A sequence of categorical values encoded as class indices without nulls except possibly at ignored elements
    numerical_data: Union[List, np.ndarray, pd.Series]
        A sequence of numerical values without nulls except possibly at ignored elements
    ignore_mask: Union[List[bool], np.ndarray[bool]] default: None
        A sequence of boolean values indicating which elements to ignore. If None, includes all indexes.

    Returns:
    --------
    float
        Representing the correlation ratio between the variables.
    """
    # BUGFIX: the previous `if ignore_mask:` raises "The truth value of an array ... is
    # ambiguous" for numpy boolean arrays with more than one element, although the
    # docstring explicitly allows ndarray masks. Test for None/empty explicitly instead.
    if ignore_mask is not None and len(ignore_mask) > 0:
        keep = ~np.asarray(ignore_mask, dtype=bool)
        numerical_data = numerical_data[keep]
        categorical_data = categorical_data[keep]
    cat_num = int(np.max(categorical_data) + 1)
    y_avg_array = np.zeros(cat_num)
    n_array = np.zeros(cat_num)
    for i in range(cat_num):
        cat_measures = numerical_data[categorical_data == i]
        if cat_measures.shape[0] == 0:
            # Robustness: a class index with no members would crash np.average;
            # leaving its count and average at 0 contributes nothing to the sums below.
            continue
        n_array[i] = cat_measures.shape[0]
        y_avg_array[i] = np.average(cat_measures)
    # Weighted grand mean, between-group variance (numerator) and total variance (denominator).
    y_total_avg = np.sum(np.multiply(y_avg_array, n_array)) / np.sum(n_array)
    numerator = np.sum(np.multiply(n_array, np.power(np.subtract(y_avg_array, y_total_avg), 2)))
    denominator = np.sum(np.power(np.subtract(numerical_data, y_total_avg), 2))
    if denominator == 0:
        eta = 0
    else:
        eta = np.sqrt(numerator / denominator)
    return eta
"""Contain functions for handling dataframes in checks."""
import typing as t
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_integer_dtype, is_numeric_dtype
from runml_checks.core.errors import runml_checksValueError
from runml_checks.utils.typing import Hashable
from runml_checks.utils.validation import ensure_hashable_or_mutable_sequence
__all__ = ['validate_columns_exist', 'select_from_dataframe', 'un_numpy', 'generalized_corrwith',
'floatify_dataframe', 'floatify_series', 'default_fill_na_per_column_type']
def default_fill_na_per_column_type(df: pd.DataFrame, cat_features: t.Union[pd.Series, t.List]) -> pd.DataFrame:
    """Fill NaN values in each column with a default that fits the column type.

    Categorical features are filled with the string 'None', numeric columns with their
    mean, and any other column with its most frequent value (mode). Mutates and
    returns the given dataframe.

    Parameters
    ----------
    df : pd.DataFrame
        dataframe whose missing values are filled in place
    cat_features : t.Union[pd.Series, t.List]
        names of the columns to treat as categorical

    Returns
    -------
    pd.DataFrame
        the dataframe with missing values filled
    """
    for col_name in df.columns:
        if col_name in cat_features:
            df[col_name] = df[col_name].fillna('None')
        elif is_numeric_dtype(df[col_name]):
            df[col_name] = df[col_name].fillna(df[col_name].mean())
        else:
            # BUGFIX: `fillna(df[col].mode())` aligns the mode *Series* (indexed 0..k-1)
            # against the column's index, so in general nothing gets filled. Use the
            # first modal value as a scalar; skip columns that are entirely NaN.
            modes = df[col_name].mode()
            if not modes.empty:
                df[col_name] = df[col_name].fillna(modes.iloc[0])
    return df
def floatify_dataframe(df: pd.DataFrame):
    """Return the dataframe with every integer column cast to float.

    Parameters
    ----------
    df : pd.DataFrame
        dataframe to convert

    Returns
    -------
    pd.DataFrame
        the dataframe where all the int columns are converted to floats
    """
    new_dtypes = {
        column: 'float' if is_integer_dtype(dtype) else dtype
        for column, dtype in df.dtypes.to_dict().items()
    }
    return df.astype(new_dtypes)
def floatify_series(ser: pd.Series):
    """Return the series, cast to float when its dtype is an integer dtype.

    Parameters
    ----------
    ser : pd.Series
        series to convert

    Returns
    -------
    pd.Series
        the converted series
    """
    return ser.astype(float) if is_integer_dtype(ser) else ser
def un_numpy(val):
    """Convert a numpy value to the equivalent native Python value.

    Parameters
    ----------
    val :
        The value to convert.

    Returns
    -------
    returns the numpy value in a native type.
    """
    if isinstance(val, np.ndarray):
        return val.tolist()
    if isinstance(val, np.generic):
        # NaN scalars are mapped to None so they serialize cleanly.
        return None if np.isnan(val) else val.item()
    return val
def validate_columns_exist(
    df: pd.DataFrame,
    columns: t.Union[Hashable, t.List[Hashable]],
    raise_error: bool = True
) -> bool:
    """Validate given columns exist in dataframe.

    Parameters
    ----------
    df : pd.DataFrame
        dataframe to inspect
    columns : t.Union[Hashable, t.List[Hashable]]
        Column names to check
    raise_error : bool, default: True
        whether to raise an error if some column is not present in the dataframe or not

    Returns
    -------
    bool
        True when all the given columns exist in the dataframe

    Raises
    ------
    runml_checksValueError
        If some of the columns do not exist within provided dataframe.
        If receives empty list of 'columns'.
        If not all elements within 'columns' list are hashable.
    """
    error_message = 'columns - expected to receive not empty list of hashable values!'
    columns = ensure_hashable_or_mutable_sequence(columns, message=error_message)

    if len(columns) == 0:
        if raise_error:
            raise runml_checksValueError(error_message)
        return False

    missing = set(columns) - set(df.columns)
    if missing:
        if raise_error:
            raise runml_checksValueError(f'Given columns do not exist in dataset: {",".join(map(str, missing))}')
        return False
    return True
def select_from_dataframe(
    df: pd.DataFrame,
    columns: t.Union[Hashable, t.List[Hashable], None] = None,
    ignore_columns: t.Union[Hashable, t.List[Hashable], None] = None
) -> pd.DataFrame:
    """Filter DataFrame columns by given params.

    Parameters
    ----------
    df : pd.DataFrame
    columns : t.Union[Hashable, t.List[Hashable]] , default: None
        Column names to keep.
    ignore_columns : t.Union[Hashable, t.List[Hashable]] , default: None
        Column names to drop.

    Returns
    -------
    pandas.DataFrame
        returns horizontally filtered dataframe

    Raises
    ------
    runml_checksValueError
        If some columns do not exist within provided dataframe;
        If 'columns' and 'ignore_columns' arguments are both not 'None'.
    """
    # The two filters are mutually exclusive.
    if columns is not None and ignore_columns is not None:
        raise runml_checksValueError(
            'Cannot receive both parameters "columns" and "ignore", '
            'only one must be used at most'
        )
    if columns is not None:
        columns = ensure_hashable_or_mutable_sequence(columns)
        validate_columns_exist(df, columns)
        return t.cast(pd.DataFrame, df[columns])
    if ignore_columns is not None:
        ignore_columns = ensure_hashable_or_mutable_sequence(ignore_columns)
        validate_columns_exist(df, ignore_columns)
        return df.drop(labels=ignore_columns, axis='columns')
    return df
def generalized_corrwith(x1: pd.DataFrame, x2: pd.DataFrame, method: t.Callable):
    """
    Compute pairwise correlation.

    Pairwise correlation is computed between columns of one DataFrame with columns of another DataFrame.
    Pandas' method corrwith only applies when both dataframes have the same column names,
    this generalized method applies to any two Dataframes with the same number of rows, regardless of the column names.

    Parameters
    ----------
    x1: DataFrame
        Left data frame to compute correlations.
    x2: Dataframe
        Right data frame to compute correlations.
    method: Callable
        Method of correlation. callable with input two 1d ndarrays and returning a float.

    Returns
    -------
    DataFrame
        Pairwise correlations, the index matches the columns of x1 and the columns match the columns of x2.
    """
    def _correlate_with_x1(column):
        # Correlate a single x2 column against every column of x1.
        return x1.corrwith(column, method=method)

    return x2.apply(_correlate_with_x1)
"""Module with usefull decorators."""
import textwrap
import typing as t
from functools import wraps
from runml_checks.utils.logger import get_logger
__all__ = ['Substitution', 'Appender', 'deprecate_kwarg']
F = t.TypeVar('F', bound=t.Callable[..., t.Any])
INDENT = ' '
# Substitution and Appender are derived from matplotlib.docstring (1.1.0)
# module https://matplotlib.org/users/license.html
class DocStr(str):
    """Subclass of string that adds several additional methods."""

    def dedent(self) -> 'DocStr':
        """Return a copy with common leading whitespace removed from every line."""
        return DocStr(textwrap.dedent(self))

    def indent(self) -> 'DocStr':
        """Return a copy with every line indented by one indentation unit."""
        return DocStr(indent(self))

    def __format__(self, *args, **kwargs):
        """Format the string, supporting 'dedent'/'indent' format-spec modifiers.

        A format spec of ``dedent`` or ``indent`` applies the corresponding method
        once; ``N*dedent`` / ``N*indent`` applies it N times. Any other spec falls
        back to normal str formatting.
        """
        if len(args) == 0:
            return super().__format__(*args, **kwargs)

        allowed_modifiers = {'dedent', 'indent'}
        identation_modifier = args[0]
        # Spec looks like 'modifier' or 'N*modifier'.
        parts = identation_modifier.split('*')

        if len(parts) == 1 and parts[0] in allowed_modifiers:
            return getattr(self, parts[0])()
        elif len(parts) == 2 and parts[0].isnumeric() and parts[1] in allowed_modifiers:
            n_of_times = int(parts[0])
            modifier = parts[1]
            s = self
            # Apply the modifier repeatedly, re-wrapping as DocStr each time.
            for _ in range(n_of_times):
                s = getattr(s, modifier)()
            return s

        return super().__format__(*args, **kwargs)
class Substitution:
    """Substitute placeholders in a decorated function's docstring.

    A decorator that performs ``str.format``-style substitution on the target
    function's docstring, using the keyword arguments given at construction.
    Robust when ``func.__doc__`` is None (for example, if -OO was passed to the
    interpreter).

    Usage: construct a Substitution with a dictionary suitable for performing
    substitution, then decorate a suitable function with the constructed
    object, e.g.

    sub_author_name = Substitution(author='Jason')

    @sub_author_name
    def some_function(x):
        "{author} wrote this function"

    # note that some_function.__doc__ is now "Jason wrote this function"
    """

    def __init__(self, **kwargs):
        # Wrap every value as a DocStr so format-spec modifiers keep working.
        self.params = {
            key: value if isinstance(value, DocStr) else DocStr(value)
            for key, value in kwargs.items()
        }

    def __call__(self, func: F) -> F:
        """Decorate a function."""
        if func.__doc__:
            func.__doc__ = func.__doc__.format(**self.params)
        return func

    def update(self, **kwargs) -> None:
        """Update self.params with supplied args."""
        if isinstance(self.params, dict):
            self.params.update({
                key: value if isinstance(value, DocStr) else DocStr(value)
                for key, value in kwargs.items()
            })
class Appender:
    r"""Append an addendum to the docstring of the decorated function.

    Robust when ``func.__doc__`` is None. An optional 'join' parameter may be
    supplied, used to join the docstring and addendum; 'indents' indents the
    addendum before it is stored. e.g.

    add_copyright = Appender("Copyright (c) 2009", join='\n')

    @add_copyright
    def my_dog(has='fleas'):
        "This docstring will have a copyright below"
        pass
    """

    addendum: t.Optional[str]

    def __init__(self, addendum: t.Optional[str], join: str = '', indents: int = 0):
        self.addendum = indent(addendum, indents=indents) if indents > 0 else addendum
        self.join = join

    def __call__(self, func: F) -> F:
        """Decorate a function."""
        doc = func.__doc__ or ''
        extra = self.addendum or ''
        # Normalize a falsy addendum to '' (matches the historical behavior).
        self.addendum = extra
        func.__doc__ = textwrap.dedent(self.join.join([doc, extra]))
        return func
def indent(
    text: t.Optional[str],
    indents: int = 1,
    prefix: bool = False
) -> str:
    """Indent every line of *text* by ``indents`` indentation units.

    Returns '' for a None/empty/non-string input. When ``prefix`` is True the
    first line is indented as well; otherwise only the continuation lines are.
    """
    if not text or not isinstance(text, str):
        return ''
    pad = INDENT * indents
    joined = f'\n{pad}'.join(text.split('\n'))
    return f'{pad}{joined}' if prefix else joined
def deprecate_kwarg(
    old_name: str,
    new_name: t.Optional[str] = None,
) -> t.Callable[[F], F]:
    """Decorate a function with deprecated kwargs.

    Parameters
    ----------
    old_name : str
        Name of argument in function to deprecate
    new_name : Optional[str], default None
        Name of preferred argument in function.
    """
    def _deprecate_kwarg(func: F) -> F:
        @wraps(func)
        def wrapper(*args, **kwargs) -> t.Callable[..., t.Any]:
            if old_name in kwargs:
                if new_name in kwargs:
                    raise TypeError(
                        f'Can only specify {repr(old_name)} '
                        f'or {repr(new_name)}, not both'
                    )
                if new_name is None:
                    # No replacement exists: warn that the kwarg will disappear.
                    get_logger().warning(
                        'the %s keyword is deprecated and '
                        'will be removed in a future version. Please take '
                        'steps to stop the use of %s',
                        repr(old_name),
                        repr(old_name)
                    )
                else:
                    # Replacement exists: warn and transparently remap the kwarg.
                    get_logger().warning(
                        'the %s keyword is deprecated, '
                        'use %s instead',
                        repr(old_name),
                        repr(new_name)
                    )
                    kwargs[new_name] = kwargs.pop(old_name)
            return func(*args, **kwargs)
        return t.cast(F, wrapper)
    return _deprecate_kwarg
def get_routine_name(it: t.Any) -> str:
    """Return a readable name for a routine, class, or arbitrary object."""
    has_qualname = hasattr(it, '__qualname__')
    if has_qualname:
        return it.__qualname__
    if callable(it) or isinstance(it, type):
        return it.__name__
    return type(it).__name__
"""Utils module containing feature importance calculations."""
# TODO: move tabular functionality to the tabular sub-package
import time
import typing as t
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_datetime_or_timedelta_dtype, is_float_dtype, is_numeric_dtype
from sklearn.inspection import permutation_importance
from sklearn.pipeline import Pipeline
from runml_checks import tabular
from runml_checks.core import errors
from runml_checks.tabular.utils.validation import validate_model
from runml_checks.utils.logger import get_logger
from runml_checks.utils.metrics import runml_checkscorer, get_default_scorers, init_validate_scorers, task_type_check
from runml_checks.utils.strings import is_string_column
from runml_checks.utils.typing import Hashable
from runml_checks.utils.validation import ensure_hashable_or_mutable_sequence
__all__ = [
'calculate_feature_importance',
'calculate_feature_importance_or_none',
'column_importance_sorter_dict',
'column_importance_sorter_df',
'infer_categorical_features',
'infer_numerical_features',
'is_categorical',
'N_TOP_MESSAGE'
]
N_TOP_MESSAGE = '* showing only the top %s columns, you can change it using n_top_columns param'
def calculate_feature_importance_or_none(
    model: t.Any,
    dataset: t.Union['tabular.Dataset', pd.DataFrame],
    force_permutation: bool = False,
    permutation_kwargs: t.Optional[t.Dict[str, t.Any]] = None,
) -> t.Tuple[t.Optional[pd.Series], t.Optional[str]]:
    """Calculate features effect on the label or None if the input is incorrect.

    Parameters
    ----------
    model : t.Any
        a fitted model
    dataset : t.Union['tabular.Dataset', pd.DataFrame]
        dataset used to fit the model
    force_permutation : bool , default: False
        force permutation importance calculation
    permutation_kwargs : t.Optional[t.Dict[str, t.Any]] , default: None
        kwargs for permutation importance calculation

    Returns
    -------
    feature_importance, calculation_type : t.Tuple[t.Optional[pd.Series], t.Optional[str]]
        features importance normalized to 0-1 indexed by feature names, or None if the input is incorrect
        Tuple of the features importance and the calculation type
        (types: `permutation_importance`, `feature_importances_`, `coef_`)
    """
    try:
        if model is None:
            # BUGFIX: previously returned a bare None, while every other path (and the
            # declared return type) yields a 2-tuple; callers unpack the result as
            # `fi, calculation_type = ...` and would crash on a bare None.
            return None, None
        # calculate feature importance if dataset has a label and the model is fitted on it
        fi, calculation_type = calculate_feature_importance(
            model=model,
            dataset=dataset,
            force_permutation=force_permutation,
            permutation_kwargs=permutation_kwargs,
        )
        return fi, calculation_type
    except (
            errors.runml_checksValueError,
            errors.NumberOfFeaturesLimitError,
            errors.runml_checksTimeoutError,
            errors.ModelValidationError,
            errors.DatasetValidationError
    ) as error:
        # runml_checksValueError:
        #     if model validation failed;
        #     if it was not possible to calculate features importance;
        # NumberOfFeaturesLimitError:
        #     if the number of features limit were exceeded;
        # DatasetValidationError:
        #     if dataset did not meet requirements
        # ModelValidationError:
        #     if wrong type of model was provided;
        #     if function failed to predict on model;
        get_logger().warning('Features importance was not calculated:\n%s', error)
        return None, None
def calculate_feature_importance(
    model: t.Any,
    dataset: t.Union['tabular.Dataset', pd.DataFrame],
    force_permutation: bool = False,
    permutation_kwargs: t.Dict[str, t.Any] = None,
) -> t.Tuple[pd.Series, str]:
    """Calculate features effect on the label.

    Tries, in order: forced permutation importance (Dataset only), the model's
    built-in importance (`feature_importances_` / `coef_`, skipped for Pipelines),
    and finally permutation importance as a fallback. Raises when none succeed.

    Parameters
    ----------
    model : t.Any
        a fitted model
    dataset : t.Union['tabular.Dataset', pd.DataFrame]
        dataset used to fit the model
    force_permutation : bool, default: False
        force permutation importance calculation
    permutation_kwargs : t.Dict[str, t.Any] , default: None
        kwargs for permutation importance calculation

    Returns
    -------
    Tuple[Series, str]:
        first item - feature importance normalized to 0-1 indexed by feature names,
        second item - type of feature importance calculation (types: `permutation_importance`,
        `feature_importances_`, `coef_`)

    Raises
    ------
    NotFittedError
        Call 'fit' with appropriate arguments before using this estimator.
    runml_checksValueError
        if model validation failed.
        if it was not possible to calculate features importance.
    NumberOfFeaturesLimitError
        if the number of features limit were exceeded.
    """
    permutation_kwargs = permutation_kwargs or {}
    permutation_kwargs['random_state'] = permutation_kwargs.get('random_state', 42)
    validate_model(dataset, model)
    permutation_failure = None
    calc_type = None
    importance = None

    # Attempt 1: forced permutation importance (only possible on a Dataset, not a raw frame).
    if force_permutation:
        if isinstance(dataset, pd.DataFrame):
            permutation_failure = 'Cannot calculate permutation feature importance on a pandas Dataframe, using ' \
                                  'built-in model\'s feature importance instead. In order to force permutation ' \
                                  'feature importance, please use the Dataset object.'
        else:
            try:
                importance = _calc_permutation_importance(model, dataset, **permutation_kwargs)
                calc_type = 'permutation_importance'
            except errors.runml_checksTimeoutError as e:
                permutation_failure = f'{e.message}\n using model\'s built-in feature importance instead'

    # If there was no force permutation, or if it failed while trying to calculate importance,
    # we don't take built-in importance in pipelines because the pipeline is changing the features
    # (for example one-hot encoding) which leads to the inner model features
    # being different than the original dataset features
    if importance is None and not isinstance(model, Pipeline):
        # Get the actual model in case of pipeline
        importance, calc_type = _built_in_importance(model, dataset)

    # If found importance and was force permutation failure before, show warning
    if importance is not None and permutation_failure:
        get_logger().warning(permutation_failure)

    # If there was no permutation failure and no importance on the model, using permutation anyway
    if importance is None and permutation_failure is None and isinstance(dataset, tabular.Dataset):
        if not permutation_kwargs.get('skip_messages', False):
            if isinstance(model, Pipeline):
                pre_text = 'Cannot use model\'s built-in feature importance on a Scikit-learn Pipeline,'
            else:
                pre_text = 'Could not find built-in feature importance on the model,'
            get_logger().warning('%s using permutation feature importance calculation instead', pre_text)
        importance = _calc_permutation_importance(model, dataset, **permutation_kwargs)
        calc_type = 'permutation_importance'

    # If after all importance is still none raise error
    if importance is None:
        # FIXME: better message
        raise errors.runml_checksValueError("Was not able to calculate features importance")
    return importance.fillna(0), calc_type
def _built_in_importance(
    model: t.Any,
    dataset: t.Union['tabular.Dataset', pd.DataFrame],
) -> t.Tuple[t.Optional[pd.Series], t.Optional[str]]:
    """Return the model's built-in feature importance, if the model exposes one.

    Checks `feature_importances_` (tree ensembles) first, then `coef_`
    (linear models). The returned values are normalized to sum to 1 and
    indexed by the dataset's feature names. Returns (None, None) when no
    built-in importance attribute is available.
    """
    if isinstance(dataset, tabular.Dataset):
        feature_names = dataset.features
    else:
        feature_names = dataset.columns
    if hasattr(model, 'feature_importances_'):  # Ensembles
        raw_importance = model.feature_importances_
        if raw_importance is None:
            return None, None
        return pd.Series(raw_importance / raw_importance.sum(), index=feature_names), 'feature_importances_'
    if hasattr(model, 'coef_'):  # Linear models
        if model.coef_ is None:
            return None, None
        weights = np.abs(model.coef_.flatten())
        return pd.Series(weights / weights.sum(), index=feature_names), 'coef_'
    return None, None
def _calc_permutation_importance(
    model: t.Any,
    dataset: 'tabular.Dataset',
    n_repeats: int = 30,
    mask_high_variance_features: bool = False,
    random_state: int = 42,
    n_samples: int = 10_000,
    alternative_scorer: t.Optional[runml_checkscorer] = None,
    skip_messages: bool = False,
    timeout: t.Optional[int] = None
) -> pd.Series:
    """Calculate permutation feature importance. Return nonzero value only when std doesn't mask signal.
    Parameters
    ----------
    model: t.Any
        A fitted model
    dataset: tabular.Dataset
        dataset used to fit the model
    n_repeats: int, default: 30
        Number of times to permute a feature
    mask_high_variance_features : bool , default: False
        If true, features for which calculated permutation importance values
        varied greatly would be returned has having 0 feature importance
    random_state: int, default: 42
        Random seed for permutation importance calculation.
    n_samples: int, default: 10_000
        The number of samples to draw from X to compute feature importance
        in each repeat (without replacement).
    alternative_scorer: t.Optional[runml_checkscorer], default: None
        Scorer to use for evaluation of the model performance in the permutation_importance function. If not defined,
        the default runml_checks scorers are used.
    skip_messages: bool, default: False
        If True will not print any message related to timeout or calculation.
    timeout: t.Optional[int], default: None
        Allowed runtime of permutation_importance, in seconds. As we can't limit the actual runtime of the function,
        the timeout parameter is used for estimation of the runtime, done by measuring the inference time of the model
        and multiplying it by number of repeats and features. If the expected runtime is bigger than timeout, the
        calculation is skipped.
    Returns
    -------
    pd.Series
        feature importance normalized to 0-1 indexed by feature names
    """
    if dataset.label_name is None:
        raise errors.DatasetValidationError("Expected dataset with label.")
    # A single feature trivially receives all the importance.
    if len(dataset.features) == 1:
        return pd.Series([1], index=dataset.features)
    dataset_sample = dataset.sample(n_samples, drop_na_label=True, random_state=random_state)
    # Test score time on the dataset sample
    if alternative_scorer:
        scorer = alternative_scorer
    else:
        # Fall back to the first default runml_checks scorer for the detected task type.
        task_type = task_type_check(model, dataset)
        default_scorers = get_default_scorers(task_type)
        scorer_name = next(iter(default_scorers))
        single_scorer_dict = {scorer_name: default_scorers[scorer_name]}
        scorer = init_validate_scorers(single_scorer_dict, model, dataset, model_type=task_type)[0]
    # Time one scoring pass, then extrapolate: permutation importance scores
    # the model n_repeats times per feature.
    start_time = time.time()
    scorer(model, dataset_sample)
    calc_time = time.time() - start_time
    predicted_time_to_run = int(np.ceil(calc_time * n_repeats * len(dataset.features)))
    if timeout is not None:
        if predicted_time_to_run > timeout:
            raise errors.runml_checksTimeoutError(
                f'Skipping permutation importance calculation: calculation was projected to finish in '
                f'{predicted_time_to_run} seconds, but timeout was configured to {timeout} seconds')
        elif not skip_messages:
            get_logger().info('Calculating permutation feature importance. Expected to finish in %s seconds',
                              predicted_time_to_run)
    elif not skip_messages:
        get_logger().warning('Calculating permutation feature importance without time limit. Expected to finish in '
                             '%s seconds', predicted_time_to_run)
    r = permutation_importance(
        model,
        dataset_sample.features_columns,
        dataset_sample.label_col,
        n_repeats=n_repeats,
        random_state=random_state,
        n_jobs=-1,
        scoring=scorer.scorer
    )
    # Keep a feature only if its mean importance is positive; when masking
    # high-variance features, it must stay positive after subtracting one std.
    significance_mask = (
        r.importances_mean - r.importances_std > 0
        if mask_high_variance_features
        else r.importances_mean > 0
    )
    feature_importances = r.importances_mean * significance_mask
    # Normalize to sum to 1, unless every feature was masked out.
    total = feature_importances.sum()
    if total != 0:
        feature_importances = feature_importances / total
    return pd.Series(feature_importances, index=dataset.features)
def get_importance(name: str, feature_importances: t.Union[pd.Series, t.Dict], ds: 'tabular.Dataset') -> float:
    """Return the importance of a column, treating label/datetime/index as maximally important.

    Parameters
    ----------
    name : str
        Column name to look up.
    feature_importances : t.Union[pd.Series, t.Dict]
        Feature importance normalized to 0-1, indexed/keyed by feature names.
        Callers may pass an empty dict when no importance is available.
    ds : tabular.Dataset
        Dataset whose label/datetime/index column names get top priority.

    Returns
    -------
    float
        The column's importance if present in `feature_importances`,
        1 for the label/datetime/index columns, 0 otherwise.
        (The previous `-> int` annotation was wrong: importances are floats.)
    """
    # `in` checks dict keys and a Series index alike; the old `.keys()` call was redundant.
    if name in feature_importances:
        return feature_importances[name]
    if name in (ds.label_name, ds.datetime_name, ds.index_name):
        return 1
    return 0
def column_importance_sorter_dict(
    cols_dict: t.Dict[Hashable, t.Any],
    dataset: 'tabular.Dataset',
    feature_importances: t.Optional[pd.Series] = None,
    n_top: int = 10
) -> t.Dict:
    """Return the dict of columns sorted and limited by feature importance.

    Parameters
    ----------
    cols_dict : t.Dict[Hashable, t.Any]
        dict where columns are the keys
    dataset : tabular.Dataset
        dataset used to fit the model
    feature_importances : t.Optional[pd.Series] , default: None
        feature importance normalized to 0-1 indexed by feature names
    n_top : int , default: 10
        amount of columns to show ordered by feature importance (date, index, label are first)

    Returns
    -------
    Dict
        the dict of columns sorted and limited by feature importance.
    """
    if feature_importances is None:
        feature_importances = {}

    def importance_of(item):
        # Items are (column_name, value) pairs; rank by the column's importance.
        return get_importance(item[0], feature_importances, dataset)

    ordered_items = sorted(cols_dict.items(), key=importance_of, reverse=True)
    if n_top:
        ordered_items = ordered_items[:n_top]
    return dict(ordered_items)
def column_importance_sorter_df(
    df: pd.DataFrame,
    ds: 'tabular.Dataset',
    feature_importances: t.Optional[pd.Series],
    n_top: int = 10,
    col: t.Optional[Hashable] = None
) -> pd.DataFrame:
    """Return the dataframe of columns sorted and limited by feature importance.
    Parameters
    ----------
    df : pd.DataFrame
        DataFrame to sort
    ds : tabular.Dataset
        dataset used to fit the model
    feature_importances : t.Optional[pd.Series]
        feature importance normalized to 0-1 indexed by feature names (may be None)
    n_top : int , default: 10
        amount of columns to show ordered by feature importance (date, index, label are first)
    col : t.Optional[Hashable] , default: None
        name of column to sort the dataframe
    Returns
    -------
    pd.DataFrame
        the dataframe sorted and limited by feature importance.
    """
    if len(df) == 0:
        return df
    feature_importances = {} if feature_importances is None else feature_importances
    # Maps each name in a column/index to its importance (label/date/index rank highest).
    key = lambda column: [get_importance(name, feature_importances, ds) for name in column]
    if col:
        df = df.sort_values(by=[col], key=key, ascending=False)
    # NOTE(review): sort_index runs unconditionally and re-orders rows by the
    # importance of their index values, which may override the sort_values
    # ordering above when `col` is given - confirm this is intended.
    df = df.sort_index(key=key, ascending=False)
    if n_top:
        return df.head(n_top)
    return df
def infer_numerical_features(df: pd.DataFrame) -> t.List[Hashable]:
    """Infers which features are numerical.

    Object-dtype columns are re-inferred from their values first, so a column
    that holds only numbers boxed as objects still counts as numerical.

    Parameters
    ----------
    df : pd.DataFrame
        dataframe for which to infer numerical features

    Returns
    -------
    List[Hashable]
        list of numerical features
    """
    def _effective_series(series: pd.Series) -> pd.Series:
        # An 'object' column might still contain only numbers - rebuild the
        # series from its values so pandas re-infers the dtype.
        if series.dtype == 'object':
            return pd.Series(series.to_list())
        return series

    return [name for name in df.columns if is_numeric_dtype(_effective_series(df[name]))]
def infer_categorical_features(
    df: pd.DataFrame,
    max_categorical_ratio: float = 0.01,
    max_categories: int = None,
    columns: t.Optional[t.List[Hashable]] = None,
) -> t.List[Hashable]:
    """Infers which features are categorical by checking types and number of unique values.

    Parameters
    ----------
    df : pd.DataFrame
        dataframe for which to infer categorical features
    max_categorical_ratio : float , default: 0.01
    max_categories : int , default: None
    columns : t.Optional[t.List[Hashable]] , default: None

    Returns
    -------
    List[Hashable]
        list of categorical features
    """
    # Columns with an explicit pandas 'category' dtype win outright.
    explicit_categoricals = df.select_dtypes(include='category').columns
    if len(explicit_categoricals) > 0:
        return list(explicit_categoricals)
    if columns is not None:
        candidate_columns = ensure_hashable_or_mutable_sequence(columns)
    else:
        candidate_columns = df.columns
    # When a cap is supplied it overrides all three per-dtype category limits
    # of is_categorical (string / int / float-or-datetime).
    threshold_overrides = () if max_categories is None else (max_categories,) * 3
    return [
        column
        for column in candidate_columns
        if is_categorical(t.cast(pd.Series, df[column]), max_categorical_ratio, *threshold_overrides)
    ]
def is_categorical(
    column: pd.Series,
    max_categorical_ratio: float = 0.01,
    max_categories_type_string: int = 150,
    max_categories_type_int: int = 30,
    max_categories_type_float_or_datetime: int = 5
) -> bool:
    """Check if uniques are few enough to count as categorical.

    (Fix: removed dataset-metadata residue that was fused onto the final line.)

    Parameters
    ----------
    column : pd.Series
        A dataframe column
    max_categorical_ratio : float , default: 0.01
        Maximum allowed ratio of unique values to sample count.
    max_categories_type_string : int , default: 150
        Unique-value cap for string columns.
    max_categories_type_int : int , default: 30
        Unique-value cap for integer (and integer-valued numeric) columns.
    max_categories_type_float_or_datetime : int , default: 5
        Unique-value cap for float (non-integer valued) or datetime columns.

    Returns
    -------
    bool
        True if is categorical according to input numbers
    """
    n_samples = len(column.dropna())
    if n_samples == 0:
        get_logger().warning('Column %s only contains NaN values.', column.name)
        return False
    # Use at least 1000 as the ratio denominator so tiny datasets do not make
    # every column look categorical.
    n_samples = np.max([n_samples, 1000])
    n_unique = column.nunique(dropna=True)
    if is_string_column(column):
        return (n_unique / n_samples) < max_categorical_ratio and n_unique <= max_categories_type_string
    # Floats count as float-like only if some value has a fractional part;
    # otherwise they fall through to the integer threshold below.
    elif (is_float_dtype(column) and np.max(column % 1) > 0) or is_datetime_or_timedelta_dtype(column):
        return (n_unique / n_samples) < max_categorical_ratio and n_unique <= max_categories_type_float_or_datetime
    elif is_numeric_dtype(column):
        return (n_unique / n_samples) < max_categorical_ratio and n_unique <= max_categories_type_int
    else:
        return False
"""Utils module containing utilities for plotting."""
import matplotlib.pyplot as plt
import numpy as np
import plotly.graph_objects as go
from matplotlib.cm import ScalarMappable
from matplotlib.colors import LinearSegmentedColormap
from runml_checks.utils.strings import format_number_if_not_nan
__all__ = ['create_colorbar_barchart_for_check', 'shifted_color_map',
'create_confusion_matrix_figure', 'colors', 'hex_to_rgba']
# Fixed colors used when plotting the standard dataset roles.
colors = {'Train': '#00008b',  # dark blue
          'Test': '#69b3a2',  # teal green
          'Baseline': '#b287a3',  # muted mauve
          'Generated': '#2191FB'}  # bright blue
# iterable for displaying colors on metrics
metric_colors = ['rgb(102, 197, 204)',
                 'rgb(220, 176, 242)',
                 'rgb(135, 197, 95)',
                 'rgb(158, 185, 243)',
                 'rgb(254, 136, 177)',
                 'rgb(201, 219, 116)',
                 'rgb(139, 224, 164)',
                 'rgb(180, 151, 231)']
def create_colorbar_barchart_for_check(
    x: np.ndarray,
    y: np.ndarray,
    ylabel: str = 'Result',
    xlabel: str = 'Features',
    color_map: str = 'RdYlGn_r',
    start: float = 0,
    stop: float = 1.0,
    tick_steps: float = 0.1,
    color_label: str = 'Color',
    color_shift_midpoint: float = 0.5,
    check_name: str = ''
):
    """Output a colorbar barchart using matplotlib.
    Parameters
    ----------
    x: np.ndarray
        array containing x axis data.
    y: np.ndarray
        array containing y axis data.
    ylabel: str , default: Result
        Name of y axis
    xlabel : str , default: Features
        Name of x axis
    color_map : str , default: RdYlGn_r
        color_map name.
        See https://matplotlib.org/stable/tutorials/colors/colormaps.html for more details
    start : float , default: 0
        start of y axis ticks
    stop : float , default: 1.0
        end of y axis ticks
    tick_steps : float , default: 0.1
        step to y axis ticks
    color_label : str , default: Color
        label printed alongside the colorbar
    color_shift_midpoint : float , default: 0.5
        midpoint of color map
    check_name : str , default: ''
        name of the check that called this function
    """
    fig, ax = plt.subplots(figsize=(15, 4)) # pylint: disable=unused-variable
    # Reuse the colormap already registered under this check's name if one
    # exists; otherwise fall back to the base named colormap.
    try:
        my_cmap = plt.cm.get_cmap(color_map + check_name)
    except ValueError:
        my_cmap = plt.cm.get_cmap(color_map)
    # Re-center the colormap around color_shift_midpoint and register it per check.
    my_cmap = shifted_color_map(my_cmap, start=start, midpoint=color_shift_midpoint, stop=stop,
                                name=color_map + check_name)
    cmap_colors = my_cmap(list(y))
    _ = ax.bar(x, y, color=cmap_colors) # pylint: disable=unused-variable
    # Draw a colorbar reflecting the (shifted) colormap over [start, stop].
    sm = ScalarMappable(cmap=my_cmap, norm=plt.Normalize(start, stop))
    sm.set_array([])
    cbar = plt.colorbar(sm)
    cbar.set_label(color_label, rotation=270, labelpad=25)
    plt.yticks(np.arange(start, stop, tick_steps))
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
def shifted_color_map(cmap, start=0, midpoint=0.5, stop=1.0, name: str = 'shiftedcmap', transparent_from: float = None):
    """Offset the "center" of a colormap.
    Parameters
    ----------
    cmap
        The matplotlib colormap to be altered
    start , default: 0
        Offset from lowest point in the colormap's range.
        Should be between 0.0 and 1.0.
    midpoint , default: 0.5
        The new center of the colormap. Defaults to
        0.5 (no shift). Should be between 0.0 and 1.0. In
        general, this should be 1 - vmax/(vmax + abs(vmin))
        For example if your data range from -15.0 to +5.0 and
        you want the center of the colormap at 0.0, `midpoint`
        should be set to 1 - 5/(5 + 15)) or 0.75
    stop , default: 1.0
        Offset from highest point in the colormap's range.
        Should be between 0.0 and 1.0.
    name: str , default: shiftedcmap
        Name under which the new colormap is registered with matplotlib.
    transparent_from : float , default: None
        The point between start and stop where the colors will start being transparent.
    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap
        The shifted colormap, already registered via plt.register_cmap.
    """
    if transparent_from is None:
        transparent_from = stop
    cdict = {
        'red': [],
        'green': [],
        'blue': [],
        'alpha': []
    }
    # regular index to compute the colors
    reg_index = np.linspace(start, stop, 257)
    # shifted index to match the data
    shift_index = np.hstack([
        np.linspace(0.0, midpoint, 128, endpoint=False),
        np.linspace(midpoint, 1.0, 129, endpoint=True)
    ])
    # Sample the source colormap at the regular positions and re-anchor each
    # sample at its shifted position.
    for ri, si in zip(reg_index, shift_index):
        r, g, b, a = cmap(ri)
        cdict['red'].append((si, r, r))
        cdict['green'].append((si, g, g))
        cdict['blue'].append((si, b, b))
        # NOTE(review): dividing transparent_from by midpoint looks like an
        # attempt to map it into the shifted index space - confirm this is
        # correct for midpoint values other than 0.5.
        if transparent_from / midpoint < si:
            cdict['alpha'].append((si, 0.3, 0.3))
        else:
            cdict['alpha'].append((si, a, a))
    newcmap = LinearSegmentedColormap(name, cdict)
    plt.register_cmap(cmap=newcmap)
    return newcmap
def hex_to_rgba(h, alpha):
    """Convert a hex color string (e.g. '#1a2b3c') to an 'rgba(r, g, b, a)' string."""
    hex_digits = h.lstrip('#')
    channels = [int(hex_digits[offset:offset + 2], 16) for offset in (0, 2, 4)]
    return 'rgba' + str(tuple(channels + [alpha]))
def create_confusion_matrix_figure(confusion_matrix: np.ndarray, x: np.ndarray,
                                   y: np.ndarray, normalized: bool):
    """Create a confusion matrix figure.

    (Fix: removed dataset-metadata residue that was fused onto the final line.)

    Parameters
    ----------
    confusion_matrix: np.ndarray
        2D array containing the confusion matrix.
    x: np.ndarray
        array containing x axis data.
    y: np.ndarray
        array containing y axis data.
    normalized: bool
        if True will also show normalized values by the true values.

    Returns
    -------
    plotly Figure object
        confusion matrix figure
    """
    if normalized:
        # Normalize each row by its total so cells show percent out of the true class.
        confusion_matrix_norm = confusion_matrix.astype('float') / \
                                confusion_matrix.sum(axis=1)[:, np.newaxis] * 100
        z = np.vectorize(format_number_if_not_nan)(confusion_matrix_norm)
        texttemplate = '%{z}%<br>(%{text})'
        colorbar_title = '% out of<br>True Values'
        plot_title = 'Percent Out of True Values (Count)'
    else:
        z = confusion_matrix
        colorbar_title = None
        texttemplate = '%{text}'
        plot_title = 'Value Count'
    # Heatmap colors come from z; the raw counts are always shown via `text`.
    fig = go.Figure(data=go.Heatmap(
        x=x,
        y=y,
        z=z,
        text=confusion_matrix,
        texttemplate=texttemplate))
    fig.data[0].colorbar.title = colorbar_title
    fig.update_layout(title=plot_title)
    fig.update_layout(height=600)
    fig.update_xaxes(title='Predicted Value', type='category', scaleanchor='y', constrain='domain')
    fig.update_yaxes(title='True value', type='category', constrain='domain', autorange='reversed')
    return fig
"""Utils module containing useful global functions."""
import logging
import os
import time
import typing as t
from functools import lru_cache
import tqdm
from ipykernel.zmqshell import ZMQInteractiveShell
from IPython import get_ipython
from IPython.display import display
from IPython.terminal.interactiveshell import TerminalInteractiveShell
from tqdm.notebook import tqdm as tqdm_notebook
from runml_checks.utils.logger import get_verbosity
# Public API of this module.
__all__ = [
    'is_notebook',
    'is_headless',
    'create_progress_bar',
    'is_colab_env',
    'is_kaggle_env',
    'is_databricks_env',
    'is_sagemaker_env',
    'is_terminal_interactive_shell',
    'is_zmq_interactive_shell',
    'ProgressBarGroup'
]
@lru_cache(maxsize=None)
def is_notebook() -> bool:
    """Check if we're in an interactive context (Notebook, GUI support) or terminal-based.

    Returns
    -------
    bool
        True if we are in a notebook context, False otherwise
    """
    try:
        # Interactive shells expose a `config` attribute; plain interpreters don't.
        return hasattr(get_ipython(), 'config')
    except NameError:
        return False  # Probably standard Python interpreter
@lru_cache(maxsize=None)
def is_terminal_interactive_shell() -> bool:
    """Check whether we are in a terminal interactive shell or not."""
    active_shell = get_ipython()
    return isinstance(active_shell, TerminalInteractiveShell)
@lru_cache(maxsize=None)
def is_zmq_interactive_shell() -> bool:
    """Check whether we are in a web-based interactive shell or not."""
    active_shell = get_ipython()
    return isinstance(active_shell, ZMQInteractiveShell)
@lru_cache(maxsize=None)
def is_headless() -> bool:
    """Check if the system can support GUI.

    Tries to import tkinter and open a root window; failure of either step
    means no GUI is available. (Fix: removed the dead Python-2 `Tkinter`
    fallback - this codebase is Python-3-only, e.g. it uses f-strings.)

    Returns
    -------
    bool
        True if we cannot support GUI, False otherwise
    """
    # pylint: disable=import-outside-toplevel
    try:
        import tkinter as tk
    except ImportError:
        return True
    try:
        root = tk.Tk()
    except tk.TclError:
        # tkinter is installed but no display is available (headless machine).
        return True
    root.destroy()
    return False
@lru_cache(maxsize=None)
def is_colab_env() -> bool:
    """Check if we are in the google colab environment."""
    # Colab's shell class lives in the google.colab package, so its repr mentions it.
    shell_description = str(get_ipython())
    return 'google.colab' in shell_description
@lru_cache(maxsize=None)
def is_kaggle_env() -> bool:
    """Check if we are in the kaggle environment."""
    # Kaggle kernels always export this environment variable.
    return 'KAGGLE_KERNEL_RUN_TYPE' in os.environ
@lru_cache(maxsize=None)
def is_databricks_env() -> bool:
    """Check if we are in the databricks environment."""
    # Databricks runtimes always export this environment variable.
    return os.environ.get('DATABRICKS_RUNTIME_VERSION') is not None
@lru_cache(maxsize=None)
def is_sagemaker_env() -> bool:
    """Check if we are in the AWS Sagemaker environment."""
    # SageMaker images export AWS_PATH in their environment.
    return os.environ.get('AWS_PATH') is not None
class HtmlProgressBar:
    """Progress bar implementation that uses html <progress> tag."""
    # CSS injected once per bar instance; styles every <progress> element in the output.
    STYLE = """
    <style>
        progress {
            -webkit-appearance: none;
            border: none;
            border-radius: 3px;
            width: 300px;
            height: 20px;
            vertical-align: middle;
            margin-right: 10px;
            background-color: aliceblue;
        }
        progress::-webkit-progress-bar {
            border-radius: 3px;
            background-color: aliceblue;
        }
        progress::-webkit-progress-value {
            background-color: #9d60fb;
        }
        progress::-moz-progress-bar {
            background-color: #9d60fb;
        }
    </style>
    """
    def __init__(
        self,
        title: str,
        unit: str,
        iterable: t.Iterable[t.Any],
        total: int,
        metadata: t.Optional[t.Mapping[str, t.Any]] = None,
        display_immediately: bool = False,
        disable: bool = False,
    ):
        """Initialize the progress bar and reserve a display slot in the notebook output."""
        self._title = title
        self._unit = unit
        self._iterable = iterable
        self._total = total
        self._seconds_passed = 0
        self._inital_metadata = dict(metadata) if metadata else {}
        self._metadata = self._inital_metadata.copy()
        self._progress_bar = None
        self._current_item_index = 0
        # Inject the CSS, then reserve a dedicated display handle that
        # refresh/close/clean will repeatedly update in place.
        display({'text/html': self.STYLE}, raw=True)
        self._display_handler = display({'text/html': ''}, raw=True, display_id=True)
        self._disable = disable
        # Counts completed iterations of the whole iterable, so a reused bar resets itself.
        self._reuse_counter = 0
        if disable is False and display_immediately is True:
            self.refresh()
    def __iter__(self):
        """Iterate over iterable."""
        if self._disable is True:
            # Disabled: pass items straight through, still counting the reuse.
            try:
                for it in self._iterable:
                    yield it
            finally:
                self._reuse_counter += 1
            return
        if self._reuse_counter > 0:
            # The bar is being iterated again - reset all progress state.
            self._seconds_passed = 0
            self._current_item_index = 0
            self._progress_bar = None
            self._metadata = self._inital_metadata
            self.clean()
        started_at = time.time()
        try:
            self.refresh()
            for i, it in enumerate(self._iterable, start=1):
                # Yield first, then record that the item finished and redraw.
                yield it
                self._current_item_index = i
                self._seconds_passed = int(time.time() - started_at)
                self.refresh()
        finally:
            self._reuse_counter += 1
            self.close()
    def refresh(self):
        """Refresh progress bar."""
        # NOTE(review): this assigns `self.progress_bar` while __init__/__iter__
        # track `self._progress_bar`; the underscored attribute looks unused - confirm.
        self.progress_bar = self.create_progress_bar(
            title=self._title,
            item=self._current_item_index,
            total=self._total,
            seconds_passed=self._seconds_passed,
            metadata=self._metadata
        )
        self._display_handler.update(
            {'text/html': self.progress_bar},
            raw=True
        )
    def close(self):
        """Close progress bar."""
        self._display_handler.update({'text/html': ''}, raw=True)
    def clean(self):
        """Clean display cell."""
        self._display_handler.update({'text/html': ''}, raw=True)
    def set_postfix(self, data: t.Mapping[str, t.Any], refresh: bool = True):
        """Set postfix."""
        self.update_metadata(data, refresh)
    def reset_metadata(self, data: t.Mapping[str, t.Any], refresh: bool = True):
        """Reset metadata."""
        self._metadata = dict(data)
        if refresh is True:
            self.refresh()
    def update_metadata(self, data: t.Mapping[str, t.Any], refresh: bool = True):
        """Update metadata."""
        self._metadata.update(data)
        if refresh is True:
            self.refresh()
    @classmethod
    def create_label(
        cls,
        item: int,
        total: int,
        seconds_passed: int,
        metadata: t.Optional[t.Mapping[str, t.Any]] = None
    ):
        """Create progress bar label, e.g. '3/10 [Time: 01:15, key=value]'."""
        minutes = seconds_passed // 60
        seconds = seconds_passed - (minutes * 60)
        # Zero-pad minutes/seconds to two digits.
        minutes = f'0{minutes}' if minutes < 10 else str(minutes)
        seconds = f'0{seconds}' if seconds < 10 else str(seconds)
        if metadata:
            metadata_string = ', '.join(f'{k}={str(v)}' for k, v in metadata.items())
            metadata_string = f', {metadata_string}'
        else:
            metadata_string = ''
        return f'{item}/{total} [Time: {minutes}:{seconds}{metadata_string}]'
    @classmethod
    def create_progress_bar(
        cls,
        title: str,
        item: int,
        total: int,
        seconds_passed: int,
        metadata: t.Optional[t.Mapping[str, t.Any]] = None
    ) -> str:
        """Create progress bar HTML snippet (a <progress> element plus its label)."""
        return f"""
            <div>
                <label>
                    {title}:<br/>
                    <progress
                        value='{item}'
                        max='{total}'
                        class='runml_checks'
                    >
                    </progress>
                </label>
                <span>{cls.create_label(item, total, seconds_passed, metadata)}</span>
            </div>
        """
def create_progress_bar(
    name: str,
    unit: str,
    total: t.Optional[int] = None,
    iterable: t.Optional[t.Sequence[t.Any]] = None,
) -> t.Union[
    tqdm_notebook,
    HtmlProgressBar,
    tqdm.tqdm
]:
    """Create a progress bar instance.

    Uses an HTML-based bar inside web interactive shells, a tqdm bar otherwise.
    At least one of `total` or `iterable` must be provided.
    """
    if iterable is not None:
        n_items = len(iterable)
    elif total is not None:
        n_items = total
    else:
        raise ValueError(
            'at least one of the parameters iterable | total must be not None'
        )
    # Hide the bar entirely when logging verbosity is WARNING or higher.
    disabled = get_verbosity() >= logging.WARNING
    if is_zmq_interactive_shell():
        return HtmlProgressBar(
            title=name,
            unit=unit,
            total=n_items,
            iterable=iterable or range(n_items),
            display_immediately=True,
            disable=disabled
        )
    # Terminal fallback: scale the drawn bar with the item count (minimum width 5).
    bar_width = max(n_items, 5)
    right_part = ' {n_fmt}/{total_fmt} [Time: {elapsed}{postfix}]'
    return tqdm.tqdm(
        iterable=iterable,
        total=total,
        desc=name,
        unit=f' {unit}',
        leave=False,
        bar_format=f'{{desc}}:\n|{{bar:{bar_width}}}|{right_part}',
        disable=disabled,
    )
class DummyProgressBar:
    """Dummy progress bar that has only one step."""

    def __init__(self, name: str, unit: str = '') -> None:
        # A single-item bar; it is advanced (and closed) only on context exit.
        self.pb = create_progress_bar(name=name, unit=unit, iterable=[0])

    def __enter__(self, *args, **kwargs):
        """Enter context."""
        return self

    def __exit__(self, *args, **kwargs):
        """Exit context."""
        # Drain the single-item bar so it completes and closes.
        for _ in self.pb:
            pass
class ProgressBarGroup:
    """Progress Bar Factory.

    Utility class that makes sure that all progress bars in the
    group will be closed simultaneously.
    (Fix: removed dataset-metadata residue that was fused onto the final line.)
    """

    # All bars created through this group; closed together in __exit__.
    register: t.List[t.Union[
        DummyProgressBar,
        tqdm_notebook,
        HtmlProgressBar,
        tqdm.tqdm
    ]]

    def __init__(self) -> None:
        self.register = []

    def create(
        self,
        name: str,
        unit: str,
        total: t.Optional[int] = None,
        iterable: t.Optional[t.Sequence[t.Any]] = None,
    ) -> t.Union[
        tqdm_notebook,
        HtmlProgressBar,
        tqdm.tqdm
    ]:
        """Create progress bar instance.

        The bar's `close` is patched to only refresh, so it stays visible until
        the whole group exits; the original close is saved and invoked in __exit__.
        """
        pb = create_progress_bar(
            name=name,
            unit=unit,
            total=total,
            iterable=iterable
        )
        pb.__original_close__, pb.close = (
            pb.close,
            lambda *args, s=pb, **kwargs: s.refresh()
        )
        self.register.append(pb)
        return pb

    def create_dummy(
        self,
        name: str,
        unit: str = ''
    ) -> DummyProgressBar:
        """Create dummy progress bar instance, with the same deferred-close patching."""
        dpb = DummyProgressBar(name=name, unit=unit)
        dpb.__original_close__, dpb.pb.close = (
            dpb.pb.close,
            lambda *args, s=dpb.pb, **kwargs: s.refresh()
        )
        self.register.append(dpb)
        return dpb

    def __enter__(self, *args, **kwargs):
        """Enter context."""
        return self

    def __exit__(self, *args, **kwargs):
        """Exit context and close all progress bars."""
        # Restore-and-invoke each bar's original close so they all vanish together.
        for pb in self.register:
            if hasattr(pb, '__original_close__'):
                pb.__original_close__()
"""Common metrics to calculate performance on single samples."""
from typing import Union
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.preprocessing import LabelBinarizer
from runml_checks import Dataset
from runml_checks.core.errors import runml_checksNotImplementedError
from runml_checks.tabular.utils.task_type import TaskType
def calculate_per_sample_loss(model, task_type: TaskType, dataset: Dataset,
                              classes_index_order: Union[np.array, pd.Series, None] = None) -> pd.Series:
    """Calculate error per sample for a given model and a dataset.

    Parameters
    ----------
    model
        A fitted model exposing ``predict`` (and ``predict_proba`` for classification).
    task_type : TaskType
        Task type of the model; regression uses MSE, anything else uses log loss.
    dataset : Dataset
        Dataset to evaluate on; its index is preserved in the result.
    classes_index_order : Union[np.array, pd.Series, None], default: None
        Class labels in the column order of ``predict_proba``. If None, inferred
        from the model's ``classes_`` attribute.

    Returns
    -------
    pd.Series
        Per-sample loss, indexed like the dataset.
    """
    if task_type == TaskType.REGRESSION:
        # Fix: the original zip bound predictions to `y` and labels to `y_pred`
        # (harmless for symmetric MSE, but misleading).
        predictions = model.predict(dataset.features_columns)
        return pd.Series([metrics.mean_squared_error([y_true], [y_pred]) for y_pred, y_true in
                          zip(predictions, dataset.label_col)], index=dataset.data.index)
    else:
        # Fix: `if not classes_index_order:` raises "truth value of an array is
        # ambiguous" for multi-element numpy arrays / Series; test None explicitly.
        if classes_index_order is None:
            if hasattr(model, 'classes_'):
                classes_index_order = model.classes_
            else:
                raise runml_checksNotImplementedError(
                    'Could not infer classes index order. Please provide them via the classes_index_order '
                    'argument. Alternatively, provide loss_per_sample vector as an argument to the check.')
        proba = model.predict_proba(dataset.features_columns)
        return pd.Series([metrics.log_loss([y], [y_proba], labels=classes_index_order) for
                          y_proba, y in zip(proba, dataset.label_col)], index=dataset.data.index)
def per_sample_cross_entropy(y_true: np.array, y_pred: np.array, eps=1e-15):
    """Calculate cross entropy per sample.

    Improvement: the one-hot encoding of `y_true` is now built directly with
    numpy (`np.eye(n_classes)[y_true]`) instead of sklearn's LabelBinarizer,
    which required a special case for binary problems. Behavior is unchanged
    and the sklearn dependency is dropped.

    Parameters
    ----------
    y_true : np.array
        Integer class labels; y_true[i] = k means the true class of sample i
        corresponds to column k of y_pred.
    y_pred : np.array
        Predicted probabilities of shape (n_samples, n_classes). Rows are
        clipped to [eps, 1 - eps] and renormalized before the loss is computed.
    eps : float, default: 1e-15
        Clipping constant preventing log(0).

    Returns
    -------
    np.ndarray
        Cross entropy loss for each sample.

    Notes
    -----
    This code is based on the code for sklearn log_loss metric, without the averaging over all samples. Licence below:
    BSD 3-Clause License
    Copyright (c) 2007-2021 The scikit-learn developers.
    All rights reserved.
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright notice, this
    list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.
    * Neither the name of the copyright holder nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    """
    y_true = np.array(y_true)
    y_pred = np.asarray(y_pred)
    # One-hot encode y_true against the columns of y_pred: row i of the result
    # has a 1 in column y_true[i]. Equivalent to LabelBinarizer fit on
    # range(n_classes), including the binary (two-class) case.
    transformed_labels = np.eye(y_pred.shape[1])[y_true]
    # clip and renormalize y_pred so each row is a valid distribution
    y_pred = y_pred.astype('float').clip(eps, 1 - eps)
    y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
    return -(transformed_labels * np.log(y_pred)).sum(axis=1)
def per_sample_mse(y_true, y_pred):
    """Calculate the squared error for a single sample (element-wise for arrays).

    (Fix: removed dataset-metadata residue that was fused onto the return line.)

    Parameters
    ----------
    y_true
        True value(s).
    y_pred
        Predicted value(s).

    Returns
    -------
    The squared difference ``(y_true - y_pred) ** 2``.
    """
    return (y_true - y_pred) ** 2
"""Module of preprocessing functions."""
import warnings
# pylint: disable=invalid-name,unused-argument
from collections import Counter
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
from category_encoders import OneHotEncoder
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
from runml_checks.core.errors import runml_checksValueError
from runml_checks.utils.distribution.rare_category_encoder import RareCategoryEncoder
from runml_checks.utils.typing import Hashable
__all__ = ['ScaledNumerics', 'preprocess_2_cat_cols_to_same_bins', 'value_frequency']
# Name of the bin that aggregates categories too rare to keep individually.
OTHER_CATEGORY_NAME = 'Other rare categories'
class ScaledNumerics(TransformerMixin, BaseEstimator):
    """Preprocess given features to scaled numerics.

    Numeric columns are mean-imputed and min-max scaled; categorical columns are
    mode-imputed, rare categories are collapsed, and the result is one-hot encoded.

    Parameters
    ----------
    categorical_columns : List[Hashable]
        Indicates names of categorical columns in features.
    max_num_categories : int
        Indicates the maximum number of unique categories in a single categorical column
        (rare categories will be changed to a form of "other")
    """

    def __init__(self, categorical_columns: List[Hashable], max_num_categories: int):
        # Fitted sub-transformers; all populated in `fit`.
        self.one_hot_encoder = None
        self.rare_category_encoder = None
        self.scaler = None
        self.numeric_imputer = None
        self.categorical_imputer = None
        self.not_cat_columns = None
        self.cat_columns = categorical_columns
        self.max_num_categories = max_num_categories

    def fit(self, X: pd.DataFrame, y=None):
        """Fit scaler based on given dataframe.

        Parameters
        ----------
        X : pd.DataFrame
            Dataframe to fit on.
        y
            Ignored; accepted for scikit-learn API compatibility.

        Returns
        -------
        ScaledNumerics
            The fitted transformer (self), per scikit-learn convention.
            (Fix: `fit` previously returned None, breaking estimator chaining.)
        """
        self.not_cat_columns = list(set(X.columns) - set(self.cat_columns))
        # SimpleImputer doesn't work on all nan-columns, so first replace them  # noqa: SC100
        X = X.apply(ScaledNumerics._impute_whole_series_to_zero, axis=0)
        if self.cat_columns:
            self.categorical_imputer = SimpleImputer(strategy='most_frequent')
            self.categorical_imputer.fit(X[self.cat_columns])
        if self.not_cat_columns:
            self.numeric_imputer = SimpleImputer(strategy='mean')
            self.numeric_imputer.fit(X[self.not_cat_columns])
            self.scaler = MinMaxScaler()
            self.scaler.fit(X[self.not_cat_columns])
        # Replace non-common categories with special value:
        self.rare_category_encoder = RareCategoryEncoder(max_num_categories=self.max_num_categories,
                                                         cols=self.cat_columns)
        self.rare_category_encoder.fit(X)
        # One-hot encode categorical features:
        self.one_hot_encoder = OneHotEncoder(cols=self.cat_columns, use_cat_names=True)
        self.one_hot_encoder.fit(X)
        return self

    def transform(self, X: pd.DataFrame):
        """Transform features into scaled numerics using the fitted sub-transformers."""
        # Impute all-nan cols to all-zero:
        X = X.copy()
        X = X.apply(ScaledNumerics._impute_whole_series_to_zero, axis=0)
        if self.cat_columns:
            X[self.cat_columns] = self.categorical_imputer.transform(X[self.cat_columns])
        if self.not_cat_columns:
            X[self.not_cat_columns] = self.numeric_imputer.transform(X[self.not_cat_columns])
            X[self.not_cat_columns] = self.scaler.transform(X[self.not_cat_columns])
        X = self.rare_category_encoder.transform(X)
        X = self.one_hot_encoder.transform(X)
        return X

    def fit_transform(self, X, y=None, **fit_params):
        """Fit scaler based on given dataframe and then transform it."""
        return self.fit(X).transform(X)

    @staticmethod
    def _impute_whole_series_to_zero(s: pd.Series):
        """If given series contains only nones, return instead series with only zeros."""
        # NOTE(review): the replacement series gets a fresh RangeIndex rather
        # than s.index - confirm callers rely only on positional alignment.
        if s.isna().all():
            return pd.Series(np.zeros(s.shape))
        else:
            return s
def preprocess_2_cat_cols_to_same_bins(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series],
                                       max_num_categories: int = None, sort_by: str = 'difference'
                                       ) -> Tuple[np.ndarray, np.ndarray, List]:
    """
    Align two categorical samples onto one shared set of bins.

    Returns the per-category value counts of both samples (over the same category
    order) together with the list of categories itself. Categorical data only.

    Parameters
    ----------
    dist1: Union[np.ndarray, pd.Series]
        list of values from the first distribution.
    dist2: Union[np.ndarray, pd.Series]
        list of values from the second distribution.
    max_num_categories: int, default: None
        max number of allowed categories. If there are more categories than this number, categories are ordered by
        magnitude and all the smaller categories are binned into an "Other" category.
        If max_num_categories=None, there is no limit.
        > Note that if this parameter is used, the ordering of categories (and by extension, the decision which
        categories are kept by name and which are binned to the "Other" category) is done by default according to the
        values of dist1, which is treated as the "expected" distribution. This behavior can be changed by using the
        sort_by parameter.
    sort_by: str, default: 'difference'
        Specify how categories should be sorted, affecting which categories will get into the "Other" category.
        Possible values:
        - 'dist1': Sort by the largest dist1 categories.
        - 'difference': Sort by the largest difference between categories.
        > Note that this parameter has no effect if max_num_categories = None or there are not enough unique categories.

    Returns
    -------
    dist1_counts
        array of per-category value counts of the first distribution.
    dist2_counts
        array of per-category value counts of the second distribution.
    categories_list
        list of all categories that the counts represent.
    """
    shared_categories = list(set(dist1).union(set(dist2)))
    counter1, counter2 = Counter(dist1), Counter(dist2)
    if max_num_categories is not None and len(shared_categories) > max_num_categories:
        if sort_by == 'dist1':
            ranking = counter1
        elif sort_by == 'difference':
            all_keys = set(counter1.keys()).union(counter2.keys())
            ranking = Counter({key: abs(counter1[key] - counter2[key]) for key in all_keys})
        else:
            raise runml_checksValueError(f'sort_by got unexpected value: {sort_by}')
        # Counter.most_common is not deterministic on ties, so sort explicitly by
        # (count descending, key ascending) before truncating.
        ranked_keys = sorted(ranking.items(), key=lambda item: (-item[1], item[0]))
        categories_list = [key for key, _ in ranked_keys][:max_num_categories]
        # Everything outside the kept categories is folded into the "Other" bin.
        counter1 = {key: counter1[key] for key in categories_list}
        counter1[OTHER_CATEGORY_NAME] = len(dist1) - sum(counter1.values())
        counter2 = {key: counter2[key] for key in categories_list}
        counter2[OTHER_CATEGORY_NAME] = len(dist2) - sum(counter2.values())
        categories_list.append(OTHER_CATEGORY_NAME)
    else:
        categories_list = shared_categories
    # Building both arrays from the same category list aligns them on the same index.
    dist1_counts = np.array([counter1[key] for key in categories_list])
    dist2_counts = np.array([counter2[key] for key in categories_list])
    return dist1_counts, dist2_counts, categories_list
def value_frequency(x: Union[List, np.ndarray, pd.Series]) -> List[float]:
    """
    Compute the relative frequency of each distinct value in x.

    Parameters:
    -----------
    x: Union[List, np.ndarray, pd.Series]
        A sequence of a categorical variable values.

    Returns:
    --------
    List[float]
        One frequency per distinct value, ordered by first appearance in x.
    """
    counts = Counter(x)
    total = len(x)
    return [occurrences / total for occurrences in counts.values()]
"""A module containing utils for plotting distributions."""
import typing as t
from functools import cmp_to_key
from numbers import Number
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from scipy.stats import gaussian_kde
from typing_extensions import Literal as L
__all__ = ['feature_distribution_traces', 'drift_score_bar_traces', 'get_density']
from typing import Dict, List, Tuple
from runml_checks.core.errors import runml_checksValueError
from runml_checks.utils.dataframes import un_numpy
from runml_checks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins
from runml_checks.utils.plot import colors
# For numerical plots, below this number of total unique values we draw bar plots, else KDE
MAX_NUMERICAL_UNIQUE_FOR_BARS = 20
# For numerical plots, where the total unique is above MAX_NUMERICAL_UNIQUE_FOR_BARS, if any of the single
# datasets have unique values above this number, we draw KDE, else we draw bar plots. Should be less than half of
# MAX_NUMERICAL_UNIQUE_FOR_BARS
MAX_NUMERICAL_UNIQUES_FOR_SINGLE_DIST_BARS = 5
def get_density(data, xs) -> np.ndarray:
    """Evaluate a Gaussian KDE of *data* at the points *xs*.

    Parameters
    ----------
    data
        The sample used to fit the kernel density estimate.
    xs : iterable
        List of x values at which the fitted density is evaluated.

    Returns
    -------
    np.array
        The computed pdf values at the points xs.
    """
    # A constant sample would make the KDE covariance matrix singular, so jitter it slightly.
    if len(np.unique(data)) == 1:
        data = data + np.random.normal(scale=10 * np.finfo(np.float32).eps, size=len(data))
    kde = gaussian_kde(data)
    kde.covariance_factor = lambda: .25  # fixed smoothing bandwidth factor
    # pylint: disable=protected-access
    kde._compute_covariance()  # re-derive the covariance with the overridden factor
    return kde(xs)
def drift_score_bar_traces(drift_score: float, bar_max: float = None) -> Tuple[List[go.Bar], Dict, Dict]:
    """Build a traffic-light style horizontal bar for a drift score.

    Each color band that the score reaches contributes one stacked bar segment.

    Parameters
    ----------
    drift_score : float
        Drift score
    bar_max : float , default: None
        Maximum value for the bar

    Returns
    -------
    Tuple[List[go.Bar], Dict, Dict]
        list of plotly bar traces, x-axis layout, y-axis layout.
    """
    color_bands = [((0, 0.1), '#01B8AA'),
                   ((0.1, 0.2), '#F2C80F'),
                   ((0.2, 0.3), '#FE9666'),
                   ((0.3, 1), '#FD625E')]
    bars = []
    for (band_start, band_end), band_color in color_bands:
        # Bands beyond the score contribute nothing.
        if drift_score < band_start:
            break
        bars.append(go.Bar(
            x=[min(drift_score, band_end) - band_start],
            y=['Drift Score'],
            orientation='h',
            marker=dict(
                color=band_color,
            ),
            offsetgroup=0,
            base=band_start,
            showlegend=False
        ))
    # Leave some headroom to the right of the score, capped by bar_max when given.
    axis_end = max(0.4, drift_score + 0.1)
    if bar_max:
        axis_end = min(axis_end, bar_max)
    xaxis = dict(
        showgrid=False,
        gridcolor='black',
        linecolor='black',
        range=[0, axis_end],
        dtick=0.05,
        fixedrange=True
    )
    yaxis = dict(
        showgrid=False,
        showline=False,
        showticklabels=False,
        zeroline=False,
        color='black',
        autorange=True,
        rangemode='normal',
        fixedrange=True
    )
    return bars, xaxis, yaxis
# Allowed orderings of categorical bars; mirrors the `show_categories_by` parameter options below.
CategoriesSortingKind = t.Union[L['train_largest'], L['test_largest'], L['largest_difference']]  # noqa: F821
def feature_distribution_traces(
    train_column: t.Union[np.ndarray, pd.Series],
    test_column: t.Union[np.ndarray, pd.Series],
    column_name: str,
    is_categorical: bool = False,
    max_num_categories: int = 10,
    show_categories_by: CategoriesSortingKind = 'largest_difference',
    quantile_cut: float = 0.02
) -> Tuple[List[go.Trace], Dict, Dict]:
    """Create traces for comparison between train and test column.

    Parameters
    ----------
    train_column
        Train data used to trace distribution.
    test_column
        Test data used to trace distribution.
    column_name
        The name of the column values on the x axis.
    is_categorical : bool , default: False
        State if column is categorical.
    max_num_categories : int , default: 10
        Maximum number of categories to show in plot (default: 10).
    show_categories_by: str, default: 'largest_difference'
        Specify which categories to show for categorical features' graphs, as the number of shown categories is limited
        by max_num_categories_for_display. Possible values:
        - 'train_largest': Show the largest train categories.
        - 'test_largest': Show the largest test categories.
        - 'largest_difference': Show the largest difference between categories.
    quantile_cut : float , default: 0.02
        in which quantile to cut the edges of the plot

    Returns
    -------
    List[Union[go.Bar, go.Scatter]]
        list of plotly traces.
    Dict
        layout of x axis
    Dict
        layout of y axis
    """
    if is_categorical:
        n_of_categories = len(set(train_column).union(test_column))
        range_max = (
            max_num_categories
            if n_of_categories > max_num_categories
            else n_of_categories
        )
        traces, y_layout = _create_distribution_bar_graphs(
            train_column,
            test_column,
            max_num_categories,
            show_categories_by
        )
        xaxis_layout = dict(
            type='category',
            # NOTE:
            # the range, in this case, is needed to fix a problem with
            # too wide bars when there are only one or two of them on
            # the plot, plus it also centralizes them on the plot
            # The min value of the range (range(min, max)) is bigger because
            # otherwise bars will not be centralized on the plot, they will
            # appear on the left part of the plot (that is probably because of zero)
            range=(-3, range_max + 2)
        )
        return traces, xaxis_layout, y_layout
    else:
        train_uniques, train_uniques_counts = np.unique(train_column, return_counts=True)
        test_uniques, test_uniques_counts = np.unique(test_column, return_counts=True)
        x_range = (
            min(train_column.min(), test_column.min()),
            max(train_column.max(), test_column.max())
        )
        # If there are fewer than 20 total unique values, draw a bar graph rather than a KDE
        train_test_uniques = np.unique(np.concatenate([train_uniques, test_uniques]))
        if train_test_uniques.size < MAX_NUMERICAL_UNIQUE_FOR_BARS:
            traces, y_layout = _create_distribution_bar_graphs(train_column, test_column, 20, show_categories_by)
            # Widen the x range so the bars are not glued to the plot edges
            x_range = (x_range[0] - 5, x_range[1] + 5)
            xaxis_layout = dict(ticks='outside', tickmode='array', tickvals=train_test_uniques, range=x_range)
            return traces, xaxis_layout, y_layout
        # Trim the displayed range so outliers don't flatten the interesting part of the plot
        x_range_to_show = (
            min(np.quantile(train_column, quantile_cut), np.quantile(test_column, quantile_cut)),
            max(np.quantile(train_column, 1 - quantile_cut), np.quantile(test_column, 1 - quantile_cut))
        )
        # Heuristically take points on x-axis to show on the plot
        # The intuition is the graph will look "smooth" wherever we will zoom it
        # Also takes mean and median values in order to plot it later accurately
        mean_train_column = np.mean(train_column)
        mean_test_column = np.mean(test_column)
        median_train_column = np.median(train_column)
        median_test_column = np.median(test_column)
        xs = sorted(np.concatenate((
            np.linspace(x_range[0], x_range[1], 50),
            np.quantile(train_column, q=np.arange(0.02, 1, 0.02)),
            np.quantile(test_column, q=np.arange(0.02, 1, 0.02)),
            [mean_train_column, mean_test_column, median_train_column, median_test_column]
        )))
        train_density = get_density(train_column, xs)
        test_density = get_density(test_column, xs)
        bars_width = (x_range_to_show[1] - x_range_to_show[0]) / 100
        traces = []
        # A dataset with very few unique values is drawn as bars even when the other one is a KDE
        if train_uniques.size <= MAX_NUMERICAL_UNIQUES_FOR_SINGLE_DIST_BARS:
            traces.append(go.Bar(
                x=train_uniques,
                y=_create_bars_data_for_mixed_kde_plot(train_uniques_counts, np.max(test_density)),
                width=[bars_width] * train_uniques.size,
                marker=dict(color=colors['Train']),
                name='Train Dataset',
            ))
        else:
            traces.extend(_create_distribution_scatter_plot(xs, train_density, mean_train_column, median_train_column,
                                                            is_train=True))
        if test_uniques.size <= MAX_NUMERICAL_UNIQUES_FOR_SINGLE_DIST_BARS:
            traces.append(go.Bar(
                x=test_uniques,
                y=_create_bars_data_for_mixed_kde_plot(test_uniques_counts, np.max(train_density)),
                width=[bars_width] * test_uniques.size,
                marker=dict(
                    color=colors['Test']
                ),
                name='Test Dataset',
            ))
        else:
            traces.extend(_create_distribution_scatter_plot(xs, test_density, mean_test_column, median_test_column,
                                                            is_train=False))
        xaxis_layout = dict(fixedrange=False,
                            range=x_range_to_show,
                            title=column_name)
        yaxis_layout = dict(title='Probability Density', fixedrange=True)
        return traces, xaxis_layout, yaxis_layout
def _create_bars_data_for_mixed_kde_plot(counts: np.ndarray, max_kde_value: float):
"""When showing a mixed KDE and bar plot, we want the bars to be on the same scale of y-values as the KDE values, \
so we normalize the counts to sum to 4 times the max KDE value."""
normalize_factor = 4 * max_kde_value / np.sum(counts)
return counts * normalize_factor
def _create_distribution_scatter_plot(xs, ys, mean, median, is_train):
    """Build a KDE area trace plus mean and median marker traces for one dataset."""
    dataset = 'Train' if is_train else 'Test'
    line_color = colors[dataset]
    density_trace = go.Scatter(x=xs, y=ys, fill='tozeroy', name=f'{dataset} Dataset',
                               line_color=line_color, line_shape='spline')
    # xs contains the exact mean/median values, so look up their density heights by position.
    mean_height = ys[np.argmax(xs == mean)]
    mean_trace = go.Scatter(x=[mean, mean], y=[0, mean_height], name=f'{dataset} Mean',
                            line=dict(color=line_color, dash='dash'), mode='lines+markers')
    median_height = ys[np.argmax(xs == median)]
    median_trace = go.Scatter(x=[median, median], y=[0, median_height], name=f'{dataset} Median',
                              line=dict(color=line_color), mode='lines')
    return [density_trace, mean_trace, median_trace]
def _create_distribution_bar_graphs(
    train_column: t.Union[np.ndarray, pd.Series],
    test_column: t.Union[np.ndarray, pd.Series],
    max_num_categories: int,
    show_categories_by: CategoriesSortingKind
) -> t.Tuple[t.Any, t.Any]:
    """
    Create distribution bar graphs.

    Categories are ranked per `show_categories_by`, truncated to `max_num_categories`,
    and then displayed sorted by category name on the x axis.

    Returns
    -------
    Tuple[Any, Any]:
        a tuple instance with figures traces, yaxis layout
    """
    expected, actual, categories_list = preprocess_2_cat_cols_to_same_bins(
        dist1=train_column,
        dist2=test_column
    )
    # Convert counts to shares of each dataset
    expected_percents, actual_percents = expected / len(train_column), actual / len(test_column)
    if show_categories_by == 'train_largest':
        sort_func = lambda tup: tup[0]
    elif show_categories_by == 'test_largest':
        sort_func = lambda tup: tup[1]
    elif show_categories_by == 'largest_difference':
        sort_func = lambda tup: np.abs(tup[0] - tup[1])
    else:
        raise runml_checksValueError(
            'show_categories_by must be either "train_largest", "test_largest" '
            f'or "largest_difference", instead got: {show_categories_by}'
        )
    # Sort the lists together according to the parameter show_categories_by (done by sorting zip and then using it again
    # to return the lists to the original 3 separate ones).
    # Afterwards, leave only the first max_num_categories values in each list.
    distribution = sorted(
        zip(expected_percents, actual_percents, categories_list),
        key=sort_func,
        reverse=True
    )
    expected_percents, actual_percents, categories_list = zip(
        *distribution[:max_num_categories]
    )
    # fixes plotly widget bug with numpy values by converting them to native values
    # https://github.com/plotly/plotly.py/issues/3470
    cat_df = pd.DataFrame(
        {'Train dataset': expected_percents, 'Test dataset': actual_percents},
        index=[un_numpy(cat) for cat in categories_list]
    )

    # Creating sorting function which works on both numbers and strings
    def sort_int_and_strings(a, b):
        # If both numbers or both same type using regular operator
        if type(a) is type(b) or (isinstance(a, Number) and isinstance(b, Number)):
            return -1 if a < b else 1
        # Sort numbers before strings
        return -1 if isinstance(a, Number) else 1

    cat_df = cat_df.reindex(sorted(cat_df.index, key=cmp_to_key(sort_int_and_strings)))
    traces = [
        go.Bar(
            x=cat_df.index,
            y=cat_df['Train dataset'],
            marker=dict(color=colors['Train']),
            name='Train Dataset',
        ),
        go.Bar(
            x=cat_df.index,
            y=cat_df['Test dataset'],
            marker=dict(color=colors['Test']),
            name='Test Dataset',
        )
    ]
    yaxis_layout = dict(
        fixedrange=True,
        autorange=True,
        rangemode='normal',
        title='Frequency'
    )
    return traces, yaxis_layout
"""Common utilities for distribution checks."""
from numbers import Number
from typing import Callable, Dict, Hashable, Optional, Tuple, Union
import numpy as np
import pandas as pd
from plotly.subplots import make_subplots
from scipy.stats import chi2_contingency, wasserstein_distance
from runml_checks import ConditionCategory, ConditionResult
from runml_checks.core.errors import runml_checksValueError, NotEnoughSamplesError
from runml_checks.utils.dict_funcs import get_dict_entry_by_value
from runml_checks.utils.distribution.plot import drift_score_bar_traces, feature_distribution_traces
from runml_checks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins
from runml_checks.utils.strings import format_number, format_percent
__all__ = ['calc_drift_and_plot', 'get_drift_method', 'SUPPORTED_CATEGORICAL_METHODS', 'SUPPORTED_NUMERIC_METHODS',
'drift_condition']
# Lower bound applied to bin percentages inside psi() so the log term cannot diverge.
PSI_MIN_PERCENTAGE = 0.01
SUPPORTED_CATEGORICAL_METHODS = ['Cramer\'s V', 'PSI']
SUPPORTED_NUMERIC_METHODS = ['Earth Mover\'s Distance']


def get_drift_method(result_dict: Dict):
    """Return which drift scoring methods were in use.

    Parameters
    ----------
    result_dict : Dict
        the result dict of the drift check (mapping each column/property name to a
        dict that has at least a 'Method' key).

    Returns
    -------
    Tuple(str, str)
        the categorical scoring method and then the numeric scoring method; each is
        None if no entry used a method of that kind.
    """
    result_df = pd.DataFrame(result_dict).T
    # Use .iloc for positional access: the series index holds column names, so the
    # old integer-key lookup ([0]) is ambiguous and deprecated in pandas.
    cat_method_arr = result_df[result_df['Method'].isin(SUPPORTED_CATEGORICAL_METHODS)]['Method']
    cat_method = cat_method_arr.iloc[0] if len(cat_method_arr) else None
    num_method_arr = result_df[result_df['Method'].isin(SUPPORTED_NUMERIC_METHODS)]['Method']
    num_method = num_method_arr.iloc[0] if len(num_method_arr) else None
    return cat_method, num_method
def cramers_v(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series]) -> float:
    """Calculate the bias-corrected Cramer's V statistic between two categorical samples.

    For more on Cramer's V, see https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V
    Uses the Cramer's V bias correction, see http://stats.lse.ac.uk/bergsma/pdf/cramerV3.pdf
    Function is for categorical data only.

    Parameters
    ----------
    dist1 : Union[np.ndarray, pd.Series]
        array of numerical values.
    dist2 : Union[np.ndarray, pd.Series]
        array of numerical values to compare dist1 to.

    Returns
    -------
    float
        the bias-corrected Cramer's V value of the 2 distributions.
    """
    counts1, counts2, _ = preprocess_2_cat_cols_to_same_bins(dist1=dist1, dist2=dist2)
    contingency = pd.DataFrame([counts1, counts2])
    # A single shared category would cause a division by zero below; that's zero drift.
    if contingency.shape[1] == 1:
        return 0
    # Bias-corrected formula, based on
    # https://stackoverflow.com/questions/46498455/categorical-features-correlation/46498792#46498792 # noqa: SC100
    # and reused in other sources
    # (https://towardsdatascience.com/the-search-for-categorical-correlation-a1cf7f1888c9) # noqa: SC100
    chi2_stat = chi2_contingency(contingency)[0]
    total = contingency.sum().sum()
    phi2 = chi2_stat / total
    n_rows, n_cols = contingency.shape
    phi2_corrected = max(0, phi2 - ((n_cols - 1) * (n_rows - 1)) / (total - 1))
    rows_corrected = n_rows - ((n_rows - 1) ** 2) / (total - 1)
    cols_corrected = n_cols - ((n_cols - 1) ** 2) / (total - 1)
    return np.sqrt(phi2_corrected / min((cols_corrected - 1), (rows_corrected - 1)))
def psi(expected_percents: np.ndarray, actual_percents: np.ndarray):
    """
    Calculate the PSI (Population Stability Index).

    See https://www.lexjansen.com/wuss/2017/47_Final_Paper_PDF.pdf

    Parameters
    ----------
    expected_percents: np.ndarray
        array of percentages of each value in the expected distribution.
    actual_percents: : np.ndarray
        array of percentages of each value in the actual distribution.

    Returns
    -------
    psi
        The PSI score
    """
    score = 0
    for i, expected_bin in enumerate(expected_percents):
        # Clamp tiny bins so the log term cannot diverge.
        expected_bin = max(expected_bin, PSI_MIN_PERCENTAGE)
        actual_bin = max(actual_percents[i], PSI_MIN_PERCENTAGE)
        score += (expected_bin - actual_bin) * np.log(expected_bin / actual_bin)
    return score
def earth_movers_distance(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series],
                          margin_quantile_filter: float):
    """
    Calculate the Earth Movers Distance (Wasserstein distance) between two numeric samples.

    See https://en.wikipedia.org/wiki/Wasserstein_metric
    Function is for numerical data only.

    Parameters
    ----------
    dist1: Union[np.ndarray, pd.Series]
        array of numerical values.
    dist2: Union[np.ndarray, pd.Series]
        array of numerical values to compare dist1 to.
    margin_quantile_filter: float
        float in range [0,0.5), representing which margins (high and low quantiles) of the distribution will be filtered
        out of the EMD calculation. This is done in order for extreme values not to affect the calculation
        disproportionally. This filter is applied to both distributions, in both margins.

    Returns
    -------
    Any
        the Wasserstein distance between the two distributions.

    Raises
    -------
    runml_checksValueError
        if the value of margin_quantile_filter is not in range [0, 0.5)
    """
    valid_margin = isinstance(margin_quantile_filter, Number) and 0 <= margin_quantile_filter < 0.5
    if not valid_margin:
        raise runml_checksValueError(
            f'margin_quantile_filter expected a value in range [0, 0.5), instead got {margin_quantile_filter}')
    if margin_quantile_filter != 0:
        # Drop the extreme margins of each sample independently.
        quantiles = [margin_quantile_filter, 1 - margin_quantile_filter]
        low1, high1 = np.quantile(dist1, quantiles)
        low2, high2 = np.quantile(dist2, quantiles)
        dist1 = dist1[(high1 >= dist1) & (dist1 >= low1)]
        dist2 = dist2[(high2 >= dist2) & (dist2 >= low2)]
    overall_max = np.max([np.max(dist1), np.max(dist2)])
    overall_min = np.min([np.min(dist1), np.min(dist2)])
    if overall_max == overall_min:
        return 0
    # Scale both samples into [0, 1] so the distance is comparable across columns.
    dist1 = (dist1 - overall_min) / (overall_max - overall_min)
    dist2 = (dist2 - overall_min) / (overall_max - overall_min)
    return wasserstein_distance(dist1, dist2)
def calc_drift_and_plot(train_column: pd.Series,
                        test_column: pd.Series,
                        value_name: Hashable,
                        column_type: str,
                        plot_title: Optional[str] = None,
                        margin_quantile_filter: float = 0.025,
                        max_num_categories_for_drift: int = 10,
                        max_num_categories_for_display: int = 10,
                        show_categories_by: str = 'largest_difference',
                        categorical_drift_method='cramer_v',
                        min_samples: int = 10,
                        with_display: bool = True) -> Tuple[float, str, Callable]:
    """
    Calculate drift score per column.

    Parameters
    ----------
    train_column: pd.Series
        column from train dataset
    test_column: pd.Series
        same column from test dataset
    value_name: Hashable
        title of the x axis, if plot_title is None then also the title of the whole plot.
    column_type: str
        type of column (either "numerical" or "categorical")
    plot_title: str or None
        if None use value_name as title otherwise use this.
    margin_quantile_filter: float, default: 0.025
        float in range [0,0.5), representing which margins (high and low quantiles) of the distribution will be filtered
        out of the EMD calculation. This is done in order for extreme values not to affect the calculation
        disproportionally. This filter is applied to both distributions, in both margins.
    max_num_categories_for_drift: int, default: 10
        Max number of allowed categories. If there are more, they are binned into an "Other" category.
    max_num_categories_for_display: int, default: 10
        Max number of categories to show in plot.
    show_categories_by: str, default: 'largest_difference'
        Specify which categories to show for categorical features' graphs, as the number of shown categories is limited
        by max_num_categories_for_display. Possible values:
        - 'train_largest': Show the largest train categories.
        - 'test_largest': Show the largest test categories.
        - 'largest_difference': Show the largest difference between categories.
    categorical_drift_method: str, default: "cramer_v"
        decides which method to use on categorical variables. Possible values are:
        "cramer_v" for Cramer's V, "PSI" for Population Stability Index (PSI).
    min_samples: int, default: 10
        Minimum number of samples for each column in order to calculate drift
    with_display: bool, default: True
        if False, skip building the plotly figure and return None in its place.

    Returns
    -------
    Tuple[float, str, Callable]
        drift score of the difference between the two columns' distributions (Earth movers distance for
        numerical, PSI or Cramer's V for categorical)
        name of the scoring method used
        graph comparing the two distributions (density for numerical, stack bar for categorical),
        or None when with_display is False

    Raises
    ------
    NotEnoughSamplesError
        if either column has fewer than min_samples non-null values.
    ValueError
        if categorical_drift_method is not one of "cramer_v", "PSI".
    runml_checksValueError
        if column_type is not one of "numerical", "categorical".
    """
    train_dist = train_column.dropna().values.reshape(-1)
    test_dist = test_column.dropna().values.reshape(-1)
    if len(train_dist) < min_samples or len(test_dist) < min_samples:
        raise NotEnoughSamplesError(f'For drift need {min_samples} samples but got {len(train_dist)} for train '
                                    f'and {len(test_dist)} for test')
    if column_type == 'numerical':
        scorer_name = 'Earth Mover\'s Distance'
        train_dist = train_dist.astype('float')
        test_dist = test_dist.astype('float')
        score = earth_movers_distance(dist1=train_dist, dist2=test_dist, margin_quantile_filter=margin_quantile_filter)
        if not with_display:
            return score, scorer_name, None
        bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score)
        dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist, value_name)
    elif column_type == 'categorical':
        if categorical_drift_method == 'cramer_v':
            scorer_name = 'Cramer\'s V'
            score = cramers_v(dist1=train_dist, dist2=test_dist)
        elif categorical_drift_method == 'PSI':
            scorer_name = 'PSI'
            expected, actual, _ = \
                preprocess_2_cat_cols_to_same_bins(dist1=train_column, dist2=test_column,
                                                   max_num_categories=max_num_categories_for_drift)
            expected_percents, actual_percents = expected / len(train_column), actual / len(test_column)
            score = psi(expected_percents=expected_percents, actual_percents=actual_percents)
        else:
            # Keep ValueError (not runml_checksValueError) for backwards compatibility with callers.
            raise ValueError('Expected categorical_drift_method to be one '
                             f'of ["cramer_v", "PSI"], received: {categorical_drift_method}')
        if not with_display:
            return score, scorer_name, None
        bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score, bar_max=1)
        dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(
            train_dist, test_dist, value_name, is_categorical=True,
            max_num_categories=max_num_categories_for_display,
            show_categories_by=show_categories_by
        )
    else:
        # Should never reach here
        raise runml_checksValueError(f'Unsupported column type for drift: {column_type}')
    all_categories = list(set(train_column).union(set(test_column)))
    add_footnote = column_type == 'categorical' and len(all_categories) > max_num_categories_for_display
    if not add_footnote:
        fig = make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False,
                            row_heights=[0.1, 0.9],
                            subplot_titles=[f'Drift Score ({scorer_name})', 'Distribution Plot'])
    else:
        # Reserve an extra bottom row so the footnote annotation has room.
        fig = make_subplots(rows=3, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False,
                            row_heights=[0.1, 0.8, 0.1],
                            subplot_titles=[f'Drift Score ({scorer_name})', 'Distribution Plot'])
    fig.add_traces(bar_traces, rows=1, cols=1)
    fig.update_xaxes(bar_x_axis, row=1, col=1)
    fig.update_yaxes(bar_y_axis, row=1, col=1)
    fig.add_traces(dist_traces, rows=2, cols=1)
    fig.update_xaxes(dist_x_axis, row=2, col=1)
    fig.update_yaxes(dist_y_axis, row=2, col=1)
    if add_footnote:
        param_to_print_dict = {
            'train_largest': 'largest categories (by train)',
            'test_largest': 'largest categories (by test)',
            'largest_difference': 'largest difference between categories'
        }
        # The first two traces are the train/test bars; their y values are dataset shares.
        train_data_percents = dist_traces[0].y.sum()
        test_data_percents = dist_traces[1].y.sum()
        fig.add_annotation(
            x=0, y=-0.2, showarrow=False, xref='paper', yref='paper', xanchor='left',
            text=f'* Showing the top {max_num_categories_for_display} {param_to_print_dict[show_categories_by]} out of '
                 f'total {len(all_categories)} categories.'
                 f'<br>Shown data is {format_percent(train_data_percents)} of train data and '
                 f'{format_percent(test_data_percents)} of test data.'
        )
    fig.update_layout(
        legend=dict(
            title='Legend',
            yanchor='top',
            y=0.6),
        height=400,
        title=dict(text=plot_title or value_name, x=0.5, xanchor='center'),
        bargroupgap=0)
    return score, scorer_name, fig
def drift_condition(max_allowed_categorical_score: float,
                    max_allowed_numeric_score: float,
                    subject_single: str,
                    subject_multi: str,
                    allowed_num_subjects_exceeding_threshold: int = 0):
    """Create a condition function to be used in drift check's conditions.

    Parameters
    ----------
    max_allowed_categorical_score: float
        Max value allowed for categorical drift
    max_allowed_numeric_score: float
        Max value allowed for numerical drift
    subject_single: str
        String that represents the subject being tested as single (feature, column, property)
    subject_multi: str
        String that represents the subject being tested as multiple (features, columns, properties)
    allowed_num_subjects_exceeding_threshold: int, default: 0
        Determines the number of properties with drift score above threshold needed to fail the condition.
    """
    def condition(result: dict):
        cat_method, num_method = get_drift_method(result)
        # Split the scores by the kind of method that produced them.
        cat_scores = {subject: info['Drift score'] for subject, info in result.items()
                      if info['Method'] in SUPPORTED_CATEGORICAL_METHODS}
        num_scores = {subject: info['Drift score'] for subject, info in result.items()
                      if info['Method'] in SUPPORTED_NUMERIC_METHODS}
        failing_cat = {subject: format_number(score) for subject, score in cat_scores.items()
                       if score >= max_allowed_categorical_score}
        failing_num = {subject: format_number(score) for subject, score in num_scores.items()
                       if score >= max_allowed_numeric_score}
        num_failed = len(failing_cat) + len(failing_num)
        if num_failed <= allowed_num_subjects_exceeding_threshold:
            details = f'Passed for {len(result) - num_failed} {subject_multi} out of {len(result)} {subject_multi}.'
            if cat_scores:
                prop, score = get_dict_entry_by_value(cat_scores)
                details += f'\nFound {subject_single} "{prop}" has the highest categorical drift score: ' \
                           f'{format_number(score)}'
            if num_scores:
                prop, score = get_dict_entry_by_value(num_scores)
                details += f'\nFound {subject_single} "{prop}" has the highest numerical drift score: ' \
                           f'{format_number(score)}'
            return ConditionResult(ConditionCategory.PASS, details)
        details = f'Failed for {num_failed} out of {len(result)} {subject_multi}.'
        if failing_cat:
            details += f'\nFound {len(failing_cat)} categorical {subject_multi} with ' \
                       f'{cat_method} above threshold: {failing_cat}'
        if failing_num:
            details += f'\nFound {len(failing_num)} numeric {subject_multi} with {num_method} above' \
                       f' threshold: {failing_num}'
        return ConditionResult(ConditionCategory.FAIL, details)
    return condition
# pylint: disable=invalid-name
from typing import Tuple
import numpy as np
from sklearn.neighbors import KDTree, KNeighborsClassifier
from runml_checks.utils.logger import get_logger
__all__ = ['TrustScore']
class TrustScore:
"""Calculate trust score.
Parameters
----------
k_filter : int , default: 10
Number of neighbors used during either kNN distance or probability filtering.
alpha : float , default: 0.
Fraction of instances to filter out to reduce impact of outliers.
filter_type : str , default: distance_knn
Filter method; either 'distance_knn' or 'probability_knn'
leaf_size : int , default: 40
Number of points at which to switch to brute-force. Affects speed and memory required to
build trees. Memory to store the tree scales with n_samples / leaf_size.
metric : str , default: euclidean
Distance metric used for the tree. See sklearn's DistanceMetric class for a list of available
metrics.
dist_filter_type : str , default: point
Use either the distance to the k-nearest point (dist_filter_type = 'point') or
the average distance from the first to the k-nearest point in the data (dist_filter_type = 'mean').
"""
    def __init__(self, k_filter: int = 10, alpha: float = 0., filter_type: str = 'distance_knn',
                 leaf_size: int = 40, metric: str = 'euclidean', dist_filter_type: str = 'point') -> None:
        """Initialize the trust score calculator; see the class docstring for parameter meanings."""
        super().__init__()
        self.k_filter = k_filter  # neighbors used by the kNN filters
        self.alpha = alpha  # fraction of instances filtered out as outliers
        self.filter = filter_type  # 'distance_knn', 'probability_knn' or None
        self.eps = 1e-12  # small constant to guard against division by zero
        self.leaf_size = leaf_size  # KDTree leaf size (speed/memory trade-off)
        self.metric = metric  # distance metric name passed to the trees/classifier
        self.dist_filter_type = dist_filter_type  # 'point' or 'mean' (see filter_by_distance_knn)
def filter_by_distance_knn(self, X: np.ndarray) -> np.ndarray:
"""Filter out instances with low kNN density.
Calculate distance to k-nearest point in the data for each instance and remove instances above a cutoff
distance.
Parameters
----------
X : np.ndarray
Data to filter
Returns
-------
np.ndarray
Filtered data
"""
kdtree = KDTree(X, leaf_size=self.leaf_size, metric=self.metric)
k = min(self.k_filter + 1, len(X))
knn_r = kdtree.query(X, k=k)[0] # distances from 0 to k-nearest points
if self.dist_filter_type == 'point':
knn_r = knn_r[:, -1]
elif self.dist_filter_type == 'mean':
knn_r = np.mean(knn_r[:, 1:], axis=1) # exclude distance of instance to itself
cutoff_r = np.percentile(knn_r, (1 - self.alpha) * 100) # cutoff distance
X_keep = X[np.where(knn_r <= cutoff_r)[0], :] # define instances to keep
return X_keep
def filter_by_probability_knn(self, X: np.ndarray, Y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Filter out instances with high label disagreement amongst its k nearest neighbors.

    Parameters
    ----------
    X : np.ndarray
        Data
    Y : np.ndarray
        Predicted class labels

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        Filtered data and labels.
    """
    if self.k_filter == 1:
        get_logger().warning('Number of nearest neighbors used for probability density filtering should '
                             'be >1, otherwise the prediction probabilities are either 0 or 1 making '
                             'probability filtering useless.')
    # fit a kNN classifier and score every training instance against it
    knn_clf = KNeighborsClassifier(n_neighbors=self.k_filter, leaf_size=self.leaf_size, metric=self.metric)
    knn_clf.fit(X, Y)
    max_class_proba = knn_clf.predict_proba(X).max(axis=1)
    # keep only the instances whose top-class probability clears the alpha percentile
    proba_cutoff = np.percentile(max_class_proba, self.alpha * 100)  # cutoff probability
    keep_id = np.where(max_class_proba >= proba_cutoff)[0]  # id's of instances to keep
    return X[keep_id, :], Y[keep_id]
def fit(self, X: np.ndarray, Y: np.ndarray) -> None:
    """Build KDTrees for each prediction class.

    Parameters
    ----------
    X : np.ndarray
        Data.
    Y : np.ndarray
        Target labels, either one-hot encoded or the actual class label.
    """
    # KDTree and kNeighborsClassifier need 2D data
    if len(X.shape) > 2:
        get_logger().warning('Reshaping data from %s to %s so k-d trees can be built.',
                             X.shape, X.reshape(X.shape[0], -1).shape)
        X = X.reshape(X.shape[0], -1)
    # make sure Y represents predicted classes, not one-hot encodings
    if len(Y.shape) > 1:
        # BUG FIX: the class count must be read from the one-hot width *before*
        # argmax collapses Y to 1D. The original read Y.shape[1] after argmax,
        # which raised IndexError for any one-hot encoded input.
        self.classes = Y.shape[1]
        Y = np.argmax(Y, axis=1)
    else:
        self.classes = len(np.unique(Y))
    self.kdtrees = [None] * self.classes
    self.X_kdtree = [None] * self.classes
    if self.filter == 'probability_knn':
        X_filter, Y_filter = self.filter_by_probability_knn(X, Y)
    for c in range(self.classes):
        if self.filter is None:
            X_fit = X[np.where(Y == c)[0]]
        elif self.filter == 'distance_knn':
            X_fit = self.filter_by_distance_knn(X[np.where(Y == c)[0]])
        elif self.filter == 'probability_knn':
            X_fit = X_filter[np.where(Y_filter == c)[0]]
        else:
            raise Exception('self.filter must be one of ["distance_knn", "probability_knn", None]')
        no_x_fit = len(X_fit) == 0
        if no_x_fit or len(X[np.where(Y == c)[0]]) == 0:
            # nothing to build a tree from for this class; warn about the reason
            if no_x_fit and len(X[np.where(Y == c)[0]]) == 0:
                get_logger().warning('No instances available for class %s', c)
            elif no_x_fit:
                get_logger().warning('Filtered all the instances for class %s. Lower alpha or check data.', c)
        else:
            self.kdtrees[c] = KDTree(X_fit, leaf_size=self.leaf_size,
                                     metric=self.metric)  # build KDTree for class c
            self.X_kdtree[c] = X_fit
def score(self, X: np.ndarray, Y: np.ndarray, k: int = 2, dist_type: str = 'point') \
        -> Tuple[np.ndarray, np.ndarray]:
    """Calculate trust scores.

    The trust score is the ratio of the distance to the closest class other than the
    predicted class to the distance to the predicted class.

    Parameters
    ----------
    X : np.ndarray
        Instances to calculate trust score for.
    Y : np.ndarray
        Either prediction probabilities for each class or the predicted class.
    k : int , default: 2
        Number of nearest neighbors used for distance calculation.
    dist_type : str , default: point
        Use either the distance to the k-nearest point (dist_type = 'point') or the average
        distance from the first to the k-nearest point in the data (dist_type = 'mean').

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        Batch with trust scores and the closest not predicted class.
    """
    # make sure Y represents predicted classes, not probabilities
    if len(Y.shape) > 1:
        Y = np.argmax(Y, axis=1)
    # KDTree needs 2D data
    if len(X.shape) > 2:
        X = X.reshape(X.shape[0], -1)
    d = np.tile(None, (X.shape[0], self.classes))  # init distance matrix: [nb instances, nb classes]
    for c in range(self.classes):
        # classes without a usable tree can never be the closest class
        if (self.kdtrees[c] is None) or (self.kdtrees[c].data.shape[0] < k):
            d[:, c] = np.inf
        else:
            d_tmp = self.kdtrees[c].query(X, k=k)[0]  # get k nearest neighbors for each class
            if dist_type == 'point':
                d[:, c] = d_tmp[:, -1]
            elif dist_type == 'mean':
                # BUG FIX: boolean indexing (d_tmp[np.isfinite(d_tmp)]) flattens the
                # array to 1D, so the original nanmean(..., axis=1) raised an
                # AxisError. Replace non-finite distances with NaN and take the
                # row-wise nanmean instead.
                d[:, c] = np.nanmean(np.where(np.isfinite(d_tmp), d_tmp, np.nan), axis=1)
    sorted_d = np.sort(d, axis=1)  # sort distance of each instance in batch over classes
    # get distance to predicted and closest other class and calculate trust score
    d_to_pred = d[range(d.shape[0]), Y]
    d_to_closest_not_pred = np.where(sorted_d[:, 0] != d_to_pred, sorted_d[:, 0], sorted_d[:, 1])
    trust_score = d_to_closest_not_pred / (d_to_pred + self.eps)
    # closest not predicted class
    class_closest_not_pred = np.where(d == d_to_closest_not_pred.reshape(-1, 1))[1]
    return trust_score, class_closest_not_pred
@staticmethod
def process_confidence_scores(baseline_scores: np.ndarray, test_scores: np.ndarray):
    """Mask extreme confidence values with NaN.

    Values farther than ``filter_center_factor`` times the central
    ``filter_center_size``% spread from the medians are treated as outliers.

    Parameters
    ----------
    baseline_scores : np.ndarray
        Baseline confidence values.
    test_scores : np.ndarray
        Test confidence values, or None to process the baseline alone.

    Returns
    -------
    Tuple[np.ndarray, Optional[np.ndarray]]
        Filtered copies of the inputs (test part is None when test_scores is None).
    """
    # code to filter extreme confidence values
    filter_center_factor = 4
    filter_center_size = 40.  # % of data
    # BUG FIX: work on float copies up front. np.nan cannot be assigned into
    # integer arrays (the original cast to float only *after* masking), and the
    # original mutated the caller's arrays in place.
    baseline_confidence = np.asarray(baseline_scores).astype(float)
    if test_scores is None:
        test_confidence = baseline_confidence
    else:
        test_confidence = np.asarray(test_scores).astype(float)
    center_size = max(np.nanpercentile(baseline_confidence, 50 + filter_center_size / 2),
                      np.nanpercentile(test_confidence, 50 + filter_center_size / 2)) - \
        min(np.nanpercentile(baseline_confidence, 50 - filter_center_size / 2),
            np.nanpercentile(test_confidence, 50 - filter_center_size / 2))
    max_median = max(np.nanmedian(baseline_confidence), np.nanmedian(test_confidence))
    min_median = min(np.nanmedian(baseline_confidence), np.nanmedian(test_confidence))
    upper_thresh = max_median + filter_center_factor * center_size
    lower_thresh = min_median - filter_center_factor * center_size
    baseline_confidence[(baseline_confidence > upper_thresh) | (baseline_confidence < lower_thresh)] = np.nan
    test_confidence[(test_confidence > upper_thresh) | (test_confidence < lower_thresh)] = np.nan
    if test_scores is None:
        test_confidence = None
    return baseline_confidence, test_confidence
"""Module of RareCategoryEncoder."""
from collections import defaultdict
from typing import List, Optional
import pandas as pd
from runml_checks.utils.typing import Hashable
__all__ = ['RareCategoryEncoder']
class RareCategoryEncoder:
    """Encodes rare categories into an "other" parameter.

    Note that this encoder assumes data is received as a DataFrame.

    Parameters
    ----------
    max_num_categories : int , default: 10
        Indicates the maximum number of unique categories in a single categorical column
        (rare categories will be changed to a form of "other")
    cols : Optional[List[Hashable]] , default: None
        Columns to limit the encoder to work on. If none are given will work on all columns given in `fit`
    """

    DEFAULT_OTHER_VALUE = 'OTHER_RARE_CATEGORY'

    def __init__(
        self,
        max_num_categories: int = 10,
        cols: Optional[List[Hashable]] = None
    ):
        self.max_num_categories = max_num_categories
        self.cols = cols
        self._col_mapping = None  # per-column value mapping, built by `fit`

    def fit(self, data: pd.DataFrame, y=None):  # noqa # pylint: disable=unused-argument
        """Fit the encoder using given dataframe.

        Parameters
        ----------
        data : pd.DataFrame
            data to fit from
        y :
            Unused, but needed for sklearn pipeline

        Returns
        -------
        RareCategoryEncoder
            self, enabling sklearn-style chaining (backward compatible: the
            previous version returned None and callers ignored the return value).
        """
        # When no explicit column list was given, fit on every column.
        columns = self.cols if self.cols is not None else data.columns
        self._col_mapping = {col: self._fit_for_series(data[col]) for col in columns}
        return self

    def transform(self, data: pd.DataFrame):
        """Transform given data according to columns processed in `fit`.

        Parameters
        ----------
        data : pd.DataFrame
            data to transform

        Returns
        -------
        DataFrame
            transformed data
        """
        if self._col_mapping is None:
            raise RuntimeError('Cannot transform without fitting first')
        if self.cols is not None:
            # only touch the configured columns; copy so the caller's frame is unchanged
            data = data.copy()
            data[self.cols] = data[self.cols].apply(lambda s: s.map(self._col_mapping[s.name]))
        else:
            data = data.apply(lambda s: s.map(self._col_mapping[s.name]))
        return data

    def fit_transform(self, data: pd.DataFrame, y=None):  # noqa # pylint: disable=unused-argument
        """Run `fit` and `transform` on given data.

        Parameters
        ----------
        data : pd.DataFrame
            data to fit on and transform
        y :
            Unused, but needed for sklearn pipeline

        Returns
        -------
        DataFrame
            transformed data
        """
        self.fit(data)
        return self.transform(data)

    def _fit_for_series(self, series: pd.Series):
        # Keep the `max_num_categories` most frequent values; every other value maps
        # to a unique "other" token via the defaultdict's factory (pandas' Series.map
        # honors dict subclasses that define __missing__).
        top_values = list(series.value_counts().head(self.max_num_categories).index)
        other_value = self._get_unique_other_value(series)
        return defaultdict(lambda: other_value, {k: k for k in top_values})

    def _get_unique_other_value(self, series: pd.Series):
        # Append a numeric suffix until the token does not collide with a real category.
        unique_values = list(series.unique())
        other = self.DEFAULT_OTHER_VALUE
        i = 0
        while other in unique_values:
            other = self.DEFAULT_OTHER_VALUE + str(i)
            i += 1
        return other
# NOTE(review): dataset-viewer footer text was fused into this file during
# extraction; commented out so the module stays importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.