# NOTE: dataset-extraction metadata header removed (non-code residue:
# "code | path | quality_prob | learning_prob | filename | kind" table row).
import struct
from enum import Enum
from typing import BinaryIO
class Order(Enum):
    """
    Pre-defined orders exchanged over the serial link.

    Each member's value is the int8 code written to / read from the wire
    (see read_order / write_order below).
    """
    HELLO = 0              # handshake request/response
    SERVO = 1              # followed by an int16 payload (angle)
    MOTOR = 2              # followed by an int8 payload (speed)
    ALREADY_CONNECTED = 3  # peer reports it is already connected
    ERROR = 4              # followed by an int16 payload (error code)
    RECEIVED = 5           # acknowledgement
    STOP = 6               # emergency/normal stop
def read_order(f: BinaryIO) -> Order:
    """
    Read one byte from *f* and map it to an :class:`Order` member.

    :param f: file handler or serial file
    :return: the decoded Order
    """
    raw_code = read_i8(f)
    return Order(raw_code)
def read_i8(f: BinaryIO) -> int:
    """
    Read a single signed byte (little-endian int8_t) from *f*.

    :param f: file handler or serial file
    :return: the decoded int8_t value
    """
    # struct.unpack returns a 1-tuple; [0] extracts the integer.
    # (Fixed return annotation: this returns an int, not an Order.)
    return struct.unpack("<b", bytearray(f.read(1)))[0]
def read_i16(f: BinaryIO) -> int:
    """
    Read a little-endian signed 16-bit integer (int16_t) from *f*.

    :param f: file handler or serial file
    :return: the decoded int16_t value
    """
    # Fixed return annotation: this returns an int, not an Order.
    return struct.unpack("<h", bytearray(f.read(2)))[0]
def read_i32(f: BinaryIO) -> int:
    """
    Read a little-endian signed 32-bit integer (int32_t) from *f*.

    :param f: file handler or serial file
    :return: the decoded int32_t value
    """
    # Annotations added for consistency with read_i8/read_i16.
    return struct.unpack("<l", bytearray(f.read(4)))[0]
def write_i8(f: BinaryIO, value: int) -> None:
    """
    Write *value* to *f* as a signed byte (int8_t).

    Out-of-range values are not written; an error message is printed
    instead (best-effort behaviour, no exception raised).

    :param f: file handler or serial file
    :param value: (int8_t)
    """
    if value < -128 or value > 127:
        print(f"Value error:{value}")
    else:
        f.write(struct.pack("<b", value))
def write_order(f: BinaryIO, order: Order) -> None:
    """
    Serialize an :class:`Order` member by writing its int8 code to *f*.

    :param f: file handler or serial file
    :param order: (Order Enum Object)
    """
    code = order.value
    write_i8(f, code)
def write_i16(f: BinaryIO, value: int) -> None:
    """
    Write *value* to *f* as a little-endian int16_t.

    :param f: file handler or serial file
    :param value: (int16_t)
    """
    packed = struct.pack("<h", value)
    f.write(packed)
def write_i32(f: BinaryIO, value: int) -> None:
    """
    Write *value* to *f* as a little-endian int32_t.

    :param f: file handler or serial file
    :param value: (int32_t)
    """
    packed = struct.pack("<l", value)
    f.write(packed)
def decode_order(f: BinaryIO, byte: int, debug: bool = False) -> None:
    """
    Decode one order byte, reading any payload bytes from *f*, and
    optionally print the resulting human-readable message.

    :param f: file handler or serial file
    :param byte: (int8_t) the order code just read
    :param debug: (bool) whether to print or not received messages
    """
    try:
        order = Order(byte)
        if order == Order.SERVO:
            angle = read_i16(f)
            # Bit representation
            # print('{0:016b}'.format(angle))
            msg = f"SERVO {angle}"
        elif order == Order.MOTOR:
            speed = read_i8(f)
            msg = f"motor {speed}"
        elif order == Order.ERROR:
            error_code = read_i16(f)
            msg = f"Error {error_code}"
        elif order in (Order.HELLO, Order.ALREADY_CONNECTED,
                       Order.RECEIVED, Order.STOP):
            # Payload-less orders: the message is simply the member name.
            msg = order.name
        else:
            # Unreachable in practice: Order(byte) raises ValueError for
            # unknown codes, which lands in the except branch below.
            msg = ""
            print("Unknown Order", byte)
        if debug:
            print(msg)
    except Exception as e:
        print(f"Error decoding order {byte}: {e}")
        print(f"byte={byte:08b}")
import glob
import queue
import sys
from typing import List, Optional
import serial
# From https://stackoverflow.com/questions/6517953/clear-all-items-from-the-queue
class CustomQueue(queue.Queue):
    """
    A queue.Queue subclass that adds a :meth:`clear` method removing
    every pending item in one atomic operation.

    Adapted from
    https://stackoverflow.com/questions/6517953/clear-all-items-from-the-queue
    """

    def clear(self) -> None:
        """
        Atomically remove all pending items from the queue.

        Unfinished-task bookkeeping is updated as if the removed items
        had each been get() + task_done().
        """
        with self.mutex:
            remaining = self.unfinished_tasks - len(self.queue)
            if remaining <= 0:
                if remaining < 0:
                    raise ValueError("task_done() called too many times")
                self.all_tasks_done.notify_all()
            self.unfinished_tasks = remaining
            self.queue.clear()
            self.not_full.notify_all()
# From https://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
def get_serial_ports() -> List[str]:
    """
    Lists serial ports.

    Each candidate port is probed by opening and closing it; only the
    ones that can actually be opened are returned.
    Adapted from
    https://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python

    :return: A list of available serial ports
    """
    platform = sys.platform
    if platform.startswith("win"):
        candidates = ["COM%s" % (i + 1) for i in range(256)]
    elif platform.startswith("linux") or platform.startswith("cygwin"):
        # this excludes your current terminal "/dev/tty"
        candidates = glob.glob("/dev/tty[A-Za-z]*")
    elif platform.startswith("darwin"):
        candidates = glob.glob("/dev/tty.*")
    else:
        raise OSError("Unsupported platform")

    available = []
    for port in candidates:
        try:
            serial.Serial(port).close()
        except (OSError, serial.SerialException):
            continue
        available.append(port)
    return available
def open_serial_port(
    serial_port: Optional[str] = None,
    baudrate: int = 115200,
    timeout: Optional[int] = 0,
    write_timeout: int = 0,
) -> serial.Serial:
    """
    Try to open serial port with Arduino.
    If no port is specified, the first automatically detected port is used.

    :param serial_port: port name; auto-detected via get_serial_ports() when None
    :param baudrate: communication speed in bauds
    :param timeout: read timeout in seconds; None -> blocking mode
    :param write_timeout: write timeout in seconds
    :return: (Serial Object)
    """
    # Open serial port (for communication with Arduino)
    if serial_port is None:
        # NOTE: raises IndexError if no serial port is available.
        serial_port = get_serial_ports()[0]
    # timeout=0 non-blocking mode, return immediately in any case, returning zero or more,
    # up to the requested number of bytes
    # Fixed: pyserial >= 3.0 uses the snake_case keyword 'write_timeout';
    # the camelCase 'writeTimeout' is deprecated.
    return serial.Serial(port=serial_port, baudrate=baudrate, timeout=timeout,
                         write_timeout=write_timeout)
from abc import abstractmethod
import sys
if sys.version_info > (3, 3):
import urllib.parse as parse
else:
import urlparse as parse
import numpy as np
from foolbox.models import DifferentiableModel
from foolbox.attacks import Attack
import foolbox
class HTTPClient(object):
    """Base class for HTTPModel and HTTPAttack."""

    @abstractmethod
    def _encode_array_data(self, array):
        """
        Serialize a numpy array's raw data. Must be implemented by
        subclasses that use _encode_arrays.
        """
        raise NotImplementedError

    @abstractmethod
    def _decode_array_data(self, data, dtype, shape):
        """
        Deserialize raw data back into a numpy array. Must be implemented
        by subclasses that use _decode_arrays.
        """
        raise NotImplementedError

    def _encode_arrays(self, data):
        """
        Can be used by subclasses to encode numpy arrays. To use it,
        subclasses must implement _encode_array_data.
        """
        encoded = {}
        for key, value in data.items():
            if isinstance(value, np.ndarray):
                encoded[key] = {
                    'type': 'array',
                    'shape': value.shape,
                    'dtype': value.dtype.str,
                    'data': self._encode_array_data(value),
                }
            else:
                encoded[key] = value
        return encoded

    def _decode_arrays(self, encoded):
        """
        Can be used by subclasses to decode numpy arrays. To use it,
        subclasses must implement _decode_array_data.
        """
        decoded = {}
        for key, value in encoded.items():
            if hasattr(value, 'get') and value.get('type') == 'array':
                decoded[key] = self._decode_array_data(
                    value['data'], value['dtype'], value['shape'])
            else:
                decoded[key] = value
        return decoded

    @abstractmethod
    def _post(self, path, data):
        """
        Encodes the data dictionary, sends it to the http server using
        an http post request to the url specified by path, decodes
        the result and returns it as a dictionary.
        """
        raise NotImplementedError

    def _get(self, path):
        """
        Performs a get request to the url specified by path and
        returns the result as text. Subclasses can override this
        if necessary.
        """
        response = self.requests.get(self._url(path=path))
        assert response.ok
        return response.text

    @abstractmethod
    def _url(self, path=''):
        raise NotImplementedError
class HTTPModel(DifferentiableModel, HTTPClient):
    """Base class for models that connect to an http server and
    dispatch all requests to that server.

    Parameters
    ----------
    url : str
        The http or https URL of the server.
    """

    def __init__(self, url):
        import requests
        self.requests = requests
        self._base_url = url
        # bounds and channel_axis are fetched first (left-to-right),
        # exactly as the base-class constructor needs them.
        super(HTTPModel, self).__init__(
            bounds=self._remote_bounds(),
            channel_axis=self._remote_channel_axis())
        # Cache the remaining remote properties once at construction time.
        self._dataset = self._remote_dataset()
        self._image_size = self._remote_image_size()
        self._channel_order = self._remote_channel_order()
        self._num_classes = self._remote_num_classes()

    def _url(self, path=''):
        return parse.urljoin(self._base_url, path)

    @property
    def base_url(self):
        return self._base_url

    def _remote_bounds(self):
        lo, hi = self._get('/bounds').split('\n')
        return (float(lo), float(hi))

    def _remote_channel_axis(self):
        return int(self._get('/channel_axis'))

    def _remote_image_size(self):
        return int(self._get('/image_size'))

    def _remote_dataset(self):
        return self._get('/dataset').upper()

    def _remote_channel_order(self):
        return self._get('/channel_order')

    def _remote_num_classes(self):
        return int(self._get('/num_classes'))

    def shutdown(self):
        return self._get('/shutdown')

    def num_classes(self):
        return self._num_classes

    def channel_order(self):
        return self._channel_order

    def image_size(self):
        return self._image_size

    def dataset(self):
        return self._dataset

    def server_version(self):
        return self._get('/server_version')

    def batch_predictions(self, images):
        payload = {'images': np.asarray(images)}
        return self._post('/batch_predictions', payload)['predictions']

    def predictions_and_gradient(self, image, label):
        payload = {'image': np.asarray(image), 'label': np.asarray(label)}
        result = self._post('/predictions_and_gradient', payload)
        return result['predictions'], result['gradient']

    def backward(self, gradient, image):
        payload = {'gradient': np.asarray(gradient), 'image': np.asarray(image)}
        return self._post('/backward', payload)['gradient']
class HTTPAttack(Attack, HTTPClient):
    """Base class for attacks that connect to an http server and
    dispatch all requests to that server.

    Parameters
    ----------
    url : str
        The http or https URL of the server.
    """

    def __init__(self, attack_url, model=None, criterion=None):
        import requests
        self.requests = requests
        self._base_url = attack_url
        # Fixed: use the explicit two-argument super() form for Python 2
        # compatibility (this module still supports Python 2 — see the
        # urlparse fallback at the top of the file — and HTTPModel
        # already uses this form).
        super(HTTPAttack, self).__init__(model=model, criterion=criterion)

    def _url(self, path=''):
        return parse.urljoin(self._base_url, path)

    def shutdown(self):
        s = self._get('/shutdown')
        return s

    def server_version(self):
        s = self._get('/server_version')
        return s

    def _apply(self, a):
        """Run the remote attack on the Adversarial object *a*.

        Only the default setup is supported: MSE distance,
        Misclassification criterion and a BSONModel.
        """
        assert a.image is None
        assert a.distance.value == np.inf
        assert a._distance == foolbox.distances.MSE
        assert isinstance(a._criterion, foolbox.criteria.Misclassification)  # noqa: E501
        assert isinstance(a._model, BSONModel)
        image = np.asarray(a.original_image)
        label = np.asarray(a.original_class)
        model_url = a._model.base_url
        criterion_name = 'Misclassification'
        data = {
            'model_url': model_url,
            'image': image,
            'label': label,
            'criterion_name': criterion_name,
        }
        result = self._post('/run', data)
        adversarial_image = result['adversarial_image']
        if adversarial_image is not None:
            # Feed the adversarial back through the model so the
            # Adversarial object records it and its distance.
            a.predictions(adversarial_image)
            assert a.image is not None
            assert a.distance.value < np.inf
class BSON(object):
    """Mixin providing BSON-based array (de)serialization and the
    HTTP POST transport shared by BSONModel and BSONAttack."""

    def _encode_array_data(self, array):
        """
        Converts a numpy array to bytes.
        """
        return array.tobytes()

    def _decode_array_data(self, data, dtype, shape):
        """
        Converts bytes to a numpy array.
        """
        return np.frombuffer(data, dtype=dtype).reshape(shape)

    def _post(self, path, data):
        import bson
        url = self._url(path=path)
        headers = {'content-type': 'application/bson'}
        payload = bson.dumps(self._encode_arrays(data))
        response = self.requests.post(url, headers=headers, data=payload)
        assert response.ok
        return self._decode_arrays(bson.loads(response.content))
class BSONAttack(BSON, HTTPAttack):
    """
    An attack that connects to an http server and dispatches all
    requests to that server using BSON-encoded http requests.

    All behaviour comes from the BSON transport mixin and HTTPAttack.
    """
    pass
class BSONModel(BSON, HTTPModel):
    """
    A model that connects to an http server and dispatches all
    requests to that server using BSON-encoded http requests.

    All behaviour comes from the BSON transport mixin and HTTPModel.
    """
    pass
from __future__ import print_function
from __future__ import absolute_import
from functools import wraps
import inspect
import os
from io import BytesIO
from flask import Flask
from flask import Response
from flask import request
from PIL import Image
import numpy as np
import bson
from .client import BSONModel
import foolbox
from . import __version__
def mnist_model_server(model, port=None):
    """Starts an HTTP server that provides access to a Foolbox MNIST model.

    Parameters
    ----------
    model : `foolbox.model.Model` instance
        The model that should be run.
    port : int
        The TCP port used by the HTTP server. Defaults to the PORT environment
        variable or 62222 if not set.
    """
    # NOTE(review): image_size is hard-coded to 32 although MNIST images
    # are 28x28 — presumably the benchmark pads/resizes them; confirm.
    return _model_server('MNIST', model, image_size=32, port=port)
def cifar_model_server(model, channel_order, port=None):
    """Starts an HTTP server that provides access to a Foolbox CIFAR model.

    Parameters
    ----------
    model : `foolbox.model.Model` instance
        The model that should be run.
    channel_order : str
        The color channel ordering expected by the model ('RGB' or 'BGR')
    port : int
        The TCP port used by the HTTP server. Defaults to the PORT environment
        variable or 62222 if not set.
    """
    assert channel_order in ('RGB', 'BGR')
    # CIFAR images are always 32x32.
    return _model_server(
        'CIFAR', model,
        channel_order=channel_order,
        image_size=32,
        port=port)
def imagenet_model_server(model, channel_order, image_size, port=None):
    """Starts an HTTP server that provides access to a Foolbox model.

    Parameters
    ----------
    model : `foolbox.model.Model` instance
        The model that should be run.
    channel_order : str
        The color channel ordering expected by the model ('RGB' or 'BGR')
    image_size : int
        The image size expected by the model (e.g. 224 or 299)
    port : int
        The TCP port used by the HTTP server. Defaults to the PORT environment
        variable or 62222 if not set.
    """
    assert channel_order in ('RGB', 'BGR')
    assert isinstance(image_size, int)
    return _model_server(
        'IMAGENET', model,
        channel_order=channel_order,
        image_size=image_size,
        port=port)
def _model_server(
        dataset, model, channel_order=None, image_size=None, port=None):
    """Starts an HTTP server that provides access to a Foolbox model.

    Blocks until the server is shut down (via the /shutdown route).

    Parameters
    ----------
    dataset : str
        The dataset the model is compatible with (MNIST, CIFAR or IMAGENET)
    model : `foolbox.model.Model` instance
        The model that should be run.
    channel_order : str
        The color channel ordering expected by the model
        (None for MNIST, 'RGB' or 'BGR' for CIFAR and ImageNet)
    image_size : int
        The image size expected by the model (for ImageNet only!)
    port : int
        The TCP port used by the HTTP server. Defaults to the PORT environment
        variable or 62222 if not set.
    """
    assert dataset in ['MNIST', 'CIFAR', 'IMAGENET']
    # Resolve the port: explicit argument > PORT env var > default 62222.
    if port is None:
        port = os.environ.get('PORT')
    if port is None:
        port = 62222
    app = Flask(__name__)
    # Wrap the model entry points so they speak BSON over HTTP
    # (_wrap handles request decoding and response encoding).
    _batch_predictions = _wrap(
        model.batch_predictions, ['predictions'])
    _predictions_and_gradient = _wrap(
        model.predictions_and_gradient, ['predictions', 'gradient'])
    _backward = _wrap(
        model.backward, ['gradient'])

    @app.route("/")
    def main():  # pragma: no cover
        return Response(
            'Robust Vision Benchmark Model Server\n',
            mimetype='text/plain')

    @app.route("/server_version", methods=['GET'])
    def server_version():
        v = __version__
        return Response(str(v), mimetype='text/plain')

    @app.route("/dataset", methods=['GET'])
    def r_dataset():
        return Response(dataset, mimetype='text/plain')

    @app.route("/bounds", methods=['GET'])
    def bounds():
        # Two lines: min on the first, max on the second
        # (parsed by HTTPModel._remote_bounds).
        min_, max_ = model.bounds()
        return Response(
            '{}\n{}'.format(min_, max_), mimetype='text/plain')

    @app.route("/channel_axis", methods=['GET'])
    def channel_axis():
        result = model.channel_axis()
        assert result == int(result)
        result = str(int(result))
        return Response(result, mimetype='text/plain')

    @app.route("/num_classes", methods=['GET'])
    def num_classes():
        result = model.num_classes()
        assert result == int(result)
        result = str(int(result))
        return Response(result, mimetype='text/plain')

    @app.route("/image_size", methods=['GET'])
    def r_iimage_size():
        # NOTE(review): handler name has a typo ("r_iimage_size");
        # harmless, since Flask routes by path rather than function name.
        assert int(image_size) == image_size
        return Response(str(image_size), mimetype='text/plain')

    @app.route("/channel_order", methods=['GET'])
    def r_channel_order():
        return Response(channel_order, mimetype='text/plain')

    @app.route("/batch_predictions", methods=['POST'])
    def batch_predictions():
        return _batch_predictions(request)

    @app.route("/predictions_and_gradient", methods=['POST'])
    def predictions_and_gradient():
        return _predictions_and_gradient(request)

    @app.route("/backward", methods=['POST'])
    def backward():
        return _backward(request)

    @app.route("/shutdown", methods=['GET'])
    def shutdown():
        _shutdown_server()
        return 'Shutting down ...'

    # Blocks until the server is stopped.
    app.run(host='0.0.0.0', port=port)
def attack_server(attack, port=None):
    """Starts an HTTP server that provides access to an attack.

    Blocks until the server is shut down (via the /shutdown route).

    Parameters
    ----------
    attack : function or other callable, e.g. a `foolbox.attack.Attack`
        The function or callable (e.g. foolbox attack) that finds
        adversarials for a given instance of the foolbox Adversarial class.
    port : int
        The TCP port used by the HTTP server. Defaults to the PORT environment
        variable or 52222 if not set.
    """
    assert attack is not None
    # Resolve the port: explicit argument > PORT env var > default 52222.
    # (Note: model servers default to 62222, attack servers to 52222 —
    # the docstring previously claimed 62222, which was wrong.)
    if port is None:
        port = os.environ.get('PORT')
    if port is None:
        port = 52222
    app = Flask(__name__)

    def _run(model_url, image, label, criterion_name):
        # transform the arguments into an Adversarial object
        model = BSONModel(model_url)
        assert criterion_name == 'Misclassification'
        criterion = foolbox.criteria.Misclassification()
        adversarial = foolbox.Adversarial(model, criterion, image, label)
        # call the attack with the adversarial object
        attack(adversarial)
        return adversarial.image

    # Wrap so that _run speaks BSON over HTTP.
    _run = _wrap(_run, ['adversarial_image'])

    @app.route("/")
    def main():  # pragma: no cover
        return Response(
            'Robust Vision Benchmark Attack Server\n',
            mimetype='text/plain')

    @app.route("/server_version", methods=['GET'])
    def server_version():
        v = __version__
        return Response(str(v), mimetype='text/plain')

    @app.route("/run", methods=['POST'])
    def run():
        return _run(request)

    @app.route("/shutdown", methods=['GET'])
    def shutdown():
        _shutdown_server()
        return 'Shutting down ...'

    # Blocks until the server is stopped.
    app.run(host='0.0.0.0', port=port)
def _shutdown_server():
    """Stop the running Werkzeug development server.

    NOTE(review): relies on the 'werkzeug.server.shutdown' environ hook,
    which is deprecated/removed in recent Werkzeug releases — confirm
    against the pinned Werkzeug version.
    """
    shutdown_func = request.environ.get('werkzeug.server.shutdown')
    if shutdown_func is None:  # pragma: no cover
        raise RuntimeError('Not running with the Werkzeug Server')
    shutdown_func()
def _wrap(function, output_names):
    """A decorator that converts data between flask and python / numpy.

    Returns a wrapper taking a flask request; arguments are gathered
    (in precedence order) from the BSON body, query string, form fields
    and uploaded files, then the wrapped function's result is returned
    as a BSON response keyed by *output_names*.
    """
    # Inspect the wrapped function's parameters so that unexpected
    # request arguments can be ignored instead of raising TypeError.
    try:
        # Python 3
        sig = inspect.signature(function)
        params = sig.parameters
    except AttributeError:  # pragma: no cover
        # Python 2.7
        argspec = inspect.getargspec(function)
        params = dict(zip(argspec.args, [None] * len(argspec.args)))

    @wraps(function)
    def wrapper(request):
        # NOTE(review): query-string values are strings, so 'verbose' is
        # truthy for any non-empty value, including "false" — confirm.
        verbose = request.args.get('verbose', False)
        if verbose:  # pragma: no cover
            print('headers', request.headers)
            print('args', list(request.args.keys()))
            print('form keys', list(request.form.keys()))
            print('files', list(request.files.keys()))
            print('is_json', request.is_json)
            print('data length', len(request.data))
        content_type = request.headers.get('content-type', '').lower()
        if content_type == 'application/bson':
            bson_args = bson.loads(request.data)
            bson_args = _decode_arrays(bson_args)
        else:  # pragma: no cover
            bson_args = {}
        args = {}

        def add_argument(name, value):
            # Collect arguments, skipping duplicates and names the
            # wrapped function does not accept.
            if name in args:  # pragma: no cover
                print('ignoring {}, argument already exists'.format(name))
                return
            if name not in params:  # pragma: no cover
                print('ignoring {}, not accepted by function'.format(name))
                return
            args[name] = value

        for name, value in bson_args.items():
            add_argument(name, value)
        for name, value in request.args.items():  # pragma: no cover
            add_argument(name, value)
        for name, value in request.form.items():  # pragma: no cover
            add_argument(name, value)
        for name, value in request.files.items():  # pragma: no cover
            if name not in params:
                continue
            data = value.read()
            param = params[name]
            # Parameters annotated as PIL images are decoded from bytes.
            if param is not None and param.annotation == Image.Image:
                data = Image.open(BytesIO(data))
            add_argument(name, data)
        result = function(**args)
        # Normalize the result to a dict keyed by output_names.
        if len(output_names) == 1:
            result = {output_names[0]: result}
        else:
            assert len(result) == len(output_names)
            result = dict(zip(output_names, result))
        result = _encode_arrays(result)
        result = bson.dumps(result)
        return Response(result, mimetype='application/bson')
    return wrapper
def _encode_arrays(d):
for key in list(d.keys()):
if isinstance(d[key], np.ndarray):
array = d[key]
d[key] = {
'type': 'array',
'shape': array.shape,
'dtype': array.dtype.str,
'data': array.tobytes(),
}
return d
def _decode_arrays(d):
for key in list(d.keys()):
if hasattr(d[key], 'get') \
and d[key].get('type') == 'array':
shape = d[key]['shape']
dtype = d[key]['dtype']
data = d[key]['data']
array = np.frombuffer(data, dtype=dtype).reshape(shape)
d[key] = array
return d | /robust_vision_benchmark-0.9.1.tar.gz/robust_vision_benchmark-0.9.1/robust_vision_benchmark/server.py | 0.862583 | 0.356433 | server.py | pypi |
from .utils import *
from .utils import JDEoptim, _psi_conv_cc, _psi2ipsi, _regularize_Mpsi, _convSS, _Mpsi, _Mwgt
from argparse import Namespace
from six import string_types
class NlrobControl():
    """
    Container for the parameter options of the nlrob fitting methods
    ("M", "MM", "tau", "CM" and "mtl").
    """

    def __init__(self,
                 method,
                 psi="bisquare",
                 init="S",
                 optimizer="JDEoptim",
                 fnscale=None,
                 tuning_chi_tau=None,
                 tuning_chi_scale=None,
                 tuning_chi=None,
                 cutoff=2.5,
                 *args, **kwargs
                 ):
        # Default tuning constants for the chi (scale) estimating
        # equations; psi must be redescending here -> no 'huber'.
        _Mchi_tuning_defaults = {
            'bisquare': np.array([1.54764]),
            'welsh': np.array([0.5773502]),
            'ggw': np.array([-0.5, 1.5, np.nan, 0.5]),
            'lqq': np.array([-0.5, 1.5, np.nan, 0.5]),
            'optimal': np.array([0.4047]),
            'hampel': np.array([1.5, 3.5, 8]) * 0.2119163
        }
        # Default tuning constants for the psi (location) functions.
        _Mpsi_tuning_defaults = {
            'huber': np.array([1.345]),
            'bisquare': np.array([4.685061]),
            'welsh': np.array([2.11]),
            'ggw': np.array([-0.5, 1.5, .95, np.nan]),
            'lqq': np.array([-0.5, 1.5, .95, np.nan]),
            'optimal': np.array([1.060158]),
            'hampel': np.array([1.5, 3.5, 8]) * 0.9016085
        }
        self.tuning_psi_M = None
        self.psi = psi
        if method == "M":
            self.method = method
        elif method == "MM":
            self.method = method
            self.init = init
            self.psi = psi
            # Convert the default constants into the internal
            # parametrization expected by the psi/chi routines.
            self.tuning_chi_scale = _psi_conv_cc(psi, _Mchi_tuning_defaults[psi])
            self.tuning_psi_M = _psi_conv_cc(psi, _Mpsi_tuning_defaults[psi])
            self.optimizer = optimizer
            self.fnscale = fnscale
        elif method == "tau":
            self.method = method
            self.psi = psi
            # Fixed: direct assignment instead of `x if x else None`.
            # The old truthiness test crashed on multi-element numpy
            # arrays ("truth value is ambiguous") and silently replaced
            # falsy scalars (e.g. 0) with None.
            self.tuning_chi_tau = tuning_chi_tau
            self.tuning_chi_scale = tuning_chi_scale
            self.fnscale = fnscale
        elif method == "CM":
            self.method = method
            self.psi = psi
            self.tuning_chi = tuning_chi  # see the "tau" branch comment
            self.fnscale = fnscale
        elif method == "mtl":
            self.method = method
            self.fnscale = fnscale
            # cutoff keeps its default when a falsy value is passed.
            self.cutoff = cutoff if cutoff else 2.5
        else:
            raise Exception("Method %s not correctly supported yet" % method)

    def copy(self):
        """Return a shallow copy of this control object."""
        # Local import: the module otherwise relies on wildcard imports,
        # so 'copy' may not be in the module namespace.
        import copy as _copy
        return _copy.copy(self)

    def __str__(self):
        if self.method == "MM":
            string = "self.method = {:}\n".format(self.method)
            string += "self.init = {:}\n".format(self.init)
            string += "self.psi = {:}\n".format(self.psi)
            string += "self.tuning_chi_scale = {:}\n".format(self.tuning_chi_scale)
            string += "self.tuning_psi_M = {:}\n".format(self.tuning_psi_M)
            string += "self.optimizer = {:}\n".format(self.optimizer)
            # Fixed: the original formatted self.optArgs, an attribute
            # that is never set (-> AttributeError on str()).
            string += "self.fnscale = {:}\n".format(self.fnscale)
            return string
        # Fixed: __str__ must return a string for every method; the
        # original implicitly returned None for non-MM methods, which
        # makes str(obj) raise TypeError.
        return "self.method = {:}\n".format(self.method)
def _Mwgt_psi1(psi, cc=None):
    """
    Build a psi-weight function for the given psi name and tuning
    constant(s).

    Returns a function g(x, deriv=0) giving psi(x)/x (the IRLS weight)
    for deriv=0 and psi'(x) for deriv=1.

    :param psi: name of the psi function (e.g. "huber", "bisquare")
    :param cc: tuning constant(s); defaults to the psi-specific default
    """
    # Fixed: removed the bogus `global deriv` statement — `deriv` is a
    # parameter of the inner function, so the global declaration was
    # meaningless and misleading.
    if cc is None:
        # NOTE(review): relies on a module-level `_Mpsi_tuning_default`
        # (presumably provided by `from .utils import *`); verify the
        # name — NlrobControl builds a similar `_Mpsi_tuning_defaults`
        # dict locally, suggesting this may be a NameError in practice.
        cc = _Mpsi_tuning_default[psi]
    ipsi = _psi2ipsi(psi)
    ccc = _psi_conv_cc(psi, cc)

    def weight_fn(x, deriv=0):
        if deriv:
            return _Mpsi(x, ccc, ipsi, deriv)
        return _Mwgt(x, ccc, ipsi)
    return weight_fn
def nlrob(formula, data, start=np.zeros(1),
          lower=np.array([-np.Inf]),
          upper=np.array([np.Inf]),
          weights=None,
          method="MM",
          psi=None,
          scale=None,
          control=None,
          test_vec="resid",
          maxit=20,
          tol=1e-06,
          algorithm="lm", doCov=False, trace=False):
    """
    Fits a nonlinear regression model by robust methods. Per default, by an M-estimator, using iterated reweighted least squares (called "IRLS" or also "IWLS").
    This function returns a dictionary with the results

    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta) (cf. nls). (For some checks: if f(.) is
        linear, then we need parentheses, e.g., y ~ (a + b * x)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    start: pandas.core.frame.DataFrame
        A named numeric vector of starting parameters estimates, only for
        method = "M".
    lower: pandas.core.frame.DataFrame
        numeric vectors of lower and upper bounds; if needed, will be
        replicated to be as long as the longest of start, lower or upper. For
        (the default) method = "M", if the bounds are unspecified all
        parameters are assumed to be unconstrained; also, for method "M",
        bounds can only be used with the "port" algorithm. They are ignored,
        with a warning, in cases they have no effect.
        For methods "CM" and "mtl", the bounds must additionally have an entry
        named "sigma" as that is determined simultaneously in the same
        optimization, and hence its lower bound must not be negative.
    upper: array_like
        numeric vectors of lower and upper bounds; if needed, will be
        replicated to be as long as the longest of start, lower or upper. For
        (the default) method = "M", if the bounds are unspecified all
        parameters are assumed to be unconstrained; also, for method "M",
        bounds can only be used with the "port" algorithm. They are ignored,
        with a warning, in cases they have no effect.
    weights: arrray_like
        An optional vector of weights to be used in the fitting process (for
        intrinsic weights, not the weights w used in the iterative (robust)
        fit). I.e., sum(w * e^2) is minimized with e = residuals,
        e[i] = y[i] - f(xreg[i], theta), where f(x, theta) is the nonlinear
        function, and w are the robust weights from resid * weights.
    method: str
        a character string specifying which method to use. The default is "M", for historical and back-compatibility reasons. For the other methods, primarily see nlrob.algorithms.
        "M"
        Computes an M-estimator, using nls(*, weights=*) iteratively (hence, IRLS) with weights equal to psi(r_i) / r_i, where r_i is the i-the residual from the previous fit.
        "MM"
        Computes an MM-estimator, starting from init, either "S" or "lts".
        "tau"
        Computes a Tau-estimator.
        "CM"
        Computes a "Constrained M" (=: CM) estimator.
        "mtl"
        Compute as "Maximum Trimmed Likelihood" (=: MTL) estimator.
        Note that all methods but "M" are "random", hence typically to be preceded by set.seed() in usage.
    psi: func
        A function of the form g(x, 'tuning constant(s)', deriv) that for deriv=0 returns psi(x)/x and for deriv=1 returns psi'(x). Note that tuning constants can not be passed separately, but directly via the specification of psi, typically via a simple _Mwgt_psi1() call as per default.
    scale: float
        When not None, a positive number specifying a scale kept fixed during
        the iterations (and returned as Scale component).
    test_vec: str
        Character string specifying the convergence criterion. The relative
        change is tested for residuals with a value of "resid" (the default),
        for coefficients with "coef", and for weights with "w".
    maxit: int
        maximum number of iterations in the robust loop.
    tol: float
        non-negative convergence tolerance for the robust fit.
    algorithm: str
        character string specifying the algorithm to use for nls, see there,
        only when method = "M". The default algorithm is a Gauss-Newton
        algorithm.
    doCov: bool
        a logical specifying if nlrob() should compute the asymptotic
        variance-covariance matrix (see vcov) already. This used to be
        hard-wired to TRUE; however, the default has been set to FALSE, as vcov
        (obj) and summary(obj) can easily compute it when needed.
    control: obj
        An optional object of control settings.
    trace: bool
        logical value indicating if a "trace" of the nls iteration progress
        should be printed. Default is False.
        If True, in each robust iteration, the residual sum-of-squares and the
        parameter values are printed at the conclusion of each nls iteration.

    Returns
    -------
    coefficients: array_like
        Coefficients of the regressor
    residuals: array_like
        Difference between the real values and the fitted_values
    fitted_values: array_like
        Estimated values by th regressor

    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Function=NLROBInput.csv")
    >>> # M Method
    >>> method = "M"
    >>> Rfit = nlrob(formula, data, lower, lower, upper, method=method)

    See Also
    --------
    nlrob_MM:
    nlrob_tau:
    nlrob_CM:
    nlrob_mtl:
    """
    hasWgts = not weights is None
    if method != "M":
        # Non-"M" methods are delegated to the dedicated estimators;
        # a fresh control object is built for the requested method.
        control = NlrobControl(method)
        if (hasWgts):
            raise Exception("specifying 'weights' is not yet supported for method %s " % method)
        if not psi is None:
            print("For method = \"%s\", currently 'psi' must be specified via 'control'" % method )

        def fixAns(mod):
            # Post-process the sub-estimator's result: attach the psi
            # weight function and the robustness weights it implies.
            ctrl = mod.get("ctrl")
            if isinstance(ctrl.psi, string_types) and isinstance(ctrl.tuning_psi_M, (int,float)):
                psi = _Mwgt_psi1(ctrl.psi, ctrl.tuning_psi_M)
                res_sc = mod.get("residuals") / mod.get("Scale")
                mod.update({"psi":psi})
                mod.update({"w": psi(res_sc)})
                mod.update({"rweights": psi(res_sc)})
            return mod

        if method == "MM":
            return fixAns(nlrob_MM(formula, data, lower=lower, upper=upper,
                                   tol=tol, ctrl=control))
        elif method == "CM":
            return fixAns(nlrob_CM(formula, data, lower=lower, upper=upper,
                                   tol=tol, ctrl=control))
        elif method == "tau":
            return fixAns(nlrob_tau(formula, data, lower=lower, upper=upper,
                                    tol=tol, ctrl=control))
        elif method == "mtl":
            # NOTE(review): the "mtl" branch dispatches to nlrob_CM,
            # not nlrob_mtl — this looks like a copy-paste bug; confirm.
            return fixAns(nlrob_CM(formula, data, lower=lower, upper=upper,
                                   tol=tol, ctrl=control))
    else:
        # --- method == "M": IRLS with a Huber psi weight ---
        psi = _Mwgt_psi1("huber", cc=1.345)
        updateScale = scale is None
        if not updateScale:
            if isinstance(scale, (float, int)) and scale > 0:
                Scale = scale
            else:
                raise Exception("'scale' must be NULL or a positive number")
        if hasWgts and np.any(weights < 0 or np.isnan(weights).any()):
            raise Exception("'weights' must be nonnegative and not contain NAs")
        # Inject every data column into the module globals so that the
        # formula's right-hand side can be eval()'d directly below.
        # NOTE(review): mutating globals() is fragile — columns whose
        # names collide with module names will be shadowed.
        data_names = data.keys()
        for var in data_names:
            if re.search("\s", var.strip()):
                continue
            globals().update({var:data[var].values})
        pnames = start.keys()
        p0 = start.values[0]
        y = data[formula.split("~")[0].rstrip()].values
        nobs = y.size
        # Rewrite parameter names in the formula as indexed accesses
        # into the starting vector p0 so the expression can be eval()'d.
        right_hand_term = formula.split("~")[1]
        for i, pname in enumerate(pnames):
            right_hand_term = right_hand_term.replace(pname, "p0[%d]" % i)
        fit = eval(right_hand_term)
        resid = y - fit
        # Relative-change criterion (called irls.delta in R's nlrob).
        iris_delta = lambda old, new: np.sqrt(np.sum((old - new)**2)/np.max((1e-20, np.sum(old ** 2))))
        converged = False
        method_exit = False
        status = "converged"
        for iiter in range(maxit):
            if trace:
                print("robust iteration")
            previous = eval(test_vec)
            if updateScale:
                # MAD-based scale estimate (consistent for the normal).
                Scale = np.median(np.abs(resid)) / 0.6745
            if Scale == 0:
                convi = 0
                method_exit = True
                status = "could not compute scale of residuals"
                print(status)
            else:
                # Reweighted nonlinear least squares step.
                w = psi(resid/Scale)
                if hasWgts:
                    w = w * weights
                data.update({"_nlrob_w":w})
                out = nls(formula=formula, data=data, start=start, algorithm=algorithm,
                          lower=lower, upper=upper)
                coef = out.get("coefficients")
                resid = out.get("residuals")
                convi = iris_delta(previous, eval(test_vec))
            converged = convi <= tol
            if converged:
                break
            elif trace:
                print(" --> irls.delta(previous, %s) = %g -- *not* converged\n" % (test_vec, convi))
        if not converged or method_exit:
            st = "failed to converge in %d steps" % maxit
            print(st)
            status = st
        if hasWgts:
            # Report robustness weights net of the intrinsic weights.
            tmp = weights != 0
            w[tmp] = w[tmp] / weights[tmp]
        # NOTE(review): if the very first iteration hits Scale == 0,
        # `w` and `coef` are never assigned and the code below raises
        # NameError; likewise res_sc divides by a zero Scale — confirm.
        res_sc = resid / Scale
        rw = psi(res_sc)
        if not converged or not doCov:
            asCov = None
        else:
            # Asymptotic covariance: (A'WA)^-1 * sigma^2 * tau.
            AtWAinv = np.linalg.inv(out.get("cov"))
            tau = np.mean(rw ** 2) / np.mean(psi(res_sc)) ** 2
            asCov = AtWAinv * Scale ** 2 * tau
        # NOTE(review): "fitted_values" is the *initial* fit (computed
        # before the IRLS loop), not the final fitted values — confirm.
        dictReturn = {"coefficients": coef,
                      "formula": formula,
                      "nobs": nobs,
                      "residuals": resid,
                      "fitted_values": fit,
                      "Scale": Scale,
                      "w": w,
                      "rweights": rw,
                      "cov": asCov,
                      "test_vec": test_vec,
                      "status": status,
                      "iter": iiter,
                      "psi": psi,
                      "data": data
                      }
        return dictReturn
def nlrob_MM(formula, data, lower, upper, tol=1e-6, psi="bisquare", init="S",
             ctrl=NlrobControl("MM")):
    """
    Compute an MM-estimator for nonlinear robust (constrained) regression.
    Returns a dictionary with all the variables of interest
    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    lower: pandas.core.frame.DataFrame
        Dataframe with the initial guesses / lower bounds.
    upper: array_like
        upper bound, the shape could be 1 or the same as lower.
    psi: str
        A function (possibly by name) of the form g(x, 'tuning constant(s)',
        deriv)
        that for deriv=0 returns (x)/x and for deriv=1 returns 0 (x).
    init: str
        Initialization method, one of "S" or "lts".
    ctrl: object
        NlrobControl Class
    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    hessian: array_like
        hessian matrix
    ctrl: object
        NlrobControl Class
    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBMM.Data=Input.csv")
    >>> Rfit_MM = nlrob_MM(formula, data, lower, upper)
    See Also
    --------
    nlrob:
    nlrob_tau:
    nlrob_CM:
    nlrob_mtl:
    """
    if ctrl:
        init = ctrl.init
        psi = ctrl.psi
        c1 = ctrl.tuning_chi_scale
        c2 = ctrl.tuning_psi_M
    if psi == "lqq":
        c12 = c1[0] + c2[1]
        lqqMax = (c1[0] * c1[2] - 2 * c12) / (1 - c1[2]) + c12
    # rho functions for the scale (rho1) and M (rho2) steps.
    rho1 = lambda t: Mchi(t, c1, psi)
    rho2 = lambda t: Mchi(t, c2, psi)
    # Inverse of rho1, used to bracket the M-scale root-finding below.
    if psi == "bisquare":
        rho_inv = lambda y: c1 * np.sqrt(1 - (1 - y) ** (0.3333333))
    elif psi == "lqq":
        rho_inv = lambda y: np.array(brentq(lambda x: rho1(x) - y, 0, lqqMax))
    elif psi == "optimal":
        rho_inv = lambda y: np.sqrt(y / 1.38) * c1 * 3
    elif psi == "hampel":
        def rho_inv(y):
            # Piecewise inverse matching hampel's three-segment rho.
            C = MrhoInf(c1, psi)
            a = c1[0]
            b = c1[1]
            r = c1[2]
            if a / C > y:
                return np.sqrt(2 * C * y)
            elif (2 * b - a) / C > y:
                return 0.5 * a + C / a * y
            else:
                return r + np.sqrt(r ** 2 - ((r - b) * (2 * C / a * y + (b - a))
                                             - b * r))
    else:
        raise Exception("Psi function '%s' not supported yet" % psi)
    # `nobs` is assigned further down; the lambda only reads it when called.
    M_scale = lambda sigma, u: np.sum(rho1(u / sigma)) / nobs - 0.5
    globals().update({"M_scale": M_scale})
    # Defining local variables of functions: export each (whitespace-free)
    # data column into module globals so the formula can be eval()'d.
    data_vars = data.keys()
    for var in data_vars:
        if re.search(r"\s", var.strip()):
            continue
        globals().update({var: data[var].values})
    par = lower.keys()
    y = data[formula.split("~")[0].rstrip()].values
    right_hand_term = formula.split("~")[1]
    # Substitute each parameter name with an index into the optimizer's vector.
    for i, pname in enumerate(par):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    globals().update({"right_hand_term": right_hand_term})
    if init == "lts":
        def objective_initial(vector):
            # Least-trimmed-squares criterion over the h smallest residuals.
            global right_hand_term
            y_hat = eval(right_hand_term)
            return np.sum(np.sort(y - y_hat)[:h] ** 2)
    elif init == "S":
        def objective_initial(vector):
            # S-estimate: the M-scale of the residuals.
            global right_hand_term
            y_hat = eval(right_hand_term)
            res = y - y_hat
            med_abs_res = np.median(np.abs(res))
            return np.array(brentq(M_scale, constant[0] * med_abs_res, constant[1] * med_abs_res, args=(res)))
    else:
        raise Exception("Initialization 'init = \"%s\"' not supported (yet)" % init)
    def objective_M(vector, sigma):
        # M-step criterion at fixed scale sigma.
        global right_hand_term
        y_hat = eval(right_hand_term)
        return np.sum(rho2((y - y_hat) / sigma))
    def fminfn(p, sigma):
        global fnscale
        global parscale
        return objective_M(p * parscale, sigma) / fnscale
    def fmingr(p, sigma):
        # Central finite-difference gradient of fminfn.
        # NOTE(review): both evaluations below call objective_M on the
        # unperturbed `p` (the perturbed values are stored in `x`, which is
        # never used), so df comes out zero; confirm against R's optim()
        # numerical gradient before relying on this jacobian.
        global fnscale
        global parscale
        x = np.zeros_like(p)
        df = np.zeros_like(p)
        for i in range(p.size):
            epsused = eps = 1e-3
            tmp = p[i] + eps
            if tmp > upper[i]:
                tmp = upper[i]
                epsused = tmp - p[i]
            x[i] = tmp * parscale[i]
            s = objective_M(p, sigma)
            val1 = s / fnscale
            tmp = p[i] - eps
            if tmp < lower[i]:
                tmp = lower[i]
                eps = p[i] - tmp
            x[i] = tmp * parscale[i]
            s = objective_M(p, sigma)
            val2 = s / fnscale
            df[i] = (val1 - val2) / (epsused + eps)
            # np.Inf was removed in NumPy 2.0; use np.inf.
            if df[i] == np.inf or df[i] == -np.inf:
                # was: "... [%d]" % i + 1  -> TypeError (str + int); the index
                # must be formatted as (i + 1).
                raise Exception("non-finite finite-difference value [%d]" % (i + 1))
            x[i] = p[i] * parscale[i]
        return df
    npar = len(par)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.sum((y - np.mean(y)) ** 2)
    globals().update({"fnscale": fnscale})
    # Bracketing constants for the M-scale root-finder (brentq).
    if psi == "bisquare":
        constant = [1 / c1]
    elif psi == "lqq":
        constant = [1 / lqqMax]
    elif psi == "optimal":
        constant = [1 / c1 * 1 / 3]
    elif psi == "hampel":
        constant = [1 / c1[2]]
    constant.append(2 / rho_inv(2 / (nobs + 2)) if nobs % 2 else 1 / rho_inv(1 / (nobs + 1)))
    globals().update({"constant": constant})
    if init == "lts":
        # Number of residuals kept by the LTS criterion.
        h = (nobs + npar + 1) // 2
    if npar > upper.size:
        upper = np.repeat(upper, npar)
    initial = JDEoptim(lower.values[0], upper, objective_initial, tol=tol,
                       fnscale=fnscale)
    parscale = initial.get("par")
    globals().update({"parscale": parscale})
    for var in par:
        exec("%s = lower['%s'].values" % (var, var), globals(), locals())
    res = y - eval(formula.split("~")[1])
    med_abs_res = np.median(np.abs(res))
    sigma = np.array(brentq(M_scale, constant[0] * med_abs_res, constant[1] * med_abs_res, args=(res)))
    lower = lower.values.ravel()
    bounds = Bounds(lb=lower, ub=upper)
    globals().update({"sigma": sigma})
    M = minimize(fminfn, initial.get("par"), jac=fmingr, args=sigma, method='L-BFGS-B',
                 bounds=bounds, tol=tol)
    coef = dict(zip(par, M.x))
    if M.status == 0:
        status = "converged"
    elif M.status == 1:
        status = "maximum number of iterations reached without convergence"
    else:
        status = M.message
    for var in par:
        exec("%s = coef['%s']" % (var, var), globals(), locals())
    try:
        hess = np.linalg.inv(M.hess_inv.todense())
    except Exception:  # hess_inv may not expose todense() / may be singular
        hess = None
    vector = M.x
    fit = eval(right_hand_term)
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": y - fit,
                  "ctrl": ctrl,
                  "crit": M.fun,
                  "initial": initial,
                  "Scale": sigma,
                  "status": status,
                  "hessian": hess}
    return dictReturn
def nlrob_tau(formula, data, lower, upper, tol=1e-6, psi="bisquare",
              ctrl=NlrobControl("tau"), tuning_chi_scale=None,
              tuning_chi_tau=None):
    """
    Computes a Tau-estimator for nonlinear robust (constrained) regression.
    Returns a dictionary with all the variables of interest
    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    lower: pandas.core.frame.DataFrame
        Dataframe with the initial guesses
    upper: array_like
        upper bound, the shape could be 1 or the same as lower.
    psi: str
        A function (possibly by name) of the form g(x, 'tuning constant(s)',
        deriv)
        that for deriv=0 returns (x)/x and for deriv=1 returns 0 (x).
    ctrl: object
        NlrobControl Class
    tuning_chi_scale: dict, optional
        Override for the scale chi tuning constants ({"b": ..., "cc": ...});
        normally supplied through `ctrl`.
    tuning_chi_tau: dict, optional
        Override for the tau chi tuning constants; normally supplied
        through `ctrl`.
    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    ctrl: object
        NlrobControl Class
    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBTAU.Data=Input.csv")
    >>> Rfit_tau = nlrob_tau(formula, data, lower, upper)
    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_CM:
    nlrob_mtl:
    """
    if ctrl:
        psi = ctrl.psi
    # Defining local variables of functions: export each (whitespace-free)
    # data column into module globals so the formula can be eval()'d.
    data_names = data.keys()
    for var in data_names:
        if re.search(r"\s", var.strip()):
            continue
        globals().update({var: data[var].values})
    # Tuning constants: honour values supplied through `ctrl`. The original
    # code left _chi_s/_chi_t unbound (NameError) whenever they were provided.
    if ctrl.tuning_chi_scale is not None:
        _chi_s = ctrl.tuning_chi_scale
    elif psi == "bisquare":
        _chi_s = {"b": 0.2, "cc": 1.55}
    elif psi == "optimal":
        _chi_s = {"b": 0.5, "cc": 0.405}
    else:
        raise Exception("Psi function '%s' not supported yet" % psi)
    if ctrl.tuning_chi_tau is not None:
        _chi_t = ctrl.tuning_chi_tau
    elif psi == "bisquare":
        _chi_t = {"b": 0.46, "cc": 6.04}
    elif psi == "optimal":
        _chi_t = {"b": 0.128, "cc": 1.06}
    b1 = _chi_s.get("b")
    c1 = _chi_s.get("cc")
    b2 = _chi_t.get("b")
    c2 = _chi_t.get("cc")
    if psi == "bisquare":
        b1 = b1 / MrhoInf(c1, psi)
        # NOTE(review): robustbase normalises b2 by MrhoInf(c2, psi); using c1
        # here looks like a copy-paste slip -- confirm before changing.
        b2 = b2 / MrhoInf(c1, psi)
    rho1 = lambda t: Mchi(t, c1, psi)
    rho2 = lambda t: Mchi(t, c2, psi)
    # Inverse of rho1, used to bracket the M-scale root-finding below.
    if psi == "bisquare":
        rho_inv = lambda y: c1 * np.sqrt(1 - (1 - y) ** (1 / 3))
    elif psi == "optimal":
        rho_inv = lambda y: np.sqrt(y / 1.38) * c1 * 3
    # `nobs` is assigned below, before these lambdas are first called.
    M_scale = lambda sigma, u: np.sum(rho1(u / sigma)) / nobs - b1
    tau_scale2 = lambda u, sigma: sigma ** 2 * 1 / b2 * np.sum(rho2(u / sigma)) / nobs
    par = lower.keys()
    y = data[formula.split("~")[0].rstrip()].values
    right_hand_term = formula.split("~")[1]
    # Substitute each parameter name with an index into the optimizer's vector.
    for i, pname in enumerate(par):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    def objective(vector):
        # Tau-scale criterion: M-scale of residuals, then the tau scale.
        fit = eval(right_hand_term)
        res = y - fit
        med_abs_res = np.median(np.abs(res))
        sigma = np.array(brentq(M_scale, constant[0] * med_abs_res,
                                constant[1] * med_abs_res, args=(res)))
        return tau_scale2(res, sigma)
    npar = len(par)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.mean((y - np.mean(y)) ** 2)
    # Bracketing constants for the M-scale root-finder (brentq).
    if psi == "bisquare":
        constant = [1 / c1]
    elif psi == "optimal":
        constant = [1 / c1 * 1 / 3]
    constant.append(2 / rho_inv(2 / (nobs + 2)) if nobs % 2 else 1 / rho_inv(1 / (nobs + 1)))
    if npar > upper.size:
        upper = np.repeat(upper, npar)
    optRes = JDEoptim(lower.values[0], upper, objective, tol=tol,
                      fnscale=fnscale)
    it = optRes.get("iter")
    status = "converged" if optRes.get("convergence") == 0 else "failed to convergence in %d steps" % it
    coef = dict(zip(lower.keys(), optRes.get("par")))
    vector = optRes.get("par")
    fit = eval(right_hand_term)
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": y - fit,
                  "crit": optRes.get("value"),
                  "initial": optRes,
                  "Scale": np.sqrt(optRes.get("value")),
                  "status": status,
                  "iter": it,
                  "ctrl": ctrl}
    return dictReturn
def nlrob_CM(formula, data, lower, upper, tol=1e-6, psi="bisquare",
             ctrl=NlrobControl("CM")):
    """
    Compute a CM-estimator for nonlinear robust (constrained) regression.
    Returns a dictionary with all the variables of interest
    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    lower: pandas.core.frame.DataFrame
        Dataframe with the initial guesses
    upper: array_like
        upper bound, the shape could be 1 or the same as lower.
    psi: str
        A function (possibly by name) of the form g(x, 'tuning constant(s)',
        deriv)
        that for deriv=0 returns (x)/x and for deriv=1 returns 0 (x).
    ctrl: object
        NlrobControl Class
    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    ctrl: object
        NlrobControl Class
    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBCM.Data=Input.csv")
    >>> Rfit_CM = nlrob_CM(formula, data, lower, upper)
    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_tau:
    nlrob_mtl:
    """
    if ctrl:
        psi = ctrl.psi
    if psi == "bisquare":
        t_chi = {"b": 0.5, "cc": 1, "c": 4.835}
    else:
        # The original left t_chi unbound (NameError) for any other psi.
        raise Exception("Psi function '%s' not supported yet" % psi)
    b = t_chi.get("b")
    c = t_chi.get("c")
    cc = t_chi.get("cc")
    rho = lambda t: Mchi(t, cc, psi)
    # `nobs` is assigned below, before M_scale is first called.
    M_scale = lambda sigma, u: np.sum(rho(u / sigma)) / nobs - b
    # Defining local variables of functions: export each (whitespace-free)
    # data column into module globals so the formula can be eval()'d.
    data_names = data.keys()
    for var in data_names:
        if re.search(r"\s", var.strip()):
            continue
        globals().update({var: data[var].values})
    # The original comprehension's filter was a tautology (every name drawn
    # from lower.keys() satisfies it), so this is simply the parameter list.
    pnames = list(lower.keys())
    y = data[formula.split("~")[0].rstrip()].values
    right_hand_term = formula.split("~")[1]
    # Substitute each parameter name with an index into the optimizer's vector.
    for i, pname in enumerate(pnames):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    if "sigma" in pnames:
        if "sigma" in formula.split("~")[1] or "sigma" in data.keys():
            raise Exception("As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'")
        def objective(vector):
            # sigma is optimized jointly as the last parameter.
            fit = eval(right_hand_term)
            sigma = vector[-1]
            return c * np.sum(rho((y - fit) / sigma)) / nobs + np.log(sigma)
        def con(vector):
            # Constraint: the optimized sigma must satisfy the M-scale equation.
            fit = eval(right_hand_term)
            return M_scale(vector[-1], y - fit)
        else_branch = None  # (unused marker, kept out of the return payload)
    else:
        def objective(vector):
            # sigma estimated by the MAD of the residuals at each evaluation.
            fit = eval(right_hand_term)
            res = y - fit
            sigma = np.median(np.abs(res - np.median(res)))
            return c * np.sum(rho(res / sigma)) / nobs + np.log(sigma)
        con = None
    npar = len(pnames)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.mean((y - np.mean(y)) ** 2)
    if npar > upper.size:
        upper = np.repeat(upper, lower.size)
    optRes = JDEoptim(lower.values[0], upper, objective, tol=tol,
                      fnscale=fnscale, constr=con)
    it = optRes.get("iter")
    status = "converged" if optRes.get("convergence") == 0 else "failed to convergence in %d steps" % it
    coef = dict(zip(lower.keys(), optRes.get("par")))
    vector = optRes.get("par")
    fit = eval(right_hand_term)
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": y - fit,
                  "crit": optRes.get("value"),
                  "status": status,
                  "iter": it,
                  "ctrl": ctrl}
    return dictReturn
def nlrob_mtl(formula, data, lower, upper, tol=1e-6, psi="bisquare",
              ctrl=NlrobControl("mtl")):
    """
    Compute a mtl-estimator for nonlinear robust (constrained) regression
    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    lower: pandas.core.frame.DataFrame
        Dataframe with the initial guesses
    upper: array_like
        upper bound, the shape could be 1 or the same as lower.
    psi: str
        A function (possibly by name) of the form g(x, 'tuning constant(s)',
        deriv)
        that for deriv=0 returns (x)/x and for deriv=1 returns 0 (x).
    ctrl: object
        NlrobControl Class
    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    ctrl: object
        NlrobControl Class
    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBMTL.Data=Input.csv")
    >>> Rfit_mtl = nlrob_mtl(formula, data, lower, upper)
    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_tau:
    nlrob_CM:
    """
    from scipy.stats import norm  # standard normal CDF; pnorm() in robustbase
    if ctrl:
        cutoff = ctrl.cutoff
    def trim(t):
        # Adaptive trimming: from the sorted absolute residuals decide how
        # many (h) to keep, based on how many exceed `cutoff`.
        t = np.sort(t)
        i = np.where(t >= cutoff)[0]
        if i.size:
            # was np.random.normal(t[i]) -- drawing random samples made the
            # trimming nondeterministic; robustbase's formula uses pnorm
            # (the standard normal CDF) here.
            # NOTE(review): `(i - 1)` mirrors R's 1-based ranks; with 0-based
            # numpy indices this may be off by one -- confirm vs robustbase.
            partial_h = np.min((i - 1) / (2 * norm.cdf(t[i]) - 1))
            # int() so `h` can be used directly as a slice bound by callers.
            h = int(max(hlow, np.floor(partial_h)))
        else:
            h = nobs
        return {"h": h, "t": t}
    # Defining local variables of functions: export each (whitespace-free)
    # data column into module globals so the formula can be eval()'d.
    data_names = data.keys()
    for var in data_names:
        if re.search(r"\s", var.strip()):
            continue
        globals().update({var: data[var].values})
    # The original comprehension's filter was a tautology (every name drawn
    # from lower.keys() satisfies it), so this is simply the parameter list.
    pnames = list(lower.keys())
    y = data[formula.split("~")[0].rstrip()].values
    right_hand_term = formula.split("~")[1]
    # Substitute each parameter name with an index into the optimizer's vector.
    for i, pname in enumerate(pnames):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    constant = np.log(2 * np.pi)
    if "sigma" in pnames:
        if "sigma" in formula.split("~")[1] or "sigma" in data.keys():
            raise Exception("As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'")
        def objective(vector):
            # sigma is optimized jointly as the last parameter.
            fit = eval(right_hand_term)
            sigma = vector[-1]
            tp = trim(np.abs((y - fit) / sigma))
            h = tp.get("h")
            return h * (constant + 2 * np.log(sigma)) + np.sum(tp.get("t")[:h] ** 2)
    else:
        def objective(vector):
            # sigma estimated by the MAD of the residuals at each evaluation.
            fit = eval(right_hand_term)
            res = y - fit
            sigma = np.median(np.abs(res - np.median(res)))
            tp = trim(np.abs(res / sigma))
            h = tp.get("h")
            return h * (constant + 2 * np.log(sigma)) + np.sum(tp.get("t")[:h] ** 2)
    npar = len(pnames)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.mean((y - np.mean(y)) ** 2)
    if npar > upper.size:
        upper = np.repeat(upper, lower.size)
    # Smallest admissible number of retained residuals.
    hlow = (nobs + npar + 1) // 2
    optRes = JDEoptim(lower.values[0], upper, objective, tol=tol,
                      fnscale=fnscale)
    it = optRes.get("iter")
    status = "converged" if optRes.get("convergence") == 0 else "failed to convergence in %d steps" % it
    coef = dict(zip(lower.keys(), optRes.get("par")))
    vector = optRes.get("par")
    fit = eval(right_hand_term)
    res = y - fit
    quan = trim(res / (coef["sigma"] if ("sigma" in pnames) else np.median(np.abs(res - np.median(res))))).get("h")
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": res,
                  "crit": optRes.get("value"),
                  "quan": quan,
                  "status": status,
                  "iter": it,
                  "ctrl": ctrl}
    return dictReturn
def nls(formula, data, start, algorithm="lm",
        weights=None, lower=np.array([-np.inf]), upper=np.array([np.inf])):
    """
    Determine the nonlinear (weighted) least-squares estimates of the parameters of a nonlinear model.
    Parameters
    ----------
    formula: str
        a nonlinear model formula including variables and parameters. Will be coerced to a formula if necessary.
    data: pandas.core.frame.DataFrame
        Data frame in which to evaluate the variables in formula and weights. Can also be a list or an environment, but not a matrix.
    start: pandas.core.frame.DataFrame
        A vector of starting estimates.
    algorithm: str
        Character string specifying the algorithm to use. The default algorithm is a "lm" algorithm. Other possible values are 'trf', 'dogbox'
    weights: array_like, optional
        Currently accepted for API compatibility but not used.
    lower: scalar, array_like
        Lower bounds on Parameters. An array with the length equal
        to the number of parameters, or a scalar (in which case the bound is
        taken to be the same for all parameters.)
    upper: scalar, array_like
        Upper bounds on Parameters. An array with the length equal
        to the number of parameters, or a scalar (in which case the bound is
        taken to be the same for all parameters.)
    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    residuals: array_like
        numeric vector of the residuals.
    cov: array_like
        covariance matrix
    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> start = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> data = pandas.read_csv("Submodule=NLS.Data=Input.csv")
    >>> Rfit_nls = nls(formula, data, start)
    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_tau:
    nlrob_CM:
    """
    # Export each (whitespace-free) data column into module globals so the
    # generated model function's formula can reference it by name.
    for var in data.keys():
        if re.search(r"\s", var.strip()):
            continue
        globals().update({var: data[var].values})
    y = data[formula.split("~")[0].rstrip()].values
    right_hand_term = formula.split("~")[1]
    # The independent variable is the first data column whose name appears
    # (as a substring) in the right-hand side of the formula.
    x = None
    for pname in data.keys():
        if pname in right_hand_term:
            x = pname
            break
    if x is None:
        raise Exception("No data column found in the right-hand side of 'formula'")
    pnames = start.keys()
    p0 = start.values[0]
    # Broadcast scalar bounds to the number of parameters.
    if lower.any():
        if lower.size != p0.size:
            lower = np.repeat(lower, p0.size)
    if upper.any():
        if upper.size != p0.size:
            upper = np.repeat(upper, p0.size)
    bounds = (lower, upper)
    def get_func():
        # Build func(x, p1, p2, ...) that evaluates the formula's RHS.
        env = {"np": np}
        code = "def func(%s, %s):\n" % (x, ", ".join(pnames))
        code += "    return %s\n" % right_hand_term
        exec(code, env)
        return env.get("func")
    func = get_func()
    # Call curve_fit directly on the column array. The original eval()-based
    # call resolved the column name against locals() first, which broke
    # whenever a column shared its name with a local variable (e.g. "x").
    xdata = globals()[x]
    par, cov = curve_fit(func, xdata, y, p0, bounds=bounds, method=algorithm)
    fit = func(xdata, *par)
    res = y - fit
    dictReturn = {"coefficients": dict(zip(start.keys(), par)),
                  "residuals": res,
                  "cov": cov}
    return dictReturn
import logging
from concurrent.futures.process import ProcessPoolExecutor
from typing import Dict, List, NamedTuple, Optional, Type, Union
from hikaru.model.rel_1_26 import DaemonSet, HorizontalPodAutoscaler, Job, Node, NodeList, StatefulSet
from pydantic.main import BaseModel
from robusta.core.model.env_vars import ALERT_BUILDER_WORKERS, ALERTS_WORKERS_POOL
from robusta.core.model.events import ExecutionBaseEvent
from robusta.core.playbooks.base_trigger import BaseTrigger, TriggerEvent
from robusta.core.reporting.base import Finding
from robusta.integrations.helper import exact_match, prefix_match
from robusta.integrations.kubernetes.custom_models import RobustaDeployment, RobustaJob, RobustaPod
from robusta.integrations.prometheus.models import PrometheusAlert, PrometheusKubernetesAlert
from robusta.utils.cluster_provider_discovery import cluster_provider
class PrometheusTriggerEvent(TriggerEvent):
    """TriggerEvent carrying a single Prometheus alert payload."""

    alert: PrometheusAlert

    def get_event_name(self) -> str:
        """Event-type identifier used for trigger routing."""
        return PrometheusTriggerEvent.__name__

    def get_event_description(self) -> str:
        """Short human-readable description: alert name and severity."""
        labels = self.alert.labels
        name = labels.get("alertname", "NA")
        severity = labels.get("severity", "NA")
        return f"PrometheusAlert-{name}-{severity}"
class ResourceMapping(NamedTuple):
    """Maps a Prometheus alert label to the Kubernetes resource type it names,
    and to the attribute the loaded resource is stored under on the execution
    event."""

    # Hikaru/Robusta model class used to load the resource from the cluster.
    hikaru_class: Union[
        Type[RobustaPod],
        Type[RobustaDeployment],
        Type[Job],
        Type[DaemonSet],
        Type[StatefulSet],
        Type[HorizontalPodAutoscaler],
    ]
    # Attribute name set on PrometheusKubernetesAlert (e.g. "pod", "job").
    attribute_name: str
    # Alert label whose value is the resource's name (e.g. "job_name").
    prometheus_label: str
# Label-to-resource mappings tried (in order) when enriching an alert with the
# Kubernetes objects it refers to; see AlertEventBuilder._build_event_task.
MAPPINGS = [
    ResourceMapping(RobustaDeployment, "deployment", "deployment"),
    ResourceMapping(DaemonSet, "daemonset", "daemonset"),
    ResourceMapping(StatefulSet, "statefulset", "statefulset"),
    ResourceMapping(RobustaJob, "job", "job_name"),
    ResourceMapping(RobustaPod, "pod", "pod"),
    ResourceMapping(HorizontalPodAutoscaler, "hpa", "horizontalpodautoscaler"),
]
class PrometheusAlertTrigger(BaseTrigger):
    """
    Trigger that fires playbooks on matching Prometheus alerts.

    :var status: one of "firing", "resolved", or "all"
    """

    alert_name: str = None
    status: str = "firing"
    pod_name_prefix: str = None
    namespace_prefix: str = None
    instance_name_prefix: str = None
    k8s_providers: Optional[List[str]]

    def get_trigger_event(self):
        """Name of the TriggerEvent subclass this trigger consumes."""
        return PrometheusTriggerEvent.__name__

    def should_fire(self, event: TriggerEvent, playbook_id: str):
        """Return True when *event* is a Prometheus alert matching every
        configured filter (name, status, label prefixes, cluster provider)."""
        if not isinstance(event, PrometheusTriggerEvent):
            return False
        labels = event.alert.labels
        if not exact_match(self.alert_name, labels["alertname"]):
            return False
        status_matches = self.status == "all" or exact_match(self.status, event.alert.status)
        if not status_matches:
            return False
        prefix_filters = (
            (self.pod_name_prefix, labels.get("pod")),
            (self.namespace_prefix, labels.get("namespace")),
            (self.instance_name_prefix, labels.get("instance")),
        )
        for expected_prefix, actual_value in prefix_filters:
            if not prefix_match(expected_prefix, actual_value):
                return False
        provider = cluster_provider.get_cluster_provider()
        if provider and self.k8s_providers:
            allowed_providers = [p.lower() for p in self.k8s_providers]
            if provider.lower() not in allowed_providers:
                return False
        return True

    def build_execution_event(
        self, event: PrometheusTriggerEvent, sink_findings: Dict[str, List[Finding]]
    ) -> Optional[ExecutionBaseEvent]:
        """Delegate execution-event construction to AlertEventBuilder."""
        return AlertEventBuilder.build_event(event, sink_findings)

    @staticmethod
    def get_execution_event_type() -> type:
        """Execution event class produced by this trigger."""
        return PrometheusKubernetesAlert
class PrometheusAlertTriggers(BaseModel):
    """Container model: maps the `on_prometheus_alert` playbook YAML key to its trigger."""

    on_prometheus_alert: Optional[PrometheusAlertTrigger]
class AlertEventBuilder:
    """Builds PrometheusKubernetesAlert execution events from trigger events,
    loading the Kubernetes resources referenced by the alert's labels."""

    # Worker pool used when ALERTS_WORKERS_POOL is enabled; building an event
    # performs several Kubernetes API reads, so it can be offloaded.
    executor = ProcessPoolExecutor(max_workers=ALERT_BUILDER_WORKERS)

    @classmethod
    def __find_node_by_ip(cls, ip) -> Optional[Node]:
        # Scan all cluster nodes for one advertising the given IP address;
        # returns None if no node matches.
        nodes: NodeList = NodeList.listNode().obj
        for node in nodes.items:
            addresses = [a.address for a in node.status.addresses]
            logging.info(f"node {node.metadata.name} has addresses {addresses}")
            if ip in addresses:
                return node
        return None

    @classmethod
    def __load_node(cls, alert: PrometheusAlert, node_name: str) -> Optional[Node]:
        # Load a Node object by name, or by IP when the label carries IP:PORT.
        # Returns None on any API error (logged, not raised).
        node = None
        try:
            # sometimes we get an IP:PORT instead of the node name. handle that case
            if ":" in node_name:
                node = cls.__find_node_by_ip(node_name.split(":")[0])
            else:
                node = Node().read(name=node_name)
        except Exception as e:
            logging.info(f"Error loading Node kubernetes object {alert}. error: {e}")
        return node

    @staticmethod
    def _build_event_task(
        event: PrometheusTriggerEvent, sink_findings: Dict[str, List[Finding]]
    ) -> Optional[ExecutionBaseEvent]:
        # Construct the execution event and best-effort attach every Kubernetes
        # resource the alert's labels refer to (pod, deployment, node, ...).
        labels = event.alert.labels
        execution_event = PrometheusKubernetesAlert(
            sink_findings=sink_findings,
            alert=event.alert,
            alert_name=labels["alertname"],
            alert_severity=labels.get("severity"),
            label_namespace=labels.get("namespace", None),
        )
        namespace = labels.get("namespace", "default")
        for mapping in MAPPINGS:
            try:
                resource_name = labels.get(mapping.prometheus_label, None)
                # "kube-state-metrics" names refer to the metrics exporter
                # itself, not to a resource the alert is about -- skip them.
                if not resource_name or "kube-state-metrics" in resource_name:
                    continue
                resource = mapping.hikaru_class().read(name=resource_name, namespace=namespace)
                setattr(execution_event, mapping.attribute_name, resource)
                logging.info(
                    f"Successfully loaded Kubernetes resource {resource_name} for alert {execution_event.alert_name}"
                )
            except Exception as e:
                # Missing resources are expected (alert labels are best-effort);
                # log and continue with the remaining mappings.
                reason = getattr(e, "reason", "NA")
                status = getattr(e, "status", 0)
                logging.info(
                    f"Error loading kubernetes {mapping.attribute_name} {namespace}/{resource_name}. "
                    f"reason: {reason} status: {status}"
                )
        node_name = labels.get("node")
        if node_name:
            execution_event.node = AlertEventBuilder.__load_node(execution_event.alert, node_name)
        # we handle nodes differently than other resources
        node_name = labels.get("instance", None)
        job_name = labels.get("job", None)  # a prometheus "job" not a kubernetes "job" resource
        # when the job_name is kube-state-metrics "instance" refers to the IP of kube-state-metrics not the node
        # If the alert has pod, the 'instance' attribute contains the pod ip
        if not execution_event.node and node_name and job_name != "kube-state-metrics":
            execution_event.node = AlertEventBuilder.__load_node(execution_event.alert, node_name)
        return execution_event

    @staticmethod
    def build_event(
        event: PrometheusTriggerEvent, sink_findings: Dict[str, List[Finding]]
    ) -> Optional[ExecutionBaseEvent]:
        # Entry point: optionally offload the build to the process pool.
        if ALERTS_WORKERS_POOL:
            future = AlertEventBuilder.executor.submit(AlertEventBuilder._build_event_task, event, sink_findings)
            return future.result()
        else:
            return AlertEventBuilder._build_event_task(event, sink_findings)
import logging
import re
from queue import PriorityQueue
from typing import Dict, List, Tuple, Union
from robusta.core.reporting import (
BaseBlock,
FileBlock,
Finding,
FindingSeverity,
HeaderBlock,
ListBlock,
MarkdownBlock,
TableBlock,
)
from robusta.core.reporting.utils import add_pngs_for_all_svgs
from robusta.core.sinks.jira.jira_sink_params import JiraSinkParams
from robusta.integrations.jira.client import JiraClient
# Emoji used to visually encode a finding's severity in Jira content.
SEVERITY_EMOJI_MAP = {
    FindingSeverity.HIGH: ":red_circle:",
    FindingSeverity.MEDIUM: ":large_orange_circle:",
    FindingSeverity.LOW: ":large_yellow_circle:",
    FindingSeverity.INFO: ":large_green_circle:",
}
# Hex color matching each severity level.
SEVERITY_COLOR_MAP = {
    FindingSeverity.HIGH: "#d11818",
    FindingSeverity.MEDIUM: "#e48301",
    FindingSeverity.LOW: "#ffdc06",
    FindingSeverity.INFO: "#05aa01",
}
# Markdown-ish spans recognised in finding text: *strong*, _italic_ and `code`.
STRONG_MARK_REGEX = r"\*{1}[\w|\s\d%!><=\-:;@#$%^&()\.\,\]\[\\\/'\"]+\*{1}"
ITALIAN_MARK_REGEX = r"(^|\s+)_{1}[\w|\s\d%!*><=\-:;@#$%^&()\.\,\]\[\\\/'\"]+_{1}(\s+|$)"
CODE_REGEX = r"`{1,3}[\w|\s\d%!*><=\-:;@#$%^&()\.\,\]\[\\\/'\"]+`{1,3}"
def to_paragraph(txt, attrs=None):
    """Wrap *txt* in an ADF text node; attach *attrs* as its marks if given."""
    node = {"text": txt, "type": "text"}
    if attrs:
        node["marks"] = list(attrs)
    return node
def _union_lists(*arrays):
return [el for arr in arrays for el in arr]
def to_italian_text(txt, marks=None):
    """Strip `_..._` markers from *txt* and return a text node tagged "em"."""
    combined_marks = list(marks or []) + [{"type": "em"}]
    return to_markdown_text(txt, ITALIAN_MARK_REGEX, combined_marks, "_")
def to_code_text(txt, marks=None):
    """Strip backtick markers from *txt* and return a text node tagged "code"."""
    combined_marks = list(marks or []) + [{"type": "code"}]
    return to_markdown_text(txt, CODE_REGEX, combined_marks, "```")
def to_strong_text(txt, marks=None):
    """Strip `*...*` markers from *txt* and return a text node tagged "strong"."""
    combined_marks = list(marks or []) + [{"type": "strong"}]
    return to_markdown_text(txt, STRONG_MARK_REGEX, combined_marks, "*")
def to_markdown_text(txt, regex, marks, replacement_char):
    """Remove *replacement_char* inside every *regex* match of *txt* and
    return the cleaned text as a marked ADF text node."""
    def strip_marker(match):
        return match.group(0).replace(replacement_char, "")
    cleaned = re.sub(regex, strip_marker, txt)
    return to_paragraph(cleaned, marks)
# Dispatch table for markdown-to-ADF conversion. Each key is a predicate that
# detects a markup kind in a string; its value provides "split" (break the
# string around the matches) and "replace" (convert a fragment to a marked
# text node). Used by JiraSender._markdown_to_jira.
MARKDOWN_MAPPER = {
    lambda x: re.search(STRONG_MARK_REGEX, x): {
        "split": lambda x: re.split(f"({STRONG_MARK_REGEX})", x),
        "replace": lambda x, marks=None: to_strong_text(x, marks),
    },
    lambda x: re.search(ITALIAN_MARK_REGEX, x): {
        "split": lambda x: re.split(f"({ITALIAN_MARK_REGEX})", x),
        "replace": lambda x, marks=None: to_italian_text(x, marks),
    },
    lambda x: re.search(CODE_REGEX, x): {
        "split": lambda x: re.split(f"({CODE_REGEX})", x),
        "replace": lambda x, marks=None: to_code_text(x, marks),
    },
}
class JiraSender:
def __init__(self, cluster_name: str, account_id: str, params: JiraSinkParams):
self.cluster_name = cluster_name
self.account_id = account_id
self.params = params
print(self.params.dedups)
logging.info(self.params.dedups)
self.client = JiraClient(self.params)
def _markdown_to_jira(self, text):
# Using priority queue to determine which markdown to eject first. Bigger text -
# bigger priority.
pq = PriorityQueue()
for condition in MARKDOWN_MAPPER.keys():
search = condition(text)
if search:
# Priority queue puts the smallest number first, so we need to replace
# start with beginning
match_length = search.span()[0] - search.span()[1]
pq.put_nowait((match_length, condition))
text = [to_paragraph(text)]
while not pq.empty():
_, condition = pq.get_nowait()
funcs = MARKDOWN_MAPPER[condition]
func_split, func_replace = funcs["split"], funcs["replace"]
i = 0
while i < len(text):
text_part = text[i]["text"]
marks = text[i].get("marks", None)
parts = func_split(text_part)
new_parts = []
for part in parts:
if not len(part):
continue
part = func_replace(part, marks) if condition(part) else to_paragraph(part, marks)
new_parts.append(part)
text = text[:i] + new_parts + text[i + 1 :]
i += len(new_parts)
return text
def __to_jira(self, block: BaseBlock, sink_name: str) -> List[Union[Dict[str, str], Tuple[str, bytes, str]]]:
if isinstance(block, MarkdownBlock):
if not block.text:
return []
return [{"type": "paragraph", "content": self._markdown_to_jira(block.text)}]
elif isinstance(block, FileBlock):
return [(block.filename, block.contents, "application/octet-stream")]
elif isinstance(block, HeaderBlock):
return self.__to_jira(MarkdownBlock(block.text), sink_name)
elif isinstance(block, TableBlock):
return [{"type": "codeBlock", "content": [{"text": block.to_markdown().text, "type": "text"}]}]
elif isinstance(block, ListBlock):
return [
{
"type": "bulletList",
"content": [
{
"type": "listItem",
"content": [{"type": "paragraph", "content": [{"type": "text", "text": str(item)}]}],
}
for item in block.items
],
}
]
else:
logging.warning(f"cannot convert block of type {type(block)} to jira format block")
return [] # no reason to crash the entire report
def __parse_blocks_to_jira(self, report_blocks: List[BaseBlock]):
# Process attachment blocks
file_blocks = add_pngs_for_all_svgs([b for b in report_blocks if isinstance(b, FileBlock)])
# Process attachment blocks
other_blocks = [b for b in report_blocks if not isinstance(b, FileBlock)]
if not self.params.send_svg:
file_blocks = [b for b in file_blocks if not b.filename.endswith(".svg")]
output_blocks = []
for block in other_blocks:
output_blocks.extend(self.__to_jira(block, self.params.name))
output_file_blocks = []
for block in file_blocks:
output_file_blocks.extend(self.__to_jira(block, self.params.name))
return output_blocks, output_file_blocks
    def send_finding_to_jira(
        self,
        finding: Finding,
        platform_enabled: bool,
    ):
        """Convert *finding* into a Jira issue and create it via the Jira client.

        Builds a leading paragraph of action links (investigate/silence/videos),
        converts the finding's description and enrichments to ADF blocks, derives
        the issue labels from the configured dedup attributes, then creates the
        issue with any file blocks attached.
        """
        blocks: List[BaseBlock] = []
        actions = []
        if platform_enabled:  # add link to the robusta ui, if it's configured
            investigate_url = finding.get_investigate_uri(self.account_id, self.cluster_name)
            actions.append(to_paragraph("🔎 Investigate", [{"type": "link", "attrs": {"href": investigate_url}}]))
            if finding.add_silence_url:
                actions.append(
                    to_paragraph(
                        "🔕 Silence",
                        [
                            {
                                "type": "link",
                                "attrs": {
                                    "href": finding.get_prometheus_silence_url(self.account_id, self.cluster_name)
                                },
                            }
                        ],
                    )
                )
        # video links are added regardless of platform availability
        for video_link in finding.video_links:
            actions.append(
                to_paragraph(f"🎬 {video_link.name}", [{"type": "link", "attrs": {"href": video_link.url}}])
            )
        # wrap all the action links into a single leading paragraph node
        actions = [{"type": "paragraph", "content": actions}]
        # first add finding description block
        if finding.description:
            blocks.append(MarkdownBlock(finding.description))
        for enrichment in finding.enrichments:
            blocks.extend(enrichment.blocks)
        output_blocks, file_blocks = self.__parse_blocks_to_jira(blocks)
        logging.debug("Creating issue")
        # labels come from the sink's dedup attributes; each may live on the finding
        # itself, in its attribute map, or be the cluster name
        labels = []
        for attr in self.params.dedups:
            if hasattr(finding, attr):
                labels.append(getattr(finding, attr))
            elif attr in finding.attribute_map:
                labels.append(finding.attribute_map[attr])
            elif attr == "cluster_name":
                labels.append(self.cluster_name)
        self.client.create_issue(
            {
                "description": {"type": "doc", "version": 1, "content": actions + output_blocks},
                "summary": finding.title,
                "labels": labels,
            },
            file_blocks,
        )
import logging
from datetime import datetime, timedelta, tzinfo
from typing import Optional, Union
from robusta.core.model.base_params import PrometheusParams
from robusta.core.model.env_vars import PROMETHEUS_REQUEST_TIMEOUT_SECONDS
from robusta.integrations.prometheus.utils import get_prometheus_connect
class PrometheusAnalyzer:
    """Thin wrapper around a Prometheus connection for running (optionally time-ranged) PromQL queries."""

    def __init__(self, prometheus_params: PrometheusParams, prometheus_tzinfo: Optional[tzinfo]):
        self.prom = get_prometheus_connect(prometheus_params)
        self.default_params = {"timeout": PROMETHEUS_REQUEST_TIMEOUT_SECONDS}
        self.prom.check_prometheus_connection(params=self.default_params)
        # fall back to the local timezone when the caller did not provide one
        self.prometheus_tzinfo = prometheus_tzinfo or datetime.now().astimezone().tzinfo

    def _query(self, promql_query: str, duration: Optional[timedelta] = None, **kwargs) -> list:
        """Dispatch to a range query when a duration is given, otherwise run an instant query."""
        return self._timed_query(promql_query, duration, **kwargs) if duration else self._non_timed_query(promql_query)

    def _non_timed_query(self, promql_query: str) -> list:
        """Run an instant PromQL query."""
        return self.prom.custom_query(promql_query, self.default_params)

    def _get_query_value(self, results: Optional[list], offset: int = 0) -> Optional[float]:
        """Extract the sample value at *offset*, handling both instant ("value") and range ("values") shapes."""
        if not results:
            return None
        entry = results[offset]
        value = entry.get("value", [None, None])[1]
        value = value or entry.get("values", [[None, None]])[0][1]
        return float(value) if value else value

    def _timed_query(self, promql_query: str, duration: timedelta, **kwargs) -> Optional[Union[list, dict]]:
        """Run a range query covering the last *duration*, stepped by kwargs["step"] (default "1")."""
        if not self.prometheus_tzinfo:
            logging.warning("Prometheus Analyzer was created without tz info, impossible to perform timed queries")
            return None
        query_end = datetime.now(tz=self.prometheus_tzinfo)
        query_start = query_end - duration
        return self.prom.custom_query_range(
            promql_query, query_start, query_end, kwargs.get("step", "1"), {"timeout": self.default_params["timeout"]}
        )
from collections import OrderedDict
from hikaru.model.rel_1_26 import Node
from robusta.core.model.base_params import PrometheusParams
from robusta.core.model.env_vars import PROMETHEUS_REQUEST_TIMEOUT_SECONDS
from robusta.integrations.prometheus.utils import get_prometheus_connect
class NodeCpuAnalyzer:
    """Computes node-level and per-pod CPU usage for a single node via Prometheus queries."""

    # TODO: perhaps we should handle this more elegantly by first loading all the data into a pandas dataframe
    # and then slicing it different ways
    def __init__(self, node: Node, prometheus_params: PrometheusParams, range_size="5m"):
        # range_size is the PromQL range-vector window used by all the rate() queries below
        self.node = node
        self.range_size = range_size
        # node-exporter labels series by instance (IP:port) rather than node name, so keep both handy
        self.internal_ip = next(addr.address for addr in self.node.status.addresses if addr.type == "InternalIP")
        self.prom = get_prometheus_connect(prometheus_params)
        self.default_params = {"timeout": PROMETHEUS_REQUEST_TIMEOUT_SECONDS}
        self.prom.check_prometheus_connection(params=self.default_params)

    def get_total_cpu_usage(self, other_method=False):
        """
        Gets the total cpu usage for the node, including both containers and everything running on the host directly
        :param other_method: use the cadvisor root-cgroup formula instead of node_cpu_seconds_total
        :return: a float between 0 and 1 representing the percentage of total cpus used
        """
        if other_method:
            return self._query(
                f'rate(container_cpu_usage_seconds_total{{node="{self.node.metadata.name}",pod="",id="/"}}[{self.range_size}]) '
                f'/ scalar(sum (machine_cpu_cores{{node="{self.node.metadata.name}"}}))'
            )
        # the instance here refers to the node as identified by its internal IP
        # we average by the instance to account for multiple cpus and still return a number between 0-1
        return self._query(
            f"1"
            f"- avg by(instance)(rate("
            f' node_cpu_seconds_total{{mode=~"idle", instance=~"{self.node.metadata.name}|{self.internal_ip}:.*"}}[{self.range_size}]'
            f"))"
            f"- avg by(instance)(rate("
            f' node_cpu_seconds_total{{mode=~"iowait", instance=~"{self.node.metadata.name}|{self.internal_ip}:.*"}}[{self.range_size}]'
            f"))"
        )

    def get_total_containerized_cpu_usage(self):
        """Total CPU used by all containers on the node, normalized to the 0-1 range."""
        query = self._build_query_for_containerized_cpu_usage(True, True)
        return self._query(query)

    def get_per_pod_cpu_usage(self, threshold=0.0, normalize_by_cpu_count=True):
        """
        Gets the cpu usage of each pod on a node
        :param threshold: only return pods with a cpu above threshold
        :param normalize_by_cpu_count: should we divide by the number of cpus so that the result is in the range 0-1 regardless of cpu count?
        :return: a dict of {[pod_name] : [cpu_usage in the 0-1 range] }
        """
        query = self._build_query_for_containerized_cpu_usage(False, normalize_by_cpu_count)
        result = self.prom.custom_query(query, params=self.default_params)
        pod_value_pairs = [(r["metric"]["pod"], float(r["value"][1])) for r in result]
        pod_value_pairs = [(k, v) for (k, v) in pod_value_pairs if v >= threshold]
        # sort descending by usage so the heaviest pods come first
        pod_value_pairs.sort(key=lambda x: x[1], reverse=True)
        pod_to_cpu = OrderedDict(pod_value_pairs)
        return pod_to_cpu

    def get_per_pod_cpu_request(self):
        """Map pod name -> requested CPU (cores) for pods scheduled on this node."""
        # NOTE(review): kube_pod_container_resource_requests_cpu_cores was removed in newer
        # kube-state-metrics releases (folded into kube_pod_container_resource_requests) - confirm
        query = f'sum by (pod)(kube_pod_container_resource_requests_cpu_cores{{node="{self.node.metadata.name}"}})'
        result = self.prom.custom_query(query, params=self.default_params)
        return dict((r["metric"]["pod"], float(r["value"][1])) for r in result)

    def _query(self, query):
        """
        Runs a simple query returning a single metric and returns that metric
        """
        result = self.prom.custom_query(query, params=self.default_params)
        return float(result[0]["value"][1])

    def _build_query_for_containerized_cpu_usage(self, total, normalized_by_cpu_count):
        """Build a PromQL query for container CPU usage, either node-total or grouped per pod."""
        if total:
            grouping = ""
        else:
            grouping = "by (pod)"
        if normalized_by_cpu_count:
            # we divide by the number of machine_cpu_cores to return a result in th 0-1 range regardless of cpu count
            normalization = f'/ scalar(sum (machine_cpu_cores{{node="{self.node.metadata.name}"}}))'
        else:
            normalization = ""
        # note: it is important to set either image!="" or image="" because otherwise we count everything twice -
        # once for the whole pod (image="") and once for each container (image!="")
        return (
            f"sum(rate("
            f' container_cpu_usage_seconds_total{{node="{self.node.metadata.name}",pod!="",image!=""}}[{self.range_size}]'
            f")) {grouping} {normalization}"
        )
import logging
import re
from itertools import chain
from typing import Any, Dict, List, Tuple, Union
from robusta.core.reporting.base import Finding, FindingSeverity, FindingStatus
from robusta.core.reporting.blocks import (
BaseBlock,
FileBlock,
HeaderBlock,
KubernetesDiffBlock,
ListBlock,
MarkdownBlock,
TableBlock,
)
from robusta.core.reporting.utils import add_pngs_for_all_svgs
from robusta.core.sinks.mattermost.mattermost_sink_params import MattermostSinkParams
from robusta.core.sinks.transformer import Transformer
from robusta.integrations.mattermost.client import MattermostClient
# matches a lowercase file extension at the end of a filename
extension_regex = re.compile(r"\.[a-z]+$")
# type alias: a single Mattermost message block (an arbitrary JSON-like dict)
MattermostBlock = Dict[str, Any]
# emoji shown next to the finding title, per severity
SEVERITY_EMOJI_MAP: Dict[FindingSeverity, str] = {
    FindingSeverity.HIGH: ":red_circle:",
    FindingSeverity.MEDIUM: ":large_orange_circle:",
    FindingSeverity.LOW: ":large_yellow_circle:",
    FindingSeverity.INFO: ":large_green_circle:",
}
# sidebar color of the message attachment, per severity
SEVERITY_COLOR_MAP: Dict[FindingSeverity, str] = {
    FindingSeverity.HIGH: "#d11818",
    FindingSeverity.MEDIUM: "#e48301",
    FindingSeverity.LOW: "#ffdc06",
    FindingSeverity.INFO: "#05aa01",
}
MAX_BLOCK_CHARS = 16383  # Max allowed characters for mattermost
class MattermostSender:
    """Renders Robusta findings as Mattermost messages and posts them via the client."""

    def __init__(self, cluster_name: str, account_id: str, client: MattermostClient, sink_params: MattermostSinkParams):
        """
        Set the Mattermost webhook url.
        """
        self.cluster_name = cluster_name
        self.account_id = account_id
        self.client = client
        self.sink_params = sink_params

    @classmethod
    def __add_mattermost_title(cls, title: str, status: FindingStatus, severity: FindingSeverity,
                               add_silence_url: bool) -> str:
        """Decorate the title with a status prefix (for silenceable findings) and a severity icon."""
        icon = SEVERITY_EMOJI_MAP.get(severity, "")
        status_str: str = f"{status.to_emoji()} {status.name.lower()} - " if add_silence_url else ""
        return f"{status_str}{icon} {severity.name} - **{title}**"

    @classmethod
    def __format_msg_attachments(cls, mattermost_blocks: List[str], msg_color: str) -> List[Dict]:
        """Bundle all rendered blocks into a single colored Mattermost attachment."""
        return [{"text": "\n".join(mattermost_blocks), "color": msg_color}]

    def __to_mattermost(self, block: BaseBlock, sink_name: str) -> Union[str, Tuple]:
        """Convert one report block to Mattermost markdown text (or to a (filename, contents) tuple)."""
        if isinstance(block, MarkdownBlock):
            return Transformer.to_github_markdown(block.text)
        elif isinstance(block, FileBlock):
            return block.filename, block.contents
        elif isinstance(block, HeaderBlock):
            return Transformer.apply_length_limit(block.text, 150)
        elif isinstance(block, TableBlock):
            return block.to_markdown(max_chars=MAX_BLOCK_CHARS, add_table_header=False).text
        elif isinstance(block, ListBlock):
            return self.__to_mattermost(block.to_markdown(), sink_name)
        elif isinstance(block, KubernetesDiffBlock):
            return self.__to_mattermost_diff(block, sink_name)
        else:
            logging.warning(f"cannot convert block of type {type(block)} to mattermost format block: {block}")
            return ""  # no reason to crash the entire report

    def __to_mattermost_diff(self, block: KubernetesDiffBlock, sink_name: str) -> str:
        """Render a Kubernetes diff block as newline-separated markdown lines."""
        transformed_blocks = Transformer.to_markdown_diff(block, use_emoji_sign=True)
        # BUG FIX: the previous implementation used chain(*[...converted strings...]),
        # which flattened each converted string into individual characters before
        # joining - producing one character per line. Join the converted strings directly.
        return "\n".join(
            self.__to_mattermost(transformed_block, sink_name) for transformed_block in transformed_blocks
        )

    def __send_blocks_to_mattermost(
        self,
        report_blocks: List[BaseBlock],
        title: str,
        status: FindingStatus,
        severity: FindingSeverity,
        msg_color: str,
        add_silence_url: bool,
    ):
        """Render the blocks and post a single message (header + colored attachment + files)."""
        # Process attachment blocks: FileBlocks, with PNG fallbacks generated for every SVG
        file_blocks = add_pngs_for_all_svgs([b for b in report_blocks if isinstance(b, FileBlock)])
        file_attachments = []
        if not self.sink_params.send_svg:
            file_blocks = [b for b in file_blocks if not b.filename.endswith(".svg")]
        for block in file_blocks:
            file_attachments.append(self.__to_mattermost(block, self.sink_params.name))
        other_blocks = [b for b in report_blocks if not isinstance(b, FileBlock)]
        output_blocks = []
        header_block = {}
        if title:
            title = self.__add_mattermost_title(title=title, status=status, severity=severity,
                                                add_silence_url=add_silence_url)
            header_block = self.__to_mattermost(HeaderBlock(title), self.sink_params.name)
        for block in other_blocks:
            output_blocks.append(self.__to_mattermost(block, self.sink_params.name))
        attachments = self.__format_msg_attachments(output_blocks, msg_color)
        logging.debug(
            f"--sending to mattermost--\n"
            f"title:{title}\n"
            f"blocks: {output_blocks}\n"
            f"msg: {attachments}\n"
            f"file_attachments: {file_attachments}\n"
        )
        self.client.post_message(header_block, attachments, file_attachments)

    def send_finding_to_mattermost(self, finding: Finding, platform_enabled: bool):
        """Convert *finding* into Mattermost blocks (action links, source, description, enrichments) and post it."""
        blocks: List[BaseBlock] = []
        if platform_enabled:  # add link to the robusta ui, if it's configured
            actions = f"[:mag_right: Investigate]({finding.get_investigate_uri(self.account_id, self.cluster_name)})"
            if finding.add_silence_url:
                actions = f"{actions} [:no_bell: Silence]({finding.get_prometheus_silence_url(self.account_id, self.cluster_name)})"
            for video_link in finding.video_links:
                actions = f"{actions} [:clapper: {video_link.name}]({video_link.url})"
            blocks.append(MarkdownBlock(actions))
        blocks.append(MarkdownBlock(f"*Source:* `{self.cluster_name}`\n"))
        # first add finding description block
        if finding.description:
            blocks.append(MarkdownBlock(finding.description))
        for enrichment in finding.enrichments:
            blocks.extend(enrichment.blocks)
        status: FindingStatus = (
            FindingStatus.RESOLVED if finding.title.startswith("[RESOLVED]") else FindingStatus.FIRING
        )
        msg_color = status.to_color_hex()
        title = finding.title.removeprefix("[RESOLVED] ")
        self.__send_blocks_to_mattermost(
            report_blocks=blocks,
            title=title,
            status=status,
            severity=finding.severity,
            msg_color=msg_color,
            add_silence_url=finding.add_silence_url,
        )
from enum import Enum
from typing import Callable, List, Optional
from robusta.core.model.base_params import ProcessParams
from robusta.core.reporting.base import Finding
from robusta.core.reporting.blocks import BaseBlock, CallbackBlock, CallbackChoice, MarkdownBlock, TableBlock
from robusta.integrations.kubernetes.custom_models import Process, RobustaPod
class ProcessType(Enum):
    """Runtimes whose processes we know how to locate inside a pod."""

    PYTHON = "python"
    JAVA = "java"
class ProcessFinder:
    """
    Find the processes in a Kubernetes pod which match certain filters.
    """

    def __init__(self, pod: RobustaPod, filters: ProcessParams, process_type: ProcessType):
        if process_type not in {ProcessType.PYTHON, ProcessType.JAVA}:
            raise Exception(f"Unsupported process type: {process_type}")
        self.pod = pod
        self.filters = filters
        self.process_type = process_type
        self.all_processes = pod.get_processes()
        self.matching_processes = self.__get_matches(self.all_processes, filters, process_type)

    def get_match_or_report_error(
        self, finding: Finding, retrigger_text: str, retrigger_action: Callable, debug_action: Callable
    ) -> Optional[Process]:
        """
        Returns the single-matching process. If more than one process matches, blocks will be added to the Finding
        to report the error and optionally allow re-triggering the action with a chosen process.
        """
        match_count = len(self.matching_processes)
        if match_count == 1:
            return self.get_exact_match()
        if match_count == 0:
            intro = MarkdownBlock("No matching processes. The processes in the pod are:")
            candidates = self.all_processes
        else:
            intro = MarkdownBlock("More than one matching process. The matching processes are:")
            candidates = self.matching_processes
        finding.add_enrichment(
            [intro] + self.__get_error_blocks(candidates, retrigger_text, retrigger_action, debug_action)
        )
        return None

    def has_exactly_one_match(self) -> bool:
        """
        Returns true when exactly one process matches
        """
        return len(self.matching_processes) == 1

    def get_pids(self) -> List[int]:
        """
        Returns all relevant pids
        """
        return [proc.pid for proc in self.matching_processes]

    def get_lowest_relevant_pid(self) -> int:
        """
        Returns the lowest pid which is most likely the parent process
        """
        return min(proc.pid for proc in self.matching_processes)

    def get_exact_match(self) -> Process:
        """
        Returns a process matching this class and throws when there is more than one match
        """
        if len(self.matching_processes) != 1:
            raise Exception("Only one match is expected")
        return self.matching_processes[0]

    @staticmethod
    def __get_matches(processes: List[Process], filters: ProcessParams, process_type: ProcessType) -> List[Process]:
        """
        Returns the processes that match a ProcessParams class
        """
        by_pid = {proc.pid: proc for proc in processes}
        if filters.pid is not None:
            # an explicit pid filter wins over the substring/type matching
            return [by_pid[filters.pid]] if filters.pid in by_pid else []
        return [
            proc
            for proc in by_pid.values()
            if process_type.value in proc.exe and filters.process_substring in " ".join(proc.cmdline)
        ]

    def __get_error_blocks(
        self, processes: List[Process], text: str, action: Callable, debug_action: Callable
    ) -> List[BaseBlock]:
        """Build a table of candidate processes, plus interactive retry buttons when enabled."""
        if not processes:
            return [MarkdownBlock("No processes")]
        blocks = [
            TableBlock(
                [[proc.pid, proc.exe, " ".join(proc.cmdline)] for proc in processes],
                ["pid", "exe", "cmdline"],
            )
        ]
        if self.filters.interactive:
            choices = {}
            for proc in processes:
                params_for_proc = self.filters.copy()
                params_for_proc.process_substring = ""
                params_for_proc.pid = proc.pid
                choices[f"{text} {proc.pid}"] = CallbackChoice(
                    action=action,
                    action_params=params_for_proc,
                    kubernetes_object=self.pod,
                )
            choices["Still can't choose?"] = CallbackChoice(
                action=debug_action,
                action_params=self.filters,
                kubernetes_object=self.pod,
            )
            blocks.append(CallbackBlock(choices))
            blocks.append(MarkdownBlock("*After clicking a button please wait up to 120 seconds for a response*"))
        return blocks
import json
import logging
import re
import time
from enum import Enum, auto
from kubernetes.client import ApiException
from typing import Dict, List, Optional, Tuple, Type, TypeVar
import hikaru
import yaml
from hikaru.model.rel_1_26 import * # * import is necessary for hikaru subclasses to work
from pydantic import BaseModel
from robusta.core.model.env_vars import INSTALLATION_NAMESPACE, RELEASE_NAME
from robusta.integrations.kubernetes.api_client_utils import (
SUCCEEDED_STATE,
exec_shell_command,
get_pod_logs,
prepare_pod_command,
to_kubernetes_name,
upload_file,
wait_for_pod_status,
wait_until_job_complete,
)
from robusta.integrations.kubernetes.templates import get_deployment_yaml
from robusta.utils.parsing import load_json
# generic type variables used by the helpers and classmethods below
S = TypeVar("S")
T = TypeVar("T")
# TODO: import these from the python-tools project
PYTHON_DEBUGGER_IMAGE = "us-central1-docker.pkg.dev/genuine-flight-317411/devel/debug-toolkit:v5.0"
JAVA_DEBUGGER_IMAGE = "us-central1-docker.pkg.dev/genuine-flight-317411/devel/java-toolkit-11:jattach"
class Process(BaseModel):
    """A single OS process discovered inside a pod's containers."""

    pid: int  # process id as seen on the node
    exe: str  # path of the executable
    cmdline: List[str]  # full command line, one entry per argument
class ProcessList(BaseModel):
    """Wrapper model for deserializing the debug-toolkit's process listing output."""

    processes: List[Process]  # all processes reported for the pod
def _get_match_expression_filter(expression: LabelSelectorRequirement) -> str:
    """Render a single LabelSelectorRequirement as a kubectl-style selector term."""
    operator = expression.operator.lower()
    if operator == "exists":
        return expression.key
    if operator == "doesnotexist":
        return f"!{expression.key}"
    # set-based operators (e.g. In / NotIn) keep their original casing
    return f"{expression.key} {expression.operator} ({','.join(expression.values)})"
def build_selector_query(selector: LabelSelector) -> str:
    """Convert a LabelSelector into a comma-separated label-selector query string."""
    terms = [f"{key}={value}" for key, value in selector.matchLabels.items()]
    terms += [_get_match_expression_filter(requirement) for requirement in selector.matchExpressions]
    return ",".join(terms)
def list_pods_using_selector(namespace: str, selector: LabelSelector, field_selector: Optional[str] = None) -> List[Pod]:
    """List the pods in *namespace* matching *selector* (and, optionally, *field_selector*)."""
    labels_selector = build_selector_query(selector)
    return PodList.listNamespacedPod(
        namespace=namespace,
        label_selector=labels_selector,
        field_selector=field_selector,
    ).obj.items
def _get_image_name_and_tag(image: str) -> Tuple[str, str]:
if ":" in image:
image_name, image_tag = image.split(":", maxsplit=1)
return image_name, image_tag
else:
return image, "<NONE>"
def get_images(containers: List[Container]) -> Dict[str, str]:
    """
    Takes a list of containers and returns a dict mapping image name to image tag.
    """
    return dict(_get_image_name_and_tag(container.image) for container in containers)
def extract_images(k8s_obj: HikaruDocumentBase) -> Optional[Dict[str, str]]:
    """Map image name -> tag for every container on the object; None when it has no containers."""
    images = extract_image_list(k8s_obj)
    if not images:
        # no containers found on that k8s obj
        return None
    return dict(_get_image_name_and_tag(image) for image in images)
def extract_image_list(k8s_obj: HikaruDocumentBase) -> List[str]:
    """Collect every container image referenced by a workload object or a bare pod."""
    # workloads (deployment, replica set, daemon set, stateful set, job) nest containers
    # under the pod template; bare pods keep them directly under spec
    candidate_paths = (
        ["spec", "template", "spec", "containers"],
        ["spec", "containers"],
    )
    images: List[str] = []
    for path in candidate_paths:
        try:
            for container in k8s_obj.object_at_path(path):
                images.append(container.image)
        except Exception:  # Path not found on object, not a real error
            pass
    return images
def does_daemonset_have_toleration(ds: DaemonSet, toleration_key: str) -> bool:
    """Return True when the DaemonSet's pod template tolerates the given taint key."""
    for toleration in ds.spec.template.spec.tolerations:
        if toleration.key == toleration_key:
            return True
    return False
def does_node_have_taint(node: Node, taint_key: str) -> bool:
    """Return True when the node carries a taint with the given key."""
    for taint in node.spec.taints:
        if taint.key == taint_key:
            return True
    return False
class RobustaEvent:
    """Helper for fetching Kubernetes events related to a specific object."""

    @classmethod
    def get_events(cls, kind: str, name: str, namespace: Optional[str] = None) -> EventList:
        """Return all events whose "regarding" object matches kind/name (and namespace, when given)."""
        field_selector = f"regarding.kind={kind},regarding.name={name}"
        if namespace:
            field_selector += f",regarding.namespace={namespace}"
        return EventList.listEventForAllNamespaces(field_selector=field_selector).obj
class RegexReplacementStyle(Enum):
    """
    Patterns for replacers, either asterisks "****" matching the length of the match, or the replacement name, e.g "[IP]"
    """

    SAME_LENGTH_ASTERISKS = auto()  # replace each match with asterisks of the same length
    NAMED = auto()  # replace each match with its pattern's name, e.g. "[IP]"
class NamedRegexPattern(BaseModel):
    """
    A named regex pattern
    """

    name: str = "Redacted"  # label used when redacting matches in NAMED style
    regex: str  # the pattern to search for
class RobustaPod(Pod):
    """Pod subclass enriched with exec/log/debugger helpers used across Robusta actions."""

    def exec(self, shell_command: str, container: Optional[str] = None) -> str:
        """Execute a command inside the pod"""
        if container is None:
            # default to the first container when none is specified
            container = self.spec.containers[0].name
        return exec_shell_command(self.metadata.name, shell_command, self.metadata.namespace, container)

    def get_logs(
        self,
        container=None,
        previous=None,
        tail_lines=None,
        regex_replacer_patterns: Optional[List[NamedRegexPattern]] = None,
        regex_replacement_style: Optional[RegexReplacementStyle] = None,
        filter_regex: Optional[str] = None,
    ) -> str:
        """
        Fetch pod logs, can replace sensitive data in the logs using a regex
        """
        if not container and self.spec.containers:
            # default to the first container when none is specified
            container = self.spec.containers[0].name
        pods_logs = get_pod_logs(
            self.metadata.name,
            self.metadata.namespace,
            container,
            previous,
            tail_lines,
        )
        if pods_logs and filter_regex:
            # keep only the lines/fragments matching filter_regex
            regex = re.compile(filter_regex)
            pods_logs = "\n".join(re.findall(regex, pods_logs))
        if pods_logs and regex_replacer_patterns:
            logging.info("Sanitizing log data with the provided regex patterns")
            if regex_replacement_style == RegexReplacementStyle.NAMED:
                # replace matches with "[NAME]" labels
                for replacer in regex_replacer_patterns:
                    pods_logs = re.sub(replacer.regex, f"[{replacer.name.upper()}]", pods_logs)
            else:
                # default style: replace each match with asterisks of the same length
                def same_length_asterisks(match):
                    return "*" * len((match.group(0)))

                for replacer in regex_replacer_patterns:
                    pods_logs = re.sub(replacer.regex, same_length_asterisks, pods_logs)
        return pods_logs

    @staticmethod
    def exec_in_java_pod(
        pod_name: str, node_name: str, debug_cmd=None, override_jtk_image: str = JAVA_DEBUGGER_IMAGE
    ) -> str:
        """Run a command in a java-toolkit debugger pod scheduled on the given node."""
        return RobustaPod.exec_in_debugger_pod(pod_name, node_name, debug_cmd, debug_image=override_jtk_image)

    @staticmethod
    def create_debugger_pod(
        pod_name: str,
        node_name: str,
        debug_image=PYTHON_DEBUGGER_IMAGE,
        debug_cmd=None,
        env: Optional[List[EnvVar]] = None,
        mount_host_root: bool = False,
    ) -> "RobustaPod":
        """
        Creates a debugging pod with high privileges
        """
        volume_mounts = None
        volumes = None
        if mount_host_root:
            # expose the node's root filesystem under /host inside the debugger
            volume_mounts = [VolumeMount(name="host-root", mountPath="/host")]
            volumes = [Volume(name="host-root", hostPath=HostPathVolumeSource(path="/", type="Directory"))]
        debugger = RobustaPod(
            apiVersion="v1",
            kind="Pod",
            metadata=ObjectMeta(
                name=to_kubernetes_name(pod_name, "debug-"),
                namespace=INSTALLATION_NAMESPACE,
            ),
            spec=PodSpec(
                serviceAccountName=f"{RELEASE_NAME}-runner-service-account",
                # hostPID + SYS_PTRACE let the debugger see and attach to host processes
                hostPID=True,
                nodeName=node_name,
                restartPolicy="OnFailure",
                containers=[
                    Container(
                        name="debugger",
                        image=debug_image,
                        imagePullPolicy="Always",
                        command=prepare_pod_command(debug_cmd),
                        securityContext=SecurityContext(
                            capabilities=Capabilities(add=["SYS_PTRACE", "SYS_ADMIN"]), privileged=True
                        ),
                        volumeMounts=volume_mounts,
                        env=env,
                    )
                ],
                volumes=volumes,
            ),
        )
        # TODO: check the result code
        debugger = debugger.createNamespacedPod(debugger.metadata.namespace).obj
        return debugger

    @staticmethod
    def exec_on_node(pod_name: str, node_name: str, cmd):
        """Run *cmd* in the node's own namespaces (pid 1) via nsenter from a debugger pod."""
        command = f'nsenter -t 1 -a "{cmd}"'
        return RobustaPod.exec_in_debugger_pod(pod_name, node_name, command)

    @staticmethod
    def run_debugger_pod(
        node_name: str, pod_image: str, env: Optional[List[EnvVar]] = None, mount_host_root: bool = False
    ) -> str:
        """Run a debugger pod to completion on *node_name* and return its logs; always cleans up the pod."""
        debugger = RobustaPod.create_debugger_pod(
            node_name, node_name, pod_image, env=env, mount_host_root=mount_host_root
        )
        try:
            pod_name = debugger.metadata.name
            pod_namespace = debugger.metadata.namespace
            # poll every 0.2s for up to 360 iterations waiting for the pod to succeed
            pod_status = wait_for_pod_status(pod_name, pod_namespace, SUCCEEDED_STATE, 360, 0.2)
            if pod_status != SUCCEEDED_STATE:
                raise Exception(f"pod {pod_name} in {pod_namespace} failed to complete. It is in state {pod_status}")
            return debugger.get_logs()
        finally:
            RobustaPod.deleteNamespacedPod(debugger.metadata.name, debugger.metadata.namespace)

    @staticmethod
    def exec_in_debugger_pod(pod_name: str, node_name: str, cmd, debug_image=PYTHON_DEBUGGER_IMAGE) -> str:
        """Create a debugger pod on the node, exec *cmd* inside it, and always delete the pod afterwards."""
        debugger = RobustaPod.create_debugger_pod(pod_name, node_name, debug_image)
        try:
            return debugger.exec(cmd)
        finally:
            RobustaPod.deleteNamespacedPod(debugger.metadata.name, debugger.metadata.namespace)

    @staticmethod
    def extract_container_id(status: ContainerStatus) -> str:
        """Strip the runtime prefix (e.g. "docker://", "containerd://") from a containerID."""
        runtime, container_id = status.containerID.split("://")
        return container_id

    def get_processes(self) -> List[Process]:
        """List the processes running in this pod's containers via the debug-toolkit."""
        container_ids = " ".join([self.extract_container_id(s) for s in self.status.containerStatuses])
        output = RobustaPod.exec_in_debugger_pod(
            self.metadata.name,
            self.spec.nodeName,
            f"debug-toolkit pod-ps {self.metadata.uid} {container_ids}",
        )
        processes = ProcessList(**load_json(output))
        return processes.processes

    def get_images(self) -> Dict[str, str]:
        """Map image name -> tag for this pod's containers."""
        return get_images(self.spec.containers)

    def has_direct_owner(self, owner_uid) -> bool:
        """True when *owner_uid* appears in this pod's ownerReferences."""
        for owner in self.metadata.ownerReferences:
            if owner.uid == owner_uid:
                return True
        return False

    def has_toleration(self, toleration_key):
        """True when any of the pod's tolerations has the given key."""
        return any(toleration_key == toleration.key for toleration in self.spec.tolerations)

    def has_cpu_limit(self) -> bool:
        """True when any container declares a cpu limit."""
        # NOTE(review): assumes resources.limits is always a mapping when resources is set;
        # a None limits would raise AttributeError here - confirm against hikaru defaults
        for container in self.spec.containers:
            if container.resources and container.resources.limits.get("cpu"):
                return True
        return False

    def upload_file(self, path: str, contents: bytes, container: Optional[str] = None):
        """Upload *contents* to *path* inside the given (or first) container."""
        if container is None:
            container = self.spec.containers[0].name
            logging.info(f"no container name given when uploading file, so choosing first container: {container}")
        upload_file(
            self.metadata.name,
            path,
            contents,
            namespace=self.metadata.namespace,
            container=container,
        )

    @staticmethod
    def find_pods_with_direct_owner(namespace: str, owner_uid: str) -> List["RobustaPod"]:
        """All pods in *namespace* directly owned by the object with uid *owner_uid*."""
        all_pods: List["RobustaPod"] = PodList.listNamespacedPod(namespace).obj.items
        return list(filter(lambda p: p.has_direct_owner(owner_uid), all_pods))

    @staticmethod
    def find_pod(name_prefix, namespace) -> "RobustaPod":
        """Return the first pod in *namespace* whose name starts with *name_prefix*; raises when none exists."""
        pods: PodList = PodList.listNamespacedPod(namespace).obj
        for pod in pods.items:
            if pod.metadata.name.startswith(name_prefix):
                # we serialize and then deserialize to work around https://github.com/haxsaw/hikaru/issues/15
                return hikaru.from_dict(pod.to_dict(), cls=RobustaPod)
        raise Exception(f"No pod exists in namespace '{namespace}' with name prefix '{name_prefix}'")

    # TODO: replace with Hikaru Pod().read() but note that usage is slightly different as this is a staticmethod
    @staticmethod
    def read(name: str, namespace: str) -> "RobustaPod":
        """Read pod definition from the API server"""
        return Pod.readNamespacedPod(name, namespace).obj

    @staticmethod
    def wait_for_pod_ready(pod_name: str, namespace: str, timeout: int = 60) -> "RobustaPod":
        """
        Waits for the pod to be in Running state

        Polls once per second for up to *timeout* seconds; 404s are tolerated while
        the pod is being created.
        :raises RuntimeError: when the pod is not Running after *timeout* seconds
        """
        for _ in range(timeout):  # retry for up to timeout seconds
            try:
                pod = RobustaPod().read(pod_name, namespace)
                if pod.status.phase == 'Running':
                    return pod
            except ApiException as e:
                if e.status != 404:  # re-raise the exception if it's not a NotFound error
                    raise
            time.sleep(1)
        # for/else: the loop only exits via return, so reaching the else means we timed out
        else:
            raise RuntimeError(f"Pod {pod_name} in namespace {namespace} is not ready after {timeout} seconds")
class RobustaDeployment(Deployment):
    """Deployment subclass with convenience constructors and readiness helpers."""

    @classmethod
    def from_image(cls: Type[T], name, image="busybox", cmd=None) -> T:
        """Build a minimal deployment named *name* running *image* with an optional command."""
        deployment: RobustaDeployment = hikaru.from_dict(
            yaml.safe_load(get_deployment_yaml(name, image)), RobustaDeployment
        )
        deployment.spec.template.spec.containers[0].command = prepare_pod_command(cmd)
        return deployment

    def get_images(self) -> Dict[str, str]:
        """Map image name -> tag for the deployment's pod-template containers."""
        return get_images(self.spec.template.spec.containers)

    @staticmethod
    def wait_for_deployment_ready(name: str, namespace: str, timeout: int = 60) -> "RobustaDeployment":
        """
        Waits for the deployment to be ready, i.e., the expected number of pods are running.
        """
        attempts_left = timeout  # one polling attempt per second
        while attempts_left > 0:
            try:
                deployment = RobustaDeployment().read(name, namespace)
                if deployment.status.readyReplicas == deployment.spec.replicas:
                    return deployment
            except ApiException as e:
                if e.status != 404:  # re-raise the exception if it's not a NotFound error
                    raise
            time.sleep(1)
            attempts_left -= 1
        raise RuntimeError(
            f"Deployment {name} in namespace {namespace} is not ready after {timeout} seconds")
class JobSecret(BaseModel):
    """A secret to create alongside a job, owned by (and garbage-collected with) the job's pod."""

    name: str  # name of the secret object
    data: Dict[str, str]  # secret payload (key -> value)
class RobustaJob(Job):
    """Job subclass with helpers for running one-off workloads and harvesting their logs."""

    def get_pods(self) -> List[RobustaPod]:
        """
        gets the pods associated with a job
        """
        pods: PodList = PodList.listNamespacedPod(
            self.metadata.namespace, label_selector=f"job-name = {self.metadata.name}"
        ).obj
        # we serialize and then deserialize to work around https://github.com/haxsaw/hikaru/issues/15
        return [hikaru.from_dict(pod.to_dict(), cls=RobustaPod) for pod in pods.items]

    def get_single_pod(self) -> RobustaPod:
        """
        like get_pods() but verifies that exactly one pod is associated with the job and returns that pod

        :raises Exception: when the job has zero pods or more than one pod
        """
        pods = self.get_pods()
        if len(pods) != 1:
            # fix: the previous message claimed "more pods than expected" even when zero pods were found
            raise Exception(f"expected exactly one pod for job but got {len(pods)}: {pods}")
        return pods[0]

    def create_job_owned_secret(self, job_secret: JobSecret):
        """
        Create a secret that will be auto-deleted when the job's pod is terminated.
        """
        # Due to inconsistent GC in K8s the OwnerReference needs to be the pod and not the job (found in Azure)
        job_pod = self.get_single_pod()
        robusta_owner_reference = OwnerReference(
            apiVersion="v1",
            kind="Pod",
            name=job_pod.metadata.name,
            uid=job_pod.metadata.uid,
            blockOwnerDeletion=False,
            controller=True,
        )
        secret = Secret(
            metadata=ObjectMeta(name=job_secret.name, ownerReferences=[robusta_owner_reference]), data=job_secret.data
        )
        try:
            return secret.createNamespacedSecret(job_pod.metadata.namespace).obj
        except Exception as e:
            logging.error(f"Failed to create secret {job_secret.name}", exc_info=True)
            raise e

    @classmethod
    def run_simple_job_spec(cls, spec, name, timeout, job_secret: Optional[JobSecret] = None) -> str:
        """
        Run a job from the given pod spec, wait for completion, and return the pod's logs.

        The job is always deleted afterwards with foreground propagation.
        """
        job = RobustaJob(
            metadata=ObjectMeta(namespace=INSTALLATION_NAMESPACE, name=to_kubernetes_name(name)),
            spec=JobSpec(
                backoffLimit=0,
                template=PodTemplateSpec(
                    spec=spec,
                ),
            ),
        )
        try:
            job = job.createNamespacedJob(job.metadata.namespace).obj
            job = hikaru.from_dict(job.to_dict(), cls=RobustaJob)  # temporary workaround for hikaru bug #15
            if job_secret:
                job.create_job_owned_secret(job_secret)
            job: RobustaJob = wait_until_job_complete(job, timeout)
            job = hikaru.from_dict(job.to_dict(), cls=RobustaJob)  # temporary workaround for hikaru bug #15
            pod = job.get_single_pod()
            return pod.get_logs()
        finally:
            # NOTE(review): if createNamespacedJob itself failed, this delete targets a job that
            # was never created and its error can mask the original exception - confirm
            job.deleteNamespacedJob(
                job.metadata.name,
                job.metadata.namespace,
                propagation_policy="Foreground",
            )

    @classmethod
    def run_simple_job(cls, image, command, timeout) -> str:
        """Run *command* in a one-off pod based on *image* and return its logs."""
        spec = PodSpec(
            containers=[
                Container(
                    name=to_kubernetes_name(image),
                    image=image,
                    command=prepare_pod_command(command),
                )
            ],
            restartPolicy="Never",
        )
        return cls.run_simple_job_spec(spec, name=image, timeout=timeout)
# make hikaru deserialize these kinds into our enriched subclasses
hikaru.register_version_kind_class(RobustaPod, Pod.apiVersion, Pod.kind)
hikaru.register_version_kind_class(RobustaDeployment, Deployment.apiVersion, Deployment.kind)
hikaru.register_version_kind_class(RobustaJob, Job.apiVersion, Job.kind)
import logging
import traceback
from abc import abstractmethod
from dataclasses import dataclass
from typing import List, Optional, Union
from hikaru.model.rel_1_26 import (
ClusterRole,
ClusterRoleBinding,
ConfigMap,
DaemonSet,
Deployment,
Event,
HorizontalPodAutoscaler,
Ingress,
Job,
Namespace,
NetworkPolicy,
Node,
PersistentVolume,
PersistentVolumeClaim,
Pod,
ReplicaSet,
Service,
ServiceAccount,
StatefulSet,
)
from hikaru.model.rel_1_26.v1 import ClusterRole as v1ClusterRole
from hikaru.model.rel_1_26.v1 import ClusterRoleBinding as v1ClusterRoleBinding
from hikaru.model.rel_1_26.v1 import ConfigMap as v1ConfigMap
from hikaru.model.rel_1_26.v1 import DaemonSet as v1DaemonSet
from hikaru.model.rel_1_26.v1 import Deployment as v1Deployment
from hikaru.model.rel_1_26.v1 import Event as v1Event
from hikaru.model.rel_1_26.v1 import (
HorizontalPodAutoscaler as v1HorizontalPodAutoscaler,
)
from hikaru.model.rel_1_26.v1 import Ingress as v1Ingress
from hikaru.model.rel_1_26.v1 import Job as v1Job
from hikaru.model.rel_1_26.v1 import Namespace as v1Namespace
from hikaru.model.rel_1_26.v1 import NetworkPolicy as v1NetworkPolicy
from hikaru.model.rel_1_26.v1 import Node as v1Node
from hikaru.model.rel_1_26.v1 import PersistentVolume as v1PersistentVolume
from hikaru.model.rel_1_26.v1 import PersistentVolumeClaim as v1PersistentVolumeClaim
from hikaru.model.rel_1_26.v1 import Pod as v1Pod
from hikaru.model.rel_1_26.v1 import ReplicaSet as v1ReplicaSet
from hikaru.model.rel_1_26.v1 import Service as v1Service
from hikaru.model.rel_1_26.v1 import ServiceAccount as v1ServiceAccount
from hikaru.model.rel_1_26.v1 import StatefulSet as v1StatefulSet
from hikaru.utils import Response
from pydantic import BaseModel
from robusta.integrations.kubernetes.custom_models import (
RobustaDeployment,
RobustaJob,
RobustaPod,
)
from ....core.model.events import ExecutionBaseEvent, ExecutionEventBaseParams
from ....core.reporting.base import FindingSubject
from ....core.reporting.consts import FindingSource, FindingSubjectType
from ....core.reporting.finding_subjects import KubeObjFindingSubject
from ..base_event import K8sBaseChangeEvent
# Maps a lowercase resource kind to a (is_namespaced, loader) tuple.
# `is_namespaced` tells ResourceLoader.read_resource whether the loader
# requires a `namespace` argument in addition to `name`.
LOADERS_MAPPINGS = {
    "pod": (True, RobustaPod.readNamespacedPod),
    "replicaset": (True, ReplicaSet.readNamespacedReplicaSet),
    "daemonset": (True, DaemonSet.readNamespacedDaemonSet),
    "deployment": (True, RobustaDeployment.readNamespacedDeployment),
    "statefulset": (True, StatefulSet.readNamespacedStatefulSet),
    "service": (True, Service.readNamespacedService),
    "event": (True, Event.readNamespacedEvent),
    "horizontalpodautoscaler": (True, HorizontalPodAutoscaler.readNamespacedHorizontalPodAutoscaler),
    "node": (False, Node.readNode),
    "clusterrole": (False, ClusterRole.readClusterRole),
    "clusterrolebinding": (False, ClusterRoleBinding.readClusterRoleBinding),
    "job": (True, RobustaJob.readNamespacedJob),
    "namespace": (False, Namespace.readNamespace),
    "serviceaccount": (True, ServiceAccount.readNamespacedServiceAccount),
    "persistentvolume": (False, PersistentVolume.readPersistentVolume),
    "persistentvolumeclaim": (True, PersistentVolumeClaim.readNamespacedPersistentVolumeClaim),
    "networkpolicy": (True, NetworkPolicy.readNamespacedNetworkPolicy),
    "configmap": (True, ConfigMap.readNamespacedConfigMap),
    "ingress": (True, Ingress.readNamespacedIngress),
}
class ResourceLoader:
    """Reads a Kubernetes resource by kind/name(/namespace) via LOADERS_MAPPINGS."""

    @staticmethod
    def read_resource(kind: str, name: str, namespace: Optional[str] = None) -> Response:
        """Load one resource from the API server.

        :param kind: case-insensitive resource kind (must be a LOADERS_MAPPINGS key)
        :param name: resource name
        :param namespace: required for namespaced kinds, ignored otherwise
        :raises Exception: when the kind has no registered loader
        """
        # Use .get(): plain indexing would raise KeyError before the explicit
        # "not found" check below could ever run (the original guard was dead code).
        resource_mapper = LOADERS_MAPPINGS.get(kind.lower())
        if not resource_mapper:
            raise Exception("resource loader not found")
        namespaced, loader = resource_mapper
        if namespaced:
            return loader(name=name, namespace=namespace)
        return loader(name=name)
class ResourceAttributes(ExecutionEventBaseParams):
    """Parameters identifying an arbitrary resource for KubernetesResourceEvent.from_params."""

    kind: str  # case-insensitive; must be one of the LOADERS_MAPPINGS keys
    name: str
    namespace: Optional[str] = None  # omitted for cluster-scoped kinds
@dataclass
class KubernetesResourceEvent(ExecutionBaseEvent):
    """Execution event carrying one already-loaded Kubernetes resource.

    Base class for the per-kind event classes below; Robusta subclasses
    (RobustaPod/RobustaDeployment/RobustaJob) are used for kinds with
    extended behaviour.
    """

    # The wrapped resource; None until assigned in __init__.
    obj: Optional[
        Union[
            RobustaPod, ReplicaSet, DaemonSet, RobustaDeployment, StatefulSet,
            Service, Event, HorizontalPodAutoscaler, Node, ClusterRole,
            ClusterRoleBinding, RobustaJob, Namespace, ServiceAccount,
            PersistentVolume, PersistentVolumeClaim, NetworkPolicy, ConfigMap,
            Ingress,
        ]
    ] = None

    def __init__(
        self,
        obj: Union[
            RobustaPod, ReplicaSet, DaemonSet, RobustaDeployment, StatefulSet,
            Service, Event, HorizontalPodAutoscaler, Node, ClusterRole,
            ClusterRoleBinding, RobustaJob, Namespace, ServiceAccount,
            PersistentVolume, PersistentVolumeClaim, NetworkPolicy, ConfigMap,
            Ingress,
        ],
        named_sinks: List[str],
    ):
        super().__init__(named_sinks=named_sinks)
        self.obj = obj

    def get_resource(
        self,
    ) -> Optional[
        Union[
            RobustaPod, ReplicaSet, DaemonSet, RobustaDeployment, StatefulSet,
            Service, Event, HorizontalPodAutoscaler, Node, ClusterRole,
            ClusterRoleBinding, RobustaJob, Namespace, ServiceAccount,
            PersistentVolume, PersistentVolumeClaim, NetworkPolicy, ConfigMap,
            Ingress,
        ]
    ]:
        """Return the wrapped resource (may be None)."""
        return self.obj

    def get_subject(self) -> FindingSubject:
        """Build the finding subject (name/kind/namespace/node/labels) for this resource."""
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )

    @classmethod
    def get_source(cls) -> FindingSource:
        """Findings produced from these events originate from the API server."""
        return FindingSource.KUBERNETES_API_SERVER

    @staticmethod
    def from_params(params: ResourceAttributes) -> Optional["KubernetesResourceEvent"]:
        """Load the resource described by `params`; returns None when loading fails."""
        try:
            obj = ResourceLoader.read_resource(kind=params.kind, name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load resource {params}", exc_info=True)
            return None
        return KubernetesResourceEvent(obj=obj, named_sinks=params.named_sinks)
@dataclass
class KubernetesAnyChangeEvent(K8sBaseChangeEvent):
    """Change event for any supported resource kind.

    Holds the object after the change (`obj`) and — presumably, judging by the
    name — the object before the change (`old_obj`); populated by the change
    pipeline (see K8sBaseChangeEvent).
    """

    # State after the change.
    obj: Optional[
        Union[
            RobustaDeployment, RobustaJob, RobustaPod, v1ClusterRole,
            v1ClusterRoleBinding, v1ConfigMap, v1DaemonSet, v1Event,
            v1HorizontalPodAutoscaler, v1Ingress, v1Namespace, v1NetworkPolicy,
            v1Node, v1PersistentVolume, v1PersistentVolumeClaim, v1ReplicaSet,
            v1Service, v1ServiceAccount, v1StatefulSet,
        ]
    ] = None
    # State before the change (None when there is no prior state).
    old_obj: Optional[
        Union[
            RobustaDeployment, RobustaJob, RobustaPod, v1ClusterRole,
            v1ClusterRoleBinding, v1ConfigMap, v1DaemonSet, v1Event,
            v1HorizontalPodAutoscaler, v1Ingress, v1Namespace, v1NetworkPolicy,
            v1Node, v1PersistentVolume, v1PersistentVolumeClaim, v1ReplicaSet,
            v1Service, v1ServiceAccount, v1StatefulSet,
        ]
    ] = None

    def get_resource(
        self,
    ) -> Optional[
        Union[
            RobustaDeployment, RobustaJob, RobustaPod, v1ClusterRole,
            v1ClusterRoleBinding, v1ConfigMap, v1DaemonSet, v1Event,
            v1HorizontalPodAutoscaler, v1Ingress, v1Namespace, v1NetworkPolicy,
            v1Node, v1PersistentVolume, v1PersistentVolumeClaim, v1ReplicaSet,
            v1Service, v1ServiceAccount, v1StatefulSet,
        ]
    ]:
        """Return the post-change object (may be None)."""
        return self.obj
class PodAttributes(ExecutionEventBaseParams):
    """Parameters identifying a Pod for PodEvent.from_params."""

    name: str
    namespace: str
@dataclass
class PodEvent(KubernetesResourceEvent):
    """Execution event scoped to a single Pod."""

    def __init__(self, obj: RobustaPod, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_pod(self) -> Optional[RobustaPod]:
        """Return the wrapped Pod."""
        return self.obj

    @staticmethod
    def from_params(params: PodAttributes) -> Optional["PodEvent"]:
        """Fetch the Pod from the API server; None when it cannot be read."""
        try:
            resource = RobustaPod.readNamespacedPod(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load Pod {params}", exc_info=True)
            return None
        return PodEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this Pod."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class PodChangeEvent(PodEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a Pod."""

    obj: Optional[RobustaPod] = None  # state after the change
    old_obj: Optional[RobustaPod] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_pod(self) -> Optional[RobustaPod]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to PodEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class ReplicaSetAttributes(ExecutionEventBaseParams):
    """Parameters identifying a ReplicaSet for ReplicaSetEvent.from_params."""

    name: str
    namespace: str
@dataclass
class ReplicaSetEvent(KubernetesResourceEvent):
    """Execution event scoped to a single ReplicaSet."""

    def __init__(self, obj: ReplicaSet, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_replicaset(self) -> Optional[ReplicaSet]:
        """Return the wrapped ReplicaSet."""
        return self.obj

    @staticmethod
    def from_params(params: ReplicaSetAttributes) -> Optional["ReplicaSetEvent"]:
        """Fetch the ReplicaSet from the API server; None when it cannot be read."""
        try:
            resource = ReplicaSet.readNamespacedReplicaSet(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load ReplicaSet {params}", exc_info=True)
            return None
        return ReplicaSetEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this ReplicaSet."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class ReplicaSetChangeEvent(ReplicaSetEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a ReplicaSet."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1ReplicaSet] = None  # state after the change
    old_obj: Optional[v1ReplicaSet] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_replicaset(self) -> Optional[v1ReplicaSet]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to ReplicaSetEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class DaemonSetAttributes(ExecutionEventBaseParams):
    """Parameters identifying a DaemonSet for DaemonSetEvent.from_params."""

    name: str
    namespace: str
@dataclass
class DaemonSetEvent(KubernetesResourceEvent):
    """Execution event scoped to a single DaemonSet."""

    def __init__(self, obj: DaemonSet, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_daemonset(self) -> Optional[DaemonSet]:
        """Return the wrapped DaemonSet."""
        return self.obj

    @staticmethod
    def from_params(params: DaemonSetAttributes) -> Optional["DaemonSetEvent"]:
        """Fetch the DaemonSet from the API server; None when it cannot be read."""
        try:
            resource = DaemonSet.readNamespacedDaemonSet(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load DaemonSet {params}", exc_info=True)
            return None
        return DaemonSetEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this DaemonSet."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class DaemonSetChangeEvent(DaemonSetEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a DaemonSet."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1DaemonSet] = None  # state after the change
    old_obj: Optional[v1DaemonSet] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_daemonset(self) -> Optional[v1DaemonSet]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to DaemonSetEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class DeploymentAttributes(ExecutionEventBaseParams):
    """Parameters identifying a Deployment for DeploymentEvent.from_params."""

    name: str
    namespace: str
@dataclass
class DeploymentEvent(KubernetesResourceEvent):
    """Execution event scoped to a single Deployment."""

    def __init__(self, obj: RobustaDeployment, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_deployment(self) -> Optional[RobustaDeployment]:
        """Return the wrapped Deployment."""
        return self.obj

    @staticmethod
    def from_params(params: DeploymentAttributes) -> Optional["DeploymentEvent"]:
        """Fetch the Deployment from the API server; None when it cannot be read."""
        try:
            resource = RobustaDeployment.readNamespacedDeployment(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load Deployment {params}", exc_info=True)
            return None
        return DeploymentEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this Deployment."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class DeploymentChangeEvent(DeploymentEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a Deployment."""

    obj: Optional[RobustaDeployment] = None  # state after the change
    old_obj: Optional[RobustaDeployment] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_deployment(self) -> Optional[RobustaDeployment]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to DeploymentEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class StatefulSetAttributes(ExecutionEventBaseParams):
    """Parameters identifying a StatefulSet for StatefulSetEvent.from_params."""

    name: str
    namespace: str
@dataclass
class StatefulSetEvent(KubernetesResourceEvent):
    """Execution event scoped to a single StatefulSet."""

    def __init__(self, obj: StatefulSet, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_statefulset(self) -> Optional[StatefulSet]:
        """Return the wrapped StatefulSet."""
        return self.obj

    @staticmethod
    def from_params(params: StatefulSetAttributes) -> Optional["StatefulSetEvent"]:
        """Fetch the StatefulSet from the API server; None when it cannot be read."""
        try:
            resource = StatefulSet.readNamespacedStatefulSet(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load StatefulSet {params}", exc_info=True)
            return None
        return StatefulSetEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this StatefulSet."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class StatefulSetChangeEvent(StatefulSetEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a StatefulSet."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1StatefulSet] = None  # state after the change
    old_obj: Optional[v1StatefulSet] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_statefulset(self) -> Optional[v1StatefulSet]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to StatefulSetEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class ServiceAttributes(ExecutionEventBaseParams):
    """Parameters identifying a Service for ServiceEvent.from_params."""

    name: str
    namespace: str
@dataclass
class ServiceEvent(KubernetesResourceEvent):
    """Execution event scoped to a single Service."""

    def __init__(self, obj: Service, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_service(self) -> Optional[Service]:
        """Return the wrapped Service."""
        return self.obj

    @staticmethod
    def from_params(params: ServiceAttributes) -> Optional["ServiceEvent"]:
        """Fetch the Service from the API server; None when it cannot be read."""
        try:
            resource = Service.readNamespacedService(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load Service {params}", exc_info=True)
            return None
        return ServiceEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this Service."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class ServiceChangeEvent(ServiceEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a Service."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1Service] = None  # state after the change
    old_obj: Optional[v1Service] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_service(self) -> Optional[v1Service]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to ServiceEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class EventAttributes(ExecutionEventBaseParams):
    """Parameters identifying a core/v1 Event object for EventEvent.from_params."""

    name: str
    namespace: str
@dataclass
class EventEvent(KubernetesResourceEvent):
    """Execution event scoped to a single Kubernetes Event object."""

    def __init__(self, obj: Event, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_event(self) -> Optional[Event]:
        """Return the wrapped Event object."""
        return self.obj

    @staticmethod
    def from_params(params: EventAttributes) -> Optional["EventEvent"]:
        """Fetch the Event from the API server; None when it cannot be read."""
        try:
            resource = Event.readNamespacedEvent(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load Event {params}", exc_info=True)
            return None
        return EventEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this Event object."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class EventChangeEvent(EventEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a Kubernetes Event object."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1Event] = None  # state after the change
    old_obj: Optional[v1Event] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_event(self) -> Optional[v1Event]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to EventEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class HorizontalPodAutoscalerAttributes(ExecutionEventBaseParams):
    """Parameters identifying an HPA for HorizontalPodAutoscalerEvent.from_params."""

    name: str
    namespace: str
@dataclass
class HorizontalPodAutoscalerEvent(KubernetesResourceEvent):
    """Execution event scoped to a single HorizontalPodAutoscaler."""

    def __init__(self, obj: HorizontalPodAutoscaler, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_horizontalpodautoscaler(self) -> Optional[HorizontalPodAutoscaler]:
        """Return the wrapped HorizontalPodAutoscaler."""
        return self.obj

    @staticmethod
    def from_params(params: HorizontalPodAutoscalerAttributes) -> Optional["HorizontalPodAutoscalerEvent"]:
        """Fetch the HPA from the API server; None when it cannot be read."""
        try:
            resource = HorizontalPodAutoscaler.readNamespacedHorizontalPodAutoscaler(
                name=params.name, namespace=params.namespace
            ).obj
        except Exception:
            logging.error(f"Could not load HorizontalPodAutoscaler {params}", exc_info=True)
            return None
        return HorizontalPodAutoscalerEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this HorizontalPodAutoscaler."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class HorizontalPodAutoscalerChangeEvent(HorizontalPodAutoscalerEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a HorizontalPodAutoscaler."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1HorizontalPodAutoscaler] = None  # state after the change
    old_obj: Optional[v1HorizontalPodAutoscaler] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_horizontalpodautoscaler(self) -> Optional[v1HorizontalPodAutoscaler]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to HorizontalPodAutoscalerEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class NodeAttributes(ExecutionEventBaseParams):
    """Parameters identifying a (cluster-scoped) Node for NodeEvent.from_params."""

    name: str
@dataclass
class NodeEvent(KubernetesResourceEvent):
    """Execution event scoped to a single Node."""

    def __init__(self, obj: Node, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_node(self) -> Optional[Node]:
        """Return the wrapped Node."""
        return self.obj

    @staticmethod
    def from_params(params: NodeAttributes) -> Optional["NodeEvent"]:
        """Fetch the Node from the API server; None when it cannot be read."""
        try:
            resource = Node.readNode(name=params.name).obj
        except Exception:
            logging.error(f"Could not load Node {params}", exc_info=True)
            return None
        return NodeEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this Node."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class NodeChangeEvent(NodeEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a Node."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1Node] = None  # state after the change
    old_obj: Optional[v1Node] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_node(self) -> Optional[v1Node]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to NodeEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class ClusterRoleAttributes(ExecutionEventBaseParams):
    """Parameters identifying a (cluster-scoped) ClusterRole for ClusterRoleEvent.from_params."""

    name: str
@dataclass
class ClusterRoleEvent(KubernetesResourceEvent):
    """Execution event scoped to a single ClusterRole."""

    def __init__(self, obj: ClusterRole, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_clusterrole(self) -> Optional[ClusterRole]:
        """Return the wrapped ClusterRole."""
        return self.obj

    @staticmethod
    def from_params(params: ClusterRoleAttributes) -> Optional["ClusterRoleEvent"]:
        """Fetch the ClusterRole from the API server; None when it cannot be read."""
        try:
            resource = ClusterRole.readClusterRole(name=params.name).obj
        except Exception:
            logging.error(f"Could not load ClusterRole {params}", exc_info=True)
            return None
        return ClusterRoleEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this ClusterRole."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class ClusterRoleChangeEvent(ClusterRoleEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a ClusterRole."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1ClusterRole] = None  # state after the change
    old_obj: Optional[v1ClusterRole] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_clusterrole(self) -> Optional[v1ClusterRole]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to ClusterRoleEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class ClusterRoleBindingAttributes(ExecutionEventBaseParams):
    """Parameters identifying a (cluster-scoped) ClusterRoleBinding for ClusterRoleBindingEvent.from_params."""

    name: str
@dataclass
class ClusterRoleBindingEvent(KubernetesResourceEvent):
    """Execution event scoped to a single ClusterRoleBinding."""

    def __init__(self, obj: ClusterRoleBinding, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_clusterrolebinding(self) -> Optional[ClusterRoleBinding]:
        """Return the wrapped ClusterRoleBinding."""
        return self.obj

    @staticmethod
    def from_params(params: ClusterRoleBindingAttributes) -> Optional["ClusterRoleBindingEvent"]:
        """Fetch the ClusterRoleBinding from the API server; None when it cannot be read."""
        try:
            resource = ClusterRoleBinding.readClusterRoleBinding(name=params.name).obj
        except Exception:
            logging.error(f"Could not load ClusterRoleBinding {params}", exc_info=True)
            return None
        return ClusterRoleBindingEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this ClusterRoleBinding."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class ClusterRoleBindingChangeEvent(ClusterRoleBindingEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a ClusterRoleBinding."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1ClusterRoleBinding] = None  # state after the change
    old_obj: Optional[v1ClusterRoleBinding] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_clusterrolebinding(self) -> Optional[v1ClusterRoleBinding]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to ClusterRoleBindingEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class JobAttributes(ExecutionEventBaseParams):
    """Parameters identifying a Job for JobEvent.from_params."""

    name: str
    namespace: str
@dataclass
class JobEvent(KubernetesResourceEvent):
    """Execution event scoped to a single Job."""

    def __init__(self, obj: RobustaJob, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_job(self) -> Optional[RobustaJob]:
        """Return the wrapped Job."""
        return self.obj

    @staticmethod
    def from_params(params: JobAttributes) -> Optional["JobEvent"]:
        """Fetch the Job from the API server; None when it cannot be read."""
        try:
            resource = RobustaJob.readNamespacedJob(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load Job {params}", exc_info=True)
            return None
        return JobEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this Job."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class JobChangeEvent(JobEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a Job."""

    obj: Optional[RobustaJob] = None  # state after the change
    old_obj: Optional[RobustaJob] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_job(self) -> Optional[RobustaJob]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to JobEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class NamespaceAttributes(ExecutionEventBaseParams):
    """Parameters identifying a (cluster-scoped) Namespace for NamespaceEvent.from_params."""

    name: str
@dataclass
class NamespaceEvent(KubernetesResourceEvent):
    """Execution event scoped to a single Namespace."""

    def __init__(self, obj: Namespace, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_namespace(self) -> Optional[Namespace]:
        """Return the wrapped Namespace."""
        return self.obj

    @staticmethod
    def from_params(params: NamespaceAttributes) -> Optional["NamespaceEvent"]:
        """Fetch the Namespace from the API server; None when it cannot be read."""
        try:
            resource = Namespace.readNamespace(name=params.name).obj
        except Exception:
            logging.error(f"Could not load Namespace {params}", exc_info=True)
            return None
        return NamespaceEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this Namespace."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class NamespaceChangeEvent(NamespaceEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a Namespace."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1Namespace] = None  # state after the change
    old_obj: Optional[v1Namespace] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_namespace(self) -> Optional[v1Namespace]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to NamespaceEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class ServiceAccountAttributes(ExecutionEventBaseParams):
    """Parameters identifying a ServiceAccount for ServiceAccountEvent.from_params."""

    name: str
    namespace: str
@dataclass
class ServiceAccountEvent(KubernetesResourceEvent):
    """Execution event scoped to a single ServiceAccount."""

    def __init__(self, obj: ServiceAccount, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_serviceaccount(self) -> Optional[ServiceAccount]:
        """Return the wrapped ServiceAccount."""
        return self.obj

    @staticmethod
    def from_params(params: ServiceAccountAttributes) -> Optional["ServiceAccountEvent"]:
        """Fetch the ServiceAccount from the API server; None when it cannot be read."""
        try:
            resource = ServiceAccount.readNamespacedServiceAccount(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load ServiceAccount {params}", exc_info=True)
            return None
        return ServiceAccountEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this ServiceAccount."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class ServiceAccountChangeEvent(ServiceAccountEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a ServiceAccount."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1ServiceAccount] = None  # state after the change
    old_obj: Optional[v1ServiceAccount] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_serviceaccount(self) -> Optional[v1ServiceAccount]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to ServiceAccountEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class PersistentVolumeAttributes(ExecutionEventBaseParams):
    """Parameters identifying a (cluster-scoped) PersistentVolume for PersistentVolumeEvent.from_params."""

    name: str
@dataclass
class PersistentVolumeEvent(KubernetesResourceEvent):
    """Execution event scoped to a single PersistentVolume."""

    def __init__(self, obj: PersistentVolume, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_persistentvolume(self) -> Optional[PersistentVolume]:
        """Return the wrapped PersistentVolume."""
        return self.obj

    @staticmethod
    def from_params(params: PersistentVolumeAttributes) -> Optional["PersistentVolumeEvent"]:
        """Fetch the PersistentVolume from the API server; None when it cannot be read."""
        try:
            resource = PersistentVolume.readPersistentVolume(name=params.name).obj
        except Exception:
            logging.error(f"Could not load PersistentVolume {params}", exc_info=True)
            return None
        return PersistentVolumeEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this PersistentVolume."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class PersistentVolumeChangeEvent(PersistentVolumeEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a PersistentVolume."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1PersistentVolume] = None  # state after the change
    old_obj: Optional[v1PersistentVolume] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_persistentvolume(self) -> Optional[v1PersistentVolume]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to PersistentVolumeEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class PersistentVolumeClaimAttributes(ExecutionEventBaseParams):
    """Parameters identifying a PersistentVolumeClaim for PersistentVolumeClaimEvent.from_params."""

    name: str
    namespace: str
@dataclass
class PersistentVolumeClaimEvent(KubernetesResourceEvent):
    """Execution event scoped to a single PersistentVolumeClaim."""

    def __init__(self, obj: PersistentVolumeClaim, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_persistentvolumeclaim(self) -> Optional[PersistentVolumeClaim]:
        """Return the wrapped PersistentVolumeClaim."""
        return self.obj

    @staticmethod
    def from_params(params: PersistentVolumeClaimAttributes) -> Optional["PersistentVolumeClaimEvent"]:
        """Fetch the PersistentVolumeClaim from the API server; None when it cannot be read."""
        try:
            resource = PersistentVolumeClaim.readNamespacedPersistentVolumeClaim(
                name=params.name, namespace=params.namespace
            ).obj
        except Exception:
            logging.error(f"Could not load PersistentVolumeClaim {params}", exc_info=True)
            return None
        return PersistentVolumeClaimEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this PersistentVolumeClaim."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class PersistentVolumeClaimChangeEvent(PersistentVolumeClaimEvent, KubernetesAnyChangeEvent):
    """Change (create/update/delete) event for a PersistentVolumeClaim."""

    # Union[X] with a single member is just X — simplified annotations.
    obj: Optional[v1PersistentVolumeClaim] = None  # state after the change
    old_obj: Optional[v1PersistentVolumeClaim] = None  # prior state — naming suggests pre-change; confirm in pipeline

    def get_persistentvolumeclaim(self) -> Optional[v1PersistentVolumeClaim]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        # Identical to PersistentVolumeClaimEvent.get_subject; kept as an explicit override.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class NetworkPolicyAttributes(ExecutionEventBaseParams):
    """Parameters identifying a NetworkPolicy for NetworkPolicyEvent.from_params."""

    name: str
    namespace: str
@dataclass
class NetworkPolicyEvent(KubernetesResourceEvent):
    """Execution event scoped to a single NetworkPolicy."""

    def __init__(self, obj: NetworkPolicy, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_networkpolicy(self) -> Optional[NetworkPolicy]:
        """Return the wrapped NetworkPolicy."""
        return self.obj

    @staticmethod
    def from_params(params: NetworkPolicyAttributes) -> Optional["NetworkPolicyEvent"]:
        """Fetch the NetworkPolicy from the API server; None when it cannot be read."""
        try:
            resource = NetworkPolicy.readNamespacedNetworkPolicy(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load NetworkPolicy {params}", exc_info=True)
            return None
        return NetworkPolicyEvent(obj=resource, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        """Build the finding subject describing this NetworkPolicy."""
        meta = self.obj.metadata
        return FindingSubject(
            name=meta.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=meta.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=meta.labels,
            annotations=meta.annotations,
        )
@dataclass
class NetworkPolicyChangeEvent(NetworkPolicyEvent, KubernetesAnyChangeEvent):
    """Change-stream variant of NetworkPolicyEvent (see KIND_TO_EVENT_CLASS)."""

    # obj / old_obj: post- and pre-change objects — inferred from the field
    # names; confirm against the change-event dispatch site.
    obj: Optional[Union[v1NetworkPolicy]] = None
    old_obj: Optional[Union[v1NetworkPolicy]] = None

    def get_networkpolicy(self) -> Optional[Union[v1NetworkPolicy]]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class ConfigMapAttributes(ExecutionEventBaseParams):
    """Parameters identifying a ConfigMap by name and namespace."""

    name: str
    namespace: str
@dataclass
class ConfigMapEvent(KubernetesResourceEvent):
    """Execution event scoped to a single ConfigMap."""

    def __init__(self, obj: ConfigMap, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_configmap(self) -> Optional[ConfigMap]:
        # The wrapped Kubernetes object stored by KubernetesResourceEvent.
        return self.obj

    @staticmethod
    def from_params(params: ConfigMapAttributes) -> Optional["ConfigMapEvent"]:
        # Build an event by reading the live object from the cluster API;
        # returns None (after logging) if the read fails.
        try:
            obj = ConfigMap.readNamespacedConfigMap(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load ConfigMap {params}", exc_info=True)
            return None
        return ConfigMapEvent(obj=obj, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        # Identify the resource a finding refers to, from the object's metadata.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
@dataclass
class ConfigMapChangeEvent(ConfigMapEvent, KubernetesAnyChangeEvent):
    """Change-stream variant of ConfigMapEvent (see KIND_TO_EVENT_CLASS)."""

    # obj / old_obj: post- and pre-change objects — inferred from the field
    # names; confirm against the change-event dispatch site.
    obj: Optional[Union[v1ConfigMap]] = None
    old_obj: Optional[Union[v1ConfigMap]] = None

    def get_configmap(self) -> Optional[Union[v1ConfigMap]]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
class IngressAttributes(ExecutionEventBaseParams):
    """Parameters identifying an Ingress by name and namespace."""

    name: str
    namespace: str
@dataclass
class IngressEvent(KubernetesResourceEvent):
    """Execution event scoped to a single Ingress."""

    def __init__(self, obj: Ingress, named_sinks: List[str]):
        super().__init__(obj=obj, named_sinks=named_sinks)

    def get_ingress(self) -> Optional[Ingress]:
        # The wrapped Kubernetes object stored by KubernetesResourceEvent.
        return self.obj

    @staticmethod
    def from_params(params: IngressAttributes) -> Optional["IngressEvent"]:
        # Build an event by reading the live object from the cluster API;
        # returns None (after logging) if the read fails.
        try:
            obj = Ingress.readNamespacedIngress(name=params.name, namespace=params.namespace).obj
        except Exception:
            logging.error(f"Could not load Ingress {params}", exc_info=True)
            return None
        return IngressEvent(obj=obj, named_sinks=params.named_sinks)

    def get_subject(self) -> FindingSubject:
        # Identify the resource a finding refers to, from the object's metadata.
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
@dataclass
class IngressChangeEvent(IngressEvent, KubernetesAnyChangeEvent):
    """Change-stream variant of IngressEvent (see KIND_TO_EVENT_CLASS)."""

    # obj / old_obj: post- and pre-change objects — inferred from the field
    # names; confirm against the change-event dispatch site.
    obj: Optional[Union[v1Ingress]] = None
    old_obj: Optional[Union[v1Ingress]] = None

    def get_ingress(self) -> Optional[Union[v1Ingress]]:
        return self.obj

    def get_subject(self) -> FindingSubject:
        return FindingSubject(
            name=self.obj.metadata.name,
            subject_type=FindingSubjectType.from_kind(self.obj.kind),
            namespace=self.obj.metadata.namespace,
            node=KubeObjFindingSubject.get_node_name(self.obj),
            labels=self.obj.metadata.labels,
            annotations=self.obj.metadata.annotations,
        )
# Maps a lowercase Kubernetes kind to its corresponding *ChangeEvent class,
# used to construct the right event type for an incoming change.
KIND_TO_EVENT_CLASS = {
    "pod": PodChangeEvent,
    "replicaset": ReplicaSetChangeEvent,
    "daemonset": DaemonSetChangeEvent,
    "deployment": DeploymentChangeEvent,
    "statefulset": StatefulSetChangeEvent,
    "service": ServiceChangeEvent,
    "event": EventChangeEvent,
    "horizontalpodautoscaler": HorizontalPodAutoscalerChangeEvent,
    "node": NodeChangeEvent,
    "clusterrole": ClusterRoleChangeEvent,
    "clusterrolebinding": ClusterRoleBindingChangeEvent,
    "job": JobChangeEvent,
    "namespace": NamespaceChangeEvent,
    "serviceaccount": ServiceAccountChangeEvent,
    "persistentvolume": PersistentVolumeChangeEvent,
    "persistentvolumeclaim": PersistentVolumeClaimChangeEvent,
    "networkpolicy": NetworkPolicyChangeEvent,
    "configmap": ConfigMapChangeEvent,
    "ingress": IngressChangeEvent,
} | /robusta_cli-0.10.22a1-py3-none-any.whl/robusta/integrations/kubernetes/autogenerated/events.py | 0.817647 | 0.169131 | events.py | pypi |
import base64
import os
import tempfile
import uuid
from typing import List
from robusta.core.reporting.blocks import FileBlock
from robusta.core.reporting.utils import JPG_SUFFIX, PNG_SUFFIX, file_suffix_match, is_image
from robusta.integrations.msteams.msteams_elements.msteams_images import MsTeamsImages
class MsTeamsAdaptiveCardFilesImage:
    """Converts FileBlock images (jpg/png/svg) into base64 data URLs that an
    MS Teams adaptive card can embed."""

    @classmethod
    def create_files_for_presentation(cls, file_blocks: List[FileBlock]) -> "MsTeamsImages | list":
        # Encode every image block; non-image files are ignored here.
        # NOTE(review): returns an empty list when there are no images but an
        # MsTeamsImages element otherwise — callers must handle both shapes.
        encoded_images = []
        image_file_blocks = [file_block for file_block in file_blocks if is_image(file_block.filename)]
        for image_file_block in image_file_blocks:
            encoded_images.append(
                cls.__convert_bytes_to_base_64_url(image_file_block.filename, image_file_block.contents)
            )
        if len(encoded_images) == 0:
            return []
        return MsTeamsImages(encoded_images)

    @classmethod
    def __get_tmp_file_path(cls):
        # Unique temp-file path (no suffix; callers append one).
        return os.path.join(tempfile.gettempdir(), str(uuid.uuid1()))

    @classmethod
    def __convert_bytes_to_base_64_url(cls, file_name: str, image_bytes: bytes):
        # Dispatch on the file suffix; anything that is neither jpg nor png
        # is treated as svg.
        if file_suffix_match(file_name, JPG_SUFFIX):
            return cls.__jpg_convert_bytes_to_base_64_url(image_bytes)
        if file_suffix_match(file_name, PNG_SUFFIX):
            return cls.__png_convert_bytes_to_base_64_url(image_bytes)
        return cls.__svg_convert_bytes_to_jpg(image_bytes)

    @classmethod
    def __jpg_convert_bytes_to_base_64_url(cls, jpg_bytes: bytes):
        # JPEG can be embedded directly as a base64 data URL.
        b64_string = base64.b64encode(jpg_bytes).decode("utf-8")
        return "data:image/jpeg;base64,{0}".format(b64_string)

    # msteams cant read parsing of url to 'data:image/png;base64,...
    @classmethod
    def __png_convert_bytes_to_base_64_url(cls, png_bytes: bytes):
        # Convert PNG -> JPG via Pillow, using on-disk temp files, then
        # encode the JPG.
        # NOTE(review): the temp files are not removed if conversion raises;
        # consider try/finally cleanup.
        from PIL import Image

        png_file_path = cls.__get_tmp_file_path() + PNG_SUFFIX
        jpg_file_path = cls.__get_tmp_file_path() + JPG_SUFFIX
        with open(png_file_path, "wb") as f:
            f.write(png_bytes)
        im = Image.open(png_file_path)
        rgb_im = im.convert("RGB")
        rgb_im.save(jpg_file_path)
        with open(jpg_file_path, "rb") as f:
            jpg_bytes = f.read()
        os.remove(png_file_path)
        os.remove(jpg_file_path)
        return cls.__jpg_convert_bytes_to_base_64_url(jpg_bytes)

    # msteams cant read parsing of url to svg image
    @classmethod
    def __svg_convert_bytes_to_jpg(cls, svg_bytes: bytes):
        # Rasterize the SVG to PNG, then reuse the PNG -> JPG path.
        from cairosvg import svg2png

        return cls.__png_convert_bytes_to_base_64_url(svg2png(bytestring=svg_bytes)) | /robusta_cli-0.10.22a1-py3-none-any.whl/robusta/integrations/msteams/msteams_adaptive_card_files_image.py | 0.483892 | 0.197425 | msteams_adaptive_card_files_image.py | pypi |
import json
import logging
from typing import List
import requests
from robusta.core.reporting import (
FileBlock,
Finding,
FindingSeverity,
HeaderBlock,
KubernetesDiffBlock,
ListBlock,
MarkdownBlock,
TableBlock,
)
from robusta.core.reporting.base import FindingStatus
from robusta.integrations.msteams.msteams_adaptive_card_files import MsTeamsAdaptiveCardFiles
from robusta.integrations.msteams.msteams_elements.msteams_base import MsTeamsBase
from robusta.integrations.msteams.msteams_elements.msteams_card import MsTeamsCard
from robusta.integrations.msteams.msteams_elements.msteams_column import MsTeamsColumn
from robusta.integrations.msteams.msteams_elements.msteams_images import MsTeamsImages
from robusta.integrations.msteams.msteams_elements.msteams_table import MsTeamsTable
from robusta.integrations.msteams.msteams_elements.msteams_text_block import MsTeamsTextBlock
class MsTeamsMsg:
    """Accumulates adaptive-card elements for one finding and posts the
    finished card to an MS Teams webhook URL."""

    # actual size according to the DOC is ~28K.
    # it's hard to determine the real size because for example there can be large images that doesn't count
    # and converting the map to json doesn't give us an exact indication of the size so we need to take
    # a safe zone of less then 28K
    MAX_SIZE_IN_BYTES = 1024 * 20

    def __init__(self, webhook_url: str):
        self.entire_msg: List[MsTeamsBase] = []  # blocks already committed to the card
        self.current_section: List[MsTeamsBase] = []  # blocks of the section currently being built
        self.text_file_containers = []  # (text block, file lines) pairs; filled up to the size cap in send()
        self.webhook_url = webhook_url

    def write_title_and_desc(self, platform_enabled: bool, finding: Finding, cluster_name: str, account_id: str):
        """Write the card header: title, optional Robusta-UI action links,
        source cluster, and the finding description."""
        status: FindingStatus = (
            FindingStatus.RESOLVED if finding.title.startswith("[RESOLVED]") else FindingStatus.FIRING
        )
        title = finding.title.removeprefix("[RESOLVED] ")
        title = self.__build_msteams_title(title, status, finding.severity, finding.add_silence_url)
        block = MsTeamsTextBlock(text=f"{title}", font_size="extraLarge")
        self.__write_to_entire_msg([block])
        if platform_enabled:  # add link to the Robusta ui, if it's configured
            silence_url = finding.get_prometheus_silence_url(account_id, cluster_name)
            actions = f"[🔎 Investigate]({finding.get_investigate_uri(account_id, cluster_name)})"
            if finding.add_silence_url:
                actions = f"{actions} [🔕 Silence]({silence_url})"
            for video_link in finding.video_links:
                actions = f"{actions} [🎬 {video_link.name}]({video_link.url})"
            self.__write_to_entire_msg([MsTeamsTextBlock(text=actions)])
        self.__write_to_entire_msg([MsTeamsTextBlock(text=f"**Source:** *{cluster_name}*")])
        if finding.description is not None:
            block = MsTeamsTextBlock(text=finding.description)
            self.__write_to_entire_msg([block])

    @classmethod
    def __build_msteams_title(
        cls, title: str, status: FindingSeverity if False else FindingStatus, severity: FindingSeverity, add_silence_url: bool
    ) -> str:
        # Status prefix is only shown for findings that can be silenced.
        status_str: str = f"{status.to_emoji()} {status.name.lower()} - " if add_silence_url else ""
        return f"{status_str}{severity.to_emoji()} {severity.name} - **{title}**"

    def write_current_section(self):
        """Flush the pending section into the card, preceded by a separator row."""
        if len(self.current_section) == 0:
            return
        space_block = MsTeamsTextBlock(text=" ", font_size="small")
        separator_block = MsTeamsTextBlock(text=" ", separator=True)
        underline_block = MsTeamsColumn()
        underline_block.add_column(items=[space_block, separator_block], width_stretch=True)
        self.__write_to_entire_msg([underline_block])
        self.__write_to_entire_msg(self.current_section)
        self.current_section = []

    def __write_to_entire_msg(self, blocks: List[MsTeamsBase]):
        self.entire_msg += blocks

    def __write_to_current_section(self, blocks: List[MsTeamsBase]):
        self.current_section += blocks

    def __sub_section_separator(self):
        # Thin underscore divider between sub-sections; skipped when the
        # section is still empty.
        if len(self.current_section) == 0:
            return
        space_block = MsTeamsTextBlock(text=" ", font_size="small")
        separator_block = MsTeamsTextBlock(text="_" * 30, font_size="small", horizontal_alignment="center")
        self.__write_to_current_section([space_block, separator_block, space_block, space_block])

    def upload_files(self, file_blocks: List[FileBlock]):
        """Render file blocks (images and text files) into the current section."""
        msteams_files = MsTeamsAdaptiveCardFiles()
        block_list: List[MsTeamsBase] = msteams_files.upload_files(file_blocks)
        if len(block_list) > 0:
            self.__sub_section_separator()
        self.text_file_containers += msteams_files.get_text_files_containers_list()
        self.__write_to_current_section(block_list)

    def table(self, table_block: TableBlock):
        blocks: List[MsTeamsBase] = []
        if table_block.table_name:
            blocks.append(MsTeamsTextBlock(table_block.table_name))
        blocks.append(MsTeamsTable(list(table_block.headers), table_block.render_rows(), table_block.column_width))
        self.__write_to_current_section(blocks)

    def items_list(self, block: ListBlock):
        self.__sub_section_separator()
        for line in block.items:
            bullet_lines = "\n- " + line + "\n"
            self.__write_to_current_section([MsTeamsTextBlock(bullet_lines)])

    def diff(self, block: KubernetesDiffBlock):
        # Render each k8s diff entry as a "path: old -> new" bullet.
        rows = [f"*{diff.formatted_path}*: {diff.other_value} -> {diff.value}" for diff in block.diffs]
        list_blocks = ListBlock(rows)
        self.items_list(list_blocks)

    def markdown_block(self, block: MarkdownBlock):
        if not block.text:
            return
        self.__write_to_current_section([MsTeamsTextBlock(block.text)])

    def divider_block(self):
        self.__write_to_current_section([MsTeamsTextBlock("\n\n")])

    def header_block(self, block: HeaderBlock):
        current_header_string = block.text + "\n\n"
        self.__write_to_current_section([MsTeamsTextBlock(current_header_string, font_size="large")])

    # dont include the base 64 images in the total size calculation
    def _put_text_files_data_up_to_max_limit(self, complete_card_map: dict):
        """Fill the text-file containers with as many trailing file lines as
        fit within MAX_SIZE_IN_BYTES, adding lines round-robin from the end
        of each file (last lines first)."""
        curr_images_len = 0
        for element in self.entire_msg:
            if isinstance(element, MsTeamsImages):
                curr_images_len += element.get_images_len_in_bytes()
        max_len_left = self.MAX_SIZE_IN_BYTES - (self.__get_current_card_len(complete_card_map) - curr_images_len)
        curr_line = 0
        while True:
            line_added = False
            curr_line += 1
            for text_element, lines in self.text_file_containers:
                if len(lines) < curr_line:
                    continue
                # Take the curr_line-th line from the end and prepend it, so
                # the file text stays in original order.
                line = lines[len(lines) - curr_line]
                max_len_left -= len(line)
                if max_len_left < 0:
                    return
                new_text_value = line + text_element.get_text_from_block()
                text_element.set_text_from_block(new_text_value)
                line_added = True
            if not line_added:
                return

    def send(self):
        """Finalize the card and POST it to the webhook; errors are logged,
        never raised."""
        try:
            complete_card_map: dict = MsTeamsCard(self.entire_msg).get_map_value()
            self._put_text_files_data_up_to_max_limit(complete_card_map)
            response = requests.post(self.webhook_url, json=complete_card_map)
            if response.status_code not in [200, 201]:
                logging.error(f"Error sending to ms teams json: {complete_card_map} error: {response.reason}")
            if response.text and "error" in response.text.lower():  # teams error indication is in the text only :(
                logging.error(f"Failed to send message to teams. error: {response.text} message: {complete_card_map}")
        except Exception as e:
            logging.error(f"error sending message to msteams\ne={e}\n")

    @classmethod
    def __get_current_card_len(cls, complete_card_map: dict):
        # Approximate the payload size via its pretty-printed JSON length.
        return len(json.dumps(complete_card_map, ensure_ascii=True, indent=2)) | /robusta_cli-0.10.22a1-py3-none-any.whl/robusta/integrations/msteams/msteams_msg.py | 0.688678 | 0.200832 | msteams_msg.py | pypi |
import logging
from robusta.core.reporting import (
BaseBlock,
CallbackBlock,
DividerBlock,
Enrichment,
FileBlock,
Finding,
HeaderBlock,
KubernetesDiffBlock,
ListBlock,
MarkdownBlock,
TableBlock,
)
from robusta.integrations.msteams.msteams_msg import MsTeamsMsg
class MsTeamsSender:
    """Converts a Robusta Finding into MsTeamsMsg blocks and sends it."""

    @classmethod
    def __to_ms_teams(cls, block: BaseBlock, msg: MsTeamsMsg):
        # Dispatch a generic reporting block to the matching MsTeamsMsg
        # rendering method; unsupported block types are logged, not raised.
        if isinstance(block, MarkdownBlock):
            msg.markdown_block(block)
        elif isinstance(block, DividerBlock):
            msg.divider_block()
        elif isinstance(block, HeaderBlock):
            msg.header_block(block)
        elif isinstance(block, TableBlock):
            msg.table(block)
        elif isinstance(block, ListBlock):
            msg.items_list(block)
        elif isinstance(block, KubernetesDiffBlock):
            msg.diff(block)
        elif isinstance(block, CallbackBlock):
            logging.error("CallbackBlock not supported for msteams")
        else:
            logging.error(f"cannot convert block of type {type(block)} to msteams format block: {block}")

    @classmethod
    def __split_block_to_files_and_all_the_rest(cls, enrichment: Enrichment):
        # Partition an enrichment's blocks into (file blocks, other blocks).
        files_blocks = []
        other_blocks = []
        for block in enrichment.blocks:
            if isinstance(block, FileBlock):
                files_blocks.append(block)
            else:
                other_blocks.append(block)
        return files_blocks, other_blocks

    @classmethod
    def send_finding_to_ms_teams(
        cls, webhook_url: str, finding: Finding, platform_enabled: bool, cluster_name: str, account_id: str
    ):
        """Render the finding (title, each enrichment as its own section)
        and post the resulting card to the webhook."""
        msg = MsTeamsMsg(webhook_url)
        msg.write_title_and_desc(platform_enabled, finding, cluster_name, account_id)
        for enrichment in finding.enrichments:
            files_blocks, other_blocks = cls.__split_block_to_files_and_all_the_rest(enrichment)
            for block in other_blocks:
                cls.__to_ms_teams(block, msg)
            msg.upload_files(files_blocks)
            msg.write_current_section()
        msg.send() | /robusta_cli-0.10.22a1-py3-none-any.whl/robusta/integrations/msteams/sender.py | 0.530966 | 0.184602 | sender.py | pypi |
import uuid
from typing import List
from robusta.core.reporting import FileBlock
from robusta.integrations.msteams.msteams_elements.msteams_action import MsTeamsAction
from robusta.integrations.msteams.msteams_elements.msteams_column import MsTeamsColumn
from robusta.integrations.msteams.msteams_elements.msteams_container import MsTeamsContainer
from robusta.integrations.msteams.msteams_elements.msteams_text_block import MsTeamsTextBlock
"""
there are 3 elements for each text file":
1. row that contains 'open file-name' , and 'close file-name' buttons.
the open is visible and the close is invisible at init stage
2. container that contains the last lines of the file - invisible at the init stage
3. row that contains 'close file-name' buttons, invisible at init stage
when a open button is pressed for specific file, the close file for the current file become visible,
and also the container presenting the file text. for all the other files, the 'open file-name' buttons
become visible and all the other buttons become invisible
no point in creating object for each file since it will only hold a few variables such as the file name
and keys and the logic is mainly to create the columns and text containers for all the files combine
"""
class MsTeamsAdaptiveCardFilesText:
    """Builds the show/hide button rows and text containers for the text
    files attached to an MS Teams adaptive card (see module comment)."""

    def __init__(self):
        # Per-file UUID keys for the four toggleable elements.
        self.open_key_list = []
        self.close_start_key_list = []
        self.close_end_key_list = []
        self.text_file_presentation_key_list = []
        # Per-file UI elements, parallel to the key lists above.
        self.open_text_list = []
        self.close_start_text_list = []
        self.close_end_text_list = []
        self.text_file_presentation_list = []
        self.action_open_text_list = []
        self.action_close_start_text_list = []
        self.action_close_end_text_list = []
        self.file_name_list = []
        # (text block, file lines) pairs; the blocks start empty and are
        # filled later, up to the card size limit.
        self.text_map_and_single_text_lines_list = []

    def create_files_for_presentation(self, file_blocks: List[FileBlock]) -> list:
        """Build all card elements for the given text files; returns [] when
        there are no text files."""
        file_content_list = []
        for file_block in file_blocks:
            if not self.__is_txt_file(file_block.filename):
                continue
            self.__create_new_keys()
            self.file_name_list.append(file_block.filename)
            file_content_list.append(file_block.contents)
        if len(self.open_key_list) == 0:
            return []
        for index in range(len(self.open_key_list)):
            self.__manage_blocks_for_single_file(index, self.file_name_list[index], file_content_list[index])
        return self.__manage_all_text_to_send()

    def get_text_files_containers_list(self):
        return self.text_map_and_single_text_lines_list

    def __create_new_keys(self):
        # One fresh UUID per toggleable element of the new file.
        self.open_key_list.append(str(uuid.uuid4()))
        self.close_start_key_list.append(str(uuid.uuid4()))
        self.close_end_key_list.append(str(uuid.uuid4()))
        self.text_file_presentation_key_list.append(str(uuid.uuid4()))

    def __manage_blocks_for_single_file(self, index, file_name: str, content: bytes):
        # Create the open/close buttons, their toggle actions, and the text
        # container for one file.
        open_text_action = self.__action(index, open=True, title="press to open")
        close_text_action = self.__action(index, open=False, title="press to close")
        open_text = MsTeamsTextBlock("***open " + file_name + "***", is_subtle=False)
        close_start = MsTeamsTextBlock("***close " + file_name + "***", is_subtle=False)
        close_end = MsTeamsTextBlock("***close " + file_name + "***", is_subtle=False)
        self.open_text_list.append(open_text)
        self.close_start_text_list.append(close_start)
        self.close_end_text_list.append(close_end)
        self.action_open_text_list.append(open_text_action)
        self.action_close_start_text_list.append(close_text_action)
        self.action_close_end_text_list.append(close_text_action)
        self.text_file_presentation_list.append(
            self.__present_text_file_block(self.text_file_presentation_key_list[index], content.decode("utf-8"))
        )

    def __manage_all_text_to_send(self):
        # Assemble: button row, then every text container, then the bottom
        # close-button row.
        top_column_set = MsTeamsColumn()
        bottom_column_set = MsTeamsColumn()
        for index in range(len(self.open_text_list)):
            top_column_set.add_column(
                key=self.open_key_list[index],
                items=[self.open_text_list[index]],
                action=self.action_open_text_list[index],
            )
            top_column_set.add_column(
                is_visible=False,
                key=self.close_start_key_list[index],
                items=[self.close_start_text_list[index]],
                action=self.action_close_start_text_list[index],
            )
            # spaces between files
            top_column_set.add_column(items=[MsTeamsTextBlock(" ")])
            top_column_set.add_column(items=[MsTeamsTextBlock(" ")])
            bottom_column_set.add_column(
                is_visible=False,
                key=self.close_end_key_list[index],
                items=[self.close_end_text_list[index]],
                action=self.action_close_end_text_list[index],
            )
        list_to_return = [top_column_set]
        list_to_return += self.text_file_presentation_list
        list_to_return.append(bottom_column_set)
        return list_to_return

    def __action(self, index, open: bool, title: str) -> "MsTeamsAction":
        # Compute which element keys become visible/invisible when this
        # file's open (open=True) or close (open=False) button is pressed.
        # NOTE(review): the `open` parameter shadows the builtin open().
        visible_elements_map = {False: [], True: []}
        curr_key = self.open_key_list[index]
        for key in self.open_key_list:
            visible = (not open) or (curr_key != key)
            visible_elements_map[visible].append(key)
        curr_key = self.close_start_key_list[index]
        for key in self.close_start_key_list:
            visible = open and (curr_key == key)
            visible_elements_map[visible].append(key)
        curr_key = self.close_end_key_list[index]
        for key in self.close_end_key_list:
            visible = open and (curr_key == key)
            visible_elements_map[visible].append(key)
        curr_key = self.text_file_presentation_key_list[index]
        for key in self.text_file_presentation_key_list:
            visible = open and (curr_key == key)
            visible_elements_map[visible].append(key)
        return MsTeamsAction(title, visible_keys=visible_elements_map[True], invisible_keys=visible_elements_map[False])

    # there is a limit to the number of letters you can write - dont know what it is !!!
    # /t doesn't work so we need to simulate spaces (which are trimmed so we use '. . . ')
    def __present_text_file_block(self, key: str, text: str):
        # Split the file into lines and create an initially-empty text block;
        # lines are filled in later, bounded by the card size limit.
        text_lines_list = []
        new_text = text.replace("\t", ". . . ")
        for line in new_text.split("\n"):
            text_lines_list.append(line + "\n\n")
        # will be completed later
        text_block = MsTeamsTextBlock("", wrap=True, weight="bolder", is_visible=True)
        self.text_map_and_single_text_lines_list.append([text_block, text_lines_list])
        return MsTeamsContainer(key=key, elements=[text_block])

    @classmethod
    def __is_txt_file(cls, file_name: str) -> bool:
        # Case-insensitive suffix check for the supported text formats.
        txt_suffixes = [".txt", ".json", ".yaml", ".log"]
        for prefix in txt_suffixes:
            if file_name.lower().endswith(prefix):
                return True
        return False | /robusta_cli-0.10.22a1-py3-none-any.whl/robusta/integrations/msteams/msteams_adaptive_card_files_text.py | 0.534491 | 0.198821 | msteams_adaptive_card_files_text.py | pypi |
from robusta.integrations.msteams.msteams_elements.msteams_base import MsTeamsBase
from robusta.integrations.msteams.msteams_mark_down_fix_url import MsTeamsMarkDownFixUrl
class MsTeamsTextBlock(MsTeamsBase):
def __init__(
self,
text: str,
is_subtle: bool = None,
wrap: bool = None,
weight: str = None,
is_visible: bool = True,
separator: bool = False,
font_size: str = "medium",
horizontal_alignment: str = "left",
):
super().__init__(
self.__text_block(
text,
is_subtle,
wrap,
weight,
is_visible,
separator,
font_size,
horizontal_alignment,
)
)
def __to_msteams_text(self, text: str) -> str:
teams_text = MsTeamsMarkDownFixUrl().fix_text(str(text))
teams_text = teams_text.replace("```", "")
return teams_text
def __text_block(
self,
text: str,
is_subtle: bool = None,
wrap: bool = None,
weight: str = None,
is_visible: bool = True,
separator: bool = False,
font_size: str = "medium",
horizontal_alignment: str = "left",
):
self.block = {
"type": "TextBlock",
"text": self.__to_msteams_text(text),
"size": font_size,
"wrap": True,
}
if not is_visible:
self.block["isVisible"] = is_visible
if separator:
self.block["separator"] = separator
if horizontal_alignment != "left":
self.block["horizontalAlignment"] = horizontal_alignment
if is_subtle is not None:
self.block["isSubtle"] = is_subtle
if weight is not None:
self.block["weight"] = weight
return self.block
def get_text_from_block(self) -> str:
return self.block["text"]
def set_text_from_block(self, text: str):
self.block["text"] = text | /robusta_cli-0.10.22a1-py3-none-any.whl/robusta/integrations/msteams/msteams_elements/msteams_text_block.py | 0.846419 | 0.247152 | msteams_text_block.py | pypi |
import logging
import re
from enum import Enum
from itertools import chain
from typing import Dict, List, Tuple, Union
import requests
from robusta.core.model.env_vars import DISCORD_TABLE_COLUMNS_LIMIT, ROBUSTA_LOGO_URL
from robusta.core.reporting import (
BaseBlock,
FileBlock,
Finding,
FindingSeverity,
HeaderBlock,
KubernetesDiffBlock,
ListBlock,
MarkdownBlock,
TableBlock,
)
from robusta.core.reporting.utils import add_pngs_for_all_svgs
from robusta.core.sinks.discord.discord_sink_params import DiscordSinkParams
from robusta.core.sinks.transformer import Transformer
from robusta.core.reporting.base import FindingStatus
# Emoji prefix per finding severity, used when building the Discord title.
SEVERITY_EMOJI_MAP = {
    FindingSeverity.HIGH: ":red_circle:",
    FindingSeverity.MEDIUM: ":orange_circle:",
    FindingSeverity.LOW: ":yellow_circle:",
    FindingSeverity.INFO: ":green_circle:",
}
# use hex to decimal converter, eg: https://www.rapidtables.com/convert/number/hex-to-decimal.html
# Embed colors per severity, as decimal RGB strings.
SEVERITY_COLOR_MAP = {
    FindingSeverity.HIGH: "14495556",
    FindingSeverity.MEDIUM: "16027661",
    FindingSeverity.LOW: "16632664",
    FindingSeverity.INFO: "7909721",
}
MAX_BLOCK_CHARS = 2048  # Max allowed characters for discord per one embed
MAX_FIELD_CHARS = 1024  # Max allowed characters for discord per one 'field type' embed
BLANK_CHAR = "\u200b"  # Discord does not allow us to send empty strings, so we use blank char instead
class DiscordBlock(BaseBlock):
    """Base class for Discord-renderable blocks; subclasses override to_msg()."""

    def to_msg(self) -> Dict:
        return {}
class DiscordDescriptionBlock(DiscordBlock):
    """
    Discord description block
    """

    description: str

    def to_msg(self) -> Dict:
        # Truncated to Discord's per-embed character limit at render time.
        return {"description": Transformer.apply_length_limit(self.description, MAX_BLOCK_CHARS)}
class DiscordHeaderBlock(DiscordBlock):
    """
    Discord header block (rendered as the webhook message's `content`)
    """

    content: str

    def to_msg(self) -> Dict:
        return {"content": self.content}
class DiscordFieldBlock(DiscordBlock):
    """
    Discord field block
    """

    name: str
    value: str
    inline: bool

    def __init__(self, name: str, value: str, inline: bool = False):
        # Truncate eagerly to Discord's per-field character limit.
        value = Transformer.apply_length_limit(value, MAX_FIELD_CHARS)
        super().__init__(name=name, value=value, inline=inline)

    def to_msg(self) -> Dict:
        return {"name": self.name, "value": self.value, "inline": self.inline}
def _add_color_to_block(block: Dict, msg_color: str):
return {**block, "color": msg_color}
class DiscordSender:
    """Renders a Robusta Finding into Discord webhook payloads and posts them."""

    def __init__(self, url: str, account_id: str, cluster_name: str, discord_params: DiscordSinkParams):
        """
        Set the Discord webhook url.
        """
        self.url = url
        self.cluster_name = cluster_name
        self.account_id = account_id
        self.discord_params = discord_params

    @classmethod
    def __add_discord_title(cls, title: str, status: FindingStatus, severity: FindingSeverity) -> str:
        # Prefix the title with the status emoji/name and severity emoji/name.
        icon = SEVERITY_EMOJI_MAP.get(severity, "")
        return f"{status.to_emoji()} {status.name.lower()} - {icon} {severity.name} - **{title}**"

    @staticmethod
    def __extract_markdown_name(block: MarkdownBlock):
        # Split a markdown block into a leading "*title*" (if present) and
        # the remaining text. Both fall back to BLANK_CHAR because Discord
        # rejects empty strings.
        title = BLANK_CHAR
        text = block.text
        regex = re.compile(r"\*.+\*")
        match = re.match(regex, block.text)
        if match:
            title = text[match.span()[0]: match.span()[1]]
            text = text[match.span()[1]:]
        return title, DiscordSender.__transform_markdown_links(text) or BLANK_CHAR

    @staticmethod
    def __transform_markdown_links(text: str) -> str:
        # Discord understands GitHub-style markdown links.
        return Transformer.to_github_markdown(text, add_angular_brackets=False)

    @staticmethod
    def __format_final_message(discord_blocks: List[DiscordBlock], msg_color: Union[str, int]) -> Dict:
        # Assemble the webhook payload: top-level `content` from the header
        # block, one embed per description block, and a final embed holding
        # all field blocks. Every embed carries the severity color.
        header_block = next((block.to_msg() for block in discord_blocks if isinstance(block, DiscordHeaderBlock)), {})
        fields = [block.to_msg() for block in discord_blocks if isinstance(block, DiscordFieldBlock)]
        discord_msg = {
            "username": "Robusta",
            "avatar_url": ROBUSTA_LOGO_URL,
            "embeds": [
                *[
                    _add_color_to_block(block.to_msg(), msg_color)
                    for block in discord_blocks
                    if isinstance(block, DiscordDescriptionBlock)
                ]
            ],
            **_add_color_to_block(header_block, msg_color),
        }
        if fields:
            discord_msg["embeds"].append({"fields": fields, "color": msg_color})
        return discord_msg

    def __to_discord_diff(self, block: KubernetesDiffBlock, sink_name: str) -> List[DiscordBlock]:
        # Render the k8s diff as markdown blocks, then convert each of those.
        transformed_blocks = Transformer.to_markdown_diff(block, use_emoji_sign=True)
        _blocks = list(
            chain(*[self.__to_discord(transformed_block, sink_name) for transformed_block in transformed_blocks])
        )
        return _blocks

    def __to_discord(self, block: BaseBlock, sink_name: str) -> List[Union[DiscordBlock, Tuple]]:
        # Convert one generic reporting block into Discord blocks. FileBlocks
        # become (filename, (filename, contents)) tuples, the shape expected
        # by requests' `files=` parameter.
        if isinstance(block, MarkdownBlock):
            if not block.text:
                return []
            name, value = self.__extract_markdown_name(block)
            return [
                DiscordFieldBlock(
                    name=name or BLANK_CHAR, value=Transformer.apply_length_limit(value, MAX_FIELD_CHARS) or BLANK_CHAR
                )
            ]
        elif isinstance(block, FileBlock):
            return [(block.filename, (block.filename, block.contents))]
        elif isinstance(block, DiscordFieldBlock):
            return [DiscordFieldBlock(name=block.name, value=block.value, inline=block.inline)]
        elif isinstance(block, HeaderBlock):
            return [
                DiscordHeaderBlock(
                    content=Transformer.apply_length_limit(block.text, 150),
                )
            ]
        elif isinstance(block, DiscordDescriptionBlock):
            return [
                DiscordDescriptionBlock(
                    description=Transformer.apply_length_limit(block.description, MAX_BLOCK_CHARS),
                )
            ]
        elif isinstance(block, TableBlock):
            return self.__to_discord(
                DiscordFieldBlock(
                    name=block.table_name,
                    value=block.to_markdown(max_chars=MAX_BLOCK_CHARS, add_table_header=False).text,
                ),
                sink_name,
            )
        elif isinstance(block, ListBlock):
            return self.__to_discord(block.to_markdown(), sink_name)
        elif isinstance(block, KubernetesDiffBlock):
            return self.__to_discord_diff(block, sink_name)
        else:
            logging.warning(f"cannot convert block of type {type(block)} to discord format block: {block}")
            return []  # no reason to crash the entire report

    def __send_blocks_to_discord(
        self,
        report_blocks: List[BaseBlock],
        title: str,
        status: FindingStatus,
        severity: FindingSeverity,
        msg_color: str,
    ):
        """Convert the blocks and post them: first the main message, then a
        second request carrying the file attachments (if any)."""
        # Process attachment blocks
        file_blocks = add_pngs_for_all_svgs([b for b in report_blocks if isinstance(b, FileBlock)])
        if not self.discord_params.send_svg:
            file_blocks = [b for b in file_blocks if not b.filename.endswith(".svg")]
        attachment_blocks = []
        for block in file_blocks:
            attachment_blocks.extend(self.__to_discord(block, self.discord_params.name))
        other_blocks = [b for b in report_blocks if not isinstance(b, FileBlock)]
        output_blocks = []
        if title:
            title = self.__add_discord_title(title=title, status=status, severity=severity)
            output_blocks.extend(self.__to_discord(HeaderBlock(title), self.discord_params.name))
        for block in other_blocks:
            output_blocks.extend(self.__to_discord(block, self.discord_params.name))
        discord_msg = self.__format_final_message(output_blocks, msg_color)
        logging.debug(
            f"--sending to discord--\n"
            f"title:{title}\n"
            f"blocks: {output_blocks}\n"
            f"discord_msg: {discord_msg}\n"
            f"attachment_blocks: {attachment_blocks}\n"
        )
        try:
            response = requests.post(self.url, json=discord_msg)
            response.raise_for_status()
            if attachment_blocks:
                # Attachments go in a separate multipart request.
                response = requests.post(
                    self.url,
                    data={
                        "username": discord_msg["username"],
                        "avatar_url": ROBUSTA_LOGO_URL,
                    },
                    files=attachment_blocks,
                )
                response.raise_for_status()
        except Exception as e:
            logging.error(
                f"""error sending message to discord\ne={e}\n
                blocks={output_blocks}\nattachment_blocks={attachment_blocks}\nmsg={discord_msg}"""
            )
        else:
            logging.debug("Message was delivered successfully")

    def send_finding_to_discord(
        self,
        finding: Finding,
        platform_enabled: bool,
    ):
        """Top-level entry point: build the block list for a finding and send it."""
        blocks: List[BaseBlock] = []
        if platform_enabled:  # add link to the robusta ui, if it's configured
            actions = f"[:mag_right: Investigate]({finding.get_investigate_uri(self.account_id, self.cluster_name)})"
            if finding.add_silence_url:
                actions = f"{actions} [:no_bell: Silence]({finding.get_prometheus_silence_url(self.account_id, self.cluster_name)})"
            for video_link in finding.video_links:
                actions = f"{actions} [:clapper: {video_link.name}]({video_link.url})"
            blocks.append(DiscordDescriptionBlock(description=actions))
        blocks.append(DiscordFieldBlock(name="Source", value=f"`{self.cluster_name}`"))
        # first add finding description block
        if finding.description:
            blocks.append(DiscordFieldBlock(name="Description", value=finding.description))
        for enrichment in finding.enrichments:
            blocks.extend(enrichment.blocks)
        # wide tables aren't displayed properly on discord. looks better in a text file
        table_blocks = [b for b in blocks if isinstance(b, TableBlock)]
        for table_block in table_blocks:
            table_content = table_block.to_table_string()
            max_table_size = MAX_FIELD_CHARS - 6  # add code markdown characters
            if len(table_block.headers) > DISCORD_TABLE_COLUMNS_LIMIT or len(table_content) > max_table_size:
                table_content = table_block.to_table_string(table_max_width=250)  # bigger max width for file
                table_name = table_block.table_name if table_block.table_name else "data"
                blocks.remove(table_block)
                blocks.append(FileBlock(f"{table_name}.txt", bytes(table_content, "utf-8")))
        status: FindingStatus = (
            FindingStatus.RESOLVED if finding.title.startswith("[RESOLVED]") else FindingStatus.FIRING
        )
        msg_color = status.to_color_decimal()
        title = finding.title.removeprefix("[RESOLVED] ")
        self.__send_blocks_to_discord(
            report_blocks=blocks,
            title=title,
            status=status,
            severity=finding.severity,
            msg_color=msg_color,
        ) | /robusta_cli-0.10.22a1-py3-none-any.whl/robusta/integrations/discord/sender.py | 0.789558 | 0.168378 | sender.py | pypi |
import os
import shlex
import subprocess
import time
from contextlib import contextmanager
from typing import List, Optional
import click_spinner
import requests
import toml
import typer
from dpath.util import get
PLAYBOOKS_DIR = "playbooks/"
def namespace_to_kubectl(namespace: Optional[str]):
    """Return the kubectl namespace flag for *namespace*, or "" when no namespace was given."""
    return "" if namespace is None else f"-n {namespace}"
def exec_in_robusta_runner(
    cmd,
    namespace: Optional[str],
    tries=1,
    time_between_attempts=10,
    error_msg="error running cmd",
    dry_run: bool = False,
):
    """
    Run *cmd* inside the robusta-runner pod via `kubectl exec`, with retries.

    :param cmd: shell command to run inside the runner container
    :param namespace: namespace of the runner pod (None = current kubectl context)
    :param tries: total number of attempts; the final attempt raises on failure
    :param time_between_attempts: seconds to sleep between failed attempts
    :param error_msg: message echoed when a non-final attempt fails
    :param dry_run: when True, only print the command that would be executed
    """
    exec_cmd = _build_exec_command(cmd, namespace)
    if dry_run:
        typer.echo(f"Run the following command:\n {shlex.join(exec_cmd)}")
        return
    typer.echo(f"running cmd: {cmd}")
    for _ in range(tries - 1):
        try:
            return subprocess.check_call(exec_cmd)
        except Exception:
            typer.secho(f"error: {error_msg}", fg="red")
            time.sleep(time_between_attempts)
    # Final attempt: let any exception propagate to the caller.
    # BUGFIX: this previously ran `cmd` (the raw string) on the local machine
    # instead of executing the kubectl-exec argv inside the runner pod.
    return subprocess.check_call(exec_cmd)
def exec_in_robusta_runner_output(command: str, namespace: Optional[str]) -> Optional[bytes]:
    """Run *command* inside the robusta-runner pod and return its stdout as bytes.

    Raises subprocess.CalledProcessError if the command exits non-zero.
    """
    exec_cmd = _build_exec_command(command, namespace)
    result = subprocess.check_output(exec_cmd)
    return result
def _build_exec_command(command: str, namespace: Optional[str]) -> List[str]:
    """Compose the `kubectl exec` argv that runs *command* in the runner container."""
    argv = ["kubectl", "exec", "-it", get_runner_pod(namespace), "-c", "runner"]
    if namespace is not None:
        argv.extend(["-n", namespace])
    # Everything after "--" is the command executed inside the container.
    argv.extend(["--", "bash", "-c", command])
    return argv
def download_file(url, local_path):
    """Download *url* to *local_path* while showing a spinner; raises on HTTP errors."""
    with click_spinner.spinner():
        resp = requests.get(url)
        resp.raise_for_status()
        with open(local_path, "wb") as out:
            out.write(resp.content)
def log_title(title, color=None):
    """Echo *title* framed by separator rules, optionally colored."""
    separator = "=" * 70
    typer.echo(separator)
    typer.secho(title, fg=color)
    typer.echo(separator)
def replace_in_file(path, original, replacement):
    """Replace every occurrence of *original* with *replacement* in the file at *path*.

    Raises Exception if *original* does not appear in the file.
    """
    with open(path) as source:
        contents = source.read()
    if original not in contents:
        raise Exception(f"Cannot replace text {original} in file {path} because it was not found")
    with open(path, "w") as target:
        target.write(contents.replace(original, replacement))
@contextmanager
def fetch_runner_logs(namespace: Optional[str], all_logs=False):
    """Context manager that runs the wrapped code and then prints robusta-runner logs.

    :param namespace: namespace of the runner pod (None = current kubectl context)
    :param all_logs: when True fetch the full log; otherwise only lines emitted
        while the context was active (based on elapsed wall-clock time)
    """
    start = time.time()
    try:
        yield
    finally:
        # Logs are fetched even if the wrapped block raised.
        log_title("Fetching logs...")
        try:
            if all_logs:
                subprocess.check_call(
                    f"kubectl logs {namespace_to_kubectl(namespace)} {get_runner_pod(namespace)} -c runner",
                    shell=True,
                )
            else:
                # --since limits output to the time spent inside the context (+1s slack)
                subprocess.check_call(
                    f"kubectl logs {namespace_to_kubectl(namespace)} {get_runner_pod(namespace)} -c runner --since={int(time.time() - start + 1)}s",
                    shell=True,
                )
        except Exception:
            log_title("Cannot fetch logs. robusta-runner not found", color="red")
            return
def get_package_name(playbooks_dir: str) -> str:
    """Return the poetry package name declared in *playbooks_dir*/pyproject.toml ("" if absent)."""
    with open(os.path.join(playbooks_dir, "pyproject.toml"), "r") as pyproj_toml:
        data = pyproj_toml.read()
        parsed = toml.loads(data)
        # tool.poetry.name holds the package name in a poetry-managed project
        return get(parsed, "tool/poetry/name", default="")
def get_runner_pod(namespace: Optional[str]) -> str:
    """Return the name of the running robusta-runner pod, or "" if none is found.

    :param namespace: namespace to search in (None = current kubectl context)
    """
    output = subprocess.run(
        f"kubectl get pods {namespace_to_kubectl(namespace)} "
        f'--selector="robustaComponent=runner" '
        f"--field-selector=status.phase==Running "
        f"--no-headers "
        f'-o custom-columns=":metadata.name"',
        shell=True,
        text=True,
        capture_output=True,
    ).stdout.strip()
    if not output:
        # FIX: previous message was garbled ("Are you missing the --namespace flag correctly?")
        typer.secho(
            f"Could not find robusta pod in namespace {namespace}. Did you forget the --namespace flag?",
            fg="red",
        )
    return output
import json
import secrets
import string
import time
from typing import Any, Dict, Optional
import jwt as JWT
import typer
import yaml
from pydantic import BaseModel
from robusta.cli.backend_profile import BackendProfile
ISSUER: str = "supabase"
def issued_at() -> int:
    """Current Unix time truncated to whole seconds (used as the JWT `iat` claim)."""
    now = time.time()
    return int(now)
def gen_secret(length: int) -> str:
    """Return a cryptographically random alphanumeric secret of *length* characters."""
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))
def write_values_files(
    values_path: str,
    backendconfig_path: str,
    values: Dict[str, Any],
    backendProfile: BackendProfile,
):
    """Persist the helm values (YAML) and the CLI backend profile (JSON), echoing where each was saved."""
    def _notify(path: str) -> None:
        # Red so the "keep this file" warning stands out in the terminal.
        typer.secho(
            f"Saved configuration to {path} - save this file for future use!",
            fg="red",
        )

    with open(values_path, "w") as values_file:
        yaml.safe_dump(values, values_file, sort_keys=False)
    _notify(values_path)
    with open(backendconfig_path, "w") as config_file:
        json.dump(backendProfile.dict(exclude={"custom_profile"}), config_file, indent=1)
    _notify(backendconfig_path)
class RobustaUI(BaseModel):
    """Helm values for the Robusta UI sub-chart, derived from the platform domain."""
    RELAY_HOST: str
    SUPABASE_URL: str
    SUPABASE_KEY: str
    service = {"nodePort": 30311}  # platform.domain
    def __init__(self, domain: str, anon_key: str):
        """
        :param domain: base domain of the self-hosted platform
        :param anon_key: supabase anonymous API key
        """
        super().__init__(
            RELAY_HOST=f"https://api.{domain}",
            SUPABASE_URL=f"https://db.{domain}",
            SUPABASE_KEY=anon_key,
        )
class RobustaRelay(BaseModel):
    """Helm values for the Robusta relay sub-chart."""
    domain: str
    storePassword: str
    storeUser: str = "apiuser-robustarelay@robusta.dev"
    storeUrl: str
    storeApiKey: str  # anon key
    slackClientId: str = "your-client-id"
    slackClientSecret: str = "your-client-secret"
    slackSigningSecret: str = "your-signing-secret"
    syncActionAllowedOrigins: str
    provider: str
    apiNodePort: int = 30313  # api.domain
    wsNodePort: int = 30314  # relay.domain
    def __init__(self, domain: str, anon_key: str, provider: str, storePW: str):
        """
        :param domain: base domain of the self-hosted platform
        :param anon_key: supabase anonymous API key (used as the store API key)
        :param provider: cloud host provider ("on-prem", "gke" or "eks")
        :param storePW: password for the relay's store user
        """
        super().__init__(
            domain=domain,
            storeUrl=f"https://db.{domain}",
            syncActionAllowedOrigins=f"https://platform.{domain}",
            storeApiKey=anon_key,
            provider=provider,
            storePassword=storePW,
        )
class SelfHostValues(BaseModel):
    """Top-level helm values for a self-hosted Robusta platform install.

    NOTE(review): the secret/JWT defaults below are evaluated once, when this
    class is defined - every instance created in this process shares the same
    JWT_SECRET / ANON_KEY / SERVICE_ROLE_KEY. That is fine for the single-shot
    CLI flow; confirm before reusing this class elsewhere.
    """
    STATIC_IP_NAME: str = "robusta-platform-ip"
    RELAY_PASSWORD: str = gen_secret(12)
    RELAY_USER: str = "apiuser-robustarelay@robusta.dev"
    DOMAIN: str
    PROVIDER: str
    # SUPABASE
    JWT_SECRET: str = gen_secret(32)
    ANON_KEY: str = JWT.encode(
        {"role": "anon", "iss": ISSUER, "iat": issued_at()},
        JWT_SECRET,
    )
    SERVICE_ROLE_KEY: str = JWT.encode(
        {
            "role": "service_role",
            "iss": ISSUER,
            "iat": issued_at(),
        },
        JWT_SECRET,
    )
    SUPABASE_URL: str = "http://kong:8000"  # Internal URL
    PUBLIC_REST_URL: str  # Studio Public REST endpoint - replace this if you intend to use Studio outside of localhost
    # POSTGRES
    POSTGRES_PORT: int = 5432
    POSTGRES_STORAGE: str = "100Gi"
    POSTGRES_PASSWORD: str = gen_secret(12)
    STORAGE_CLASS_NAME: Optional[str] = None
    EKS_CERTIFICATE_ARN: Optional[str] = None
    SITE_URL: str  # callback target should point to the dash board
    ADDITIONAL_REDIRECT_URLS: str = ""
    # KONG API endpoint ports
    KONG_HTTP_PORT: int = 8000
    KONG_HTTP_NODE_PORT: int = 30312  # db.domain
    KONG_HTTPS_PORT: int = 8443
    enableRelay: bool = True
    enableRobustaUI: bool = True
# Typer CLI entry point for the self-host configuration commands
app = typer.Typer(add_completion=False)
@app.command()
def gen_config(
    provider: str = typer.Option(
        ...,
        help='Cloud host provider. options are "on-prem", "gke", "eks"',
    ),
    domain: str = typer.Option(..., help="domain used to route the on-prem services."),
    storage_class_name: str = typer.Option(None, help="database PVC storageClassName."),
    eks_certificate_arn: str = typer.Option(None, help="certificate arn for EKS ingress"),
    platform_nport: int = typer.Option(30311, help="node port for the Robusta dashboard."),
    db_nport: int = typer.Option(30312, help="node port Robusta database."),
    api_nport: int = typer.Option(30313, help="node port for Robusta API."),
    ws_nport: int = typer.Option(30314, help="node port for Robusta websocket."),
):
    """Create self host configuration files"""
    # Validate inputs before generating any secrets or files.
    if provider not in ("on-prem", "gke", "eks"):
        typer.secho(
            f'Invalid provider {provider}. options are "on-prem", "gke", "eks"',
            fg=typer.colors.RED,
        )
        return
    if not domain:
        typer.secho(
            "Missing required argument domain",
            fg=typer.colors.RED,
        )
        return
    # Core platform values (secrets/JWTs come from SelfHostValues defaults).
    values = SelfHostValues(
        PROVIDER=provider,
        DOMAIN=domain,
        SITE_URL=f"https://platform.{domain}",
        PUBLIC_REST_URL=f"https://db.{domain}/rest/v1/",
        STORAGE_CLASS_NAME=storage_class_name,
        EKS_CERTIFICATE_ARN=eks_certificate_arn,
    )
    values.KONG_HTTP_NODE_PORT = db_nport
    # Relay sub-chart values share the platform's anon key and relay password.
    relay_values = RobustaRelay(
        domain=domain,
        anon_key=values.ANON_KEY,
        provider=provider,
        storePW=values.RELAY_PASSWORD,
    )
    relay_values.apiNodePort = api_nport
    relay_values.wsNodePort = ws_nport
    # UI sub-chart values.
    ui_values = RobustaUI(domain=domain, anon_key=values.ANON_KEY)
    ui_values.service["nodePort"] = platform_nport
    merged = values.dict(exclude_none=True)
    merged["robusta-ui"] = ui_values.dict()
    merged["robusta-relay"] = relay_values.dict()
    backend_profile = BackendProfile.fromDomain(domain=domain)
    terms_url = "https://api.robusta.dev/terms-of-service.html"
    typer.echo(f"By using this software you agree to the terms of service ({terms_url})\n")
    write_values_files("self_host_values.yaml", "robusta_cli_config.json", merged, backend_profile)
import logging
from dataclasses import InitVar, is_dataclass
from inspect import getmodule, signature
from typing import Dict, List, Optional, Union, get_type_hints
from hikaru import HikaruBase, HikaruDocumentBase
from kubernetes.client.models.events_v1_event import EventsV1Event
from kubernetes.client.models.v1_container_image import V1ContainerImage
from ruamel.yaml import YAML
try:
    from typing import get_args, get_origin
except ImportError:  # pragma: no cover
    # Fallbacks for Python versions where typing lacks get_args/get_origin (<3.8)
    def get_args(tp):
        """Return the type arguments of *tp*, e.g. (int, str) for Union[int, str]."""
        return tp.__args__ if hasattr(tp, "__args__") else ()
    def get_origin(tp):
        """Return the unsubscripted origin of *tp*, e.g. list for List[int]."""
        return tp.__origin__ if hasattr(tp, "__origin__") else None
# Convenience alias for the type of None
NoneType = type(None)
def create_monkey_patches():
    """Install performance/compatibility monkey patches (hikaru, ruamel YAML, kubernetes client)."""
    # The 2 patched Hikaru methods are very expensive CPU wise. We patched them, and using cached attributes
    # on the hikaru class, so that we perform the expensive procedure only once
    logging.info("Creating hikaru monkey patches")
    HikaruBase.get_empty_instance = get_empty_instance
    HikaruBase._get_hints = _get_hints
    # The YAML method below is searching the file system for plugins, each time a parser is created
    # We create many parser, and this is very inefficient.
    # The plugins doesn't change during program execution.
    # We added caching to search for the plugins only once
    logging.info("Creating yaml monkey patch")
    YAML.official_plug_ins = official_plug_ins
    # The patched method is due to a bug in containerd that allows for containerImages to have no names
    # which causes the kubernetes python api to throw an exception
    logging.info("Creating kubernetes ContainerImage monkey patch")
    # NOTE(review): despite the ContainerImage wording above, what is actually patched here is
    # EventsV1Event.event_time (replacing its setter with a validation-free one); V1ContainerImage
    # is imported but never touched in this function - confirm which patch is intended.
    EventsV1Event.event_time = EventsV1Event.event_time.setter(event_time)
def event_time(self, event_time):
    # Validation-free setter installed onto EventsV1Event.event_time by create_monkey_patches()
    self._event_time = event_time
def official_plug_ins(self):
    """Patched YAML.official_plug_ins: skip the expensive filesystem plugin scan (always none)."""
    return []
# hikaru meta.py monkey patch function
@classmethod
def get_empty_instance(cls):
    """
    Returns a properly initialized instance with Nones and empty collections
    :return: an instance of 'cls' with all scalar attrs set to None and
        all collection attrs set to an appropriate empty collection
    """
    kw_args = {}
    # The 3 lines below are added, to use cached arguments to create the empty class instance
    cached_args = getattr(cls, "cached_args", None)
    if cached_args:
        return cls(**cached_args)
    sig = signature(cls.__init__)
    # InitVar-only fields are consumed by __post_init__ and must not be passed here
    init_var_hints = {k for k, v in get_type_hints(cls).items() if isinstance(v, InitVar) or v is InitVar}
    hints = cls._get_hints()
    for p in sig.parameters.values():
        if p.name in ("self", "client") or p.name in init_var_hints:
            continue
        # skip these either of these next two since they are supplied by default,
        # but only if they have default values
        if p.name in ("apiVersion", "kind"):
            if issubclass(cls, HikaruDocumentBase):
                continue
        f = hints[p.name]
        initial_type = f
        origin = get_origin(initial_type)
        is_required = True
        if origin is Union:
            # Optional[X] is Union[X, None]: unwrap to X and mark the field optional
            type_args = get_args(f)
            initial_type = type_args[0]
            is_required = False
        if (
            (type(initial_type) == type and issubclass(initial_type, (int, str, bool, float)))
            or (is_dataclass(initial_type) and issubclass(initial_type, HikaruBase))
            or initial_type is object
        ):
            # this is a type that might default to None
            # kw_args[p.name] = None
            if is_required:
                if is_dataclass(initial_type) and issubclass(initial_type, HikaruBase):
                    kw_args[p.name] = initial_type.get_empty_instance()
                else:
                    kw_args[p.name] = ""
            else:
                kw_args[p.name] = None
        else:
            origin = get_origin(initial_type)
            if origin in (list, List):
                # ok, just stuffing an empty list in here can be a problem,
                # as we don't know if this is going to then be put through
                # get clean dict; if it's required, a clean dict will remove
                # the list. So we need to put something inside this list so it
                # doesn't get blown away. But ONLY if it's required
                if is_required:
                    list_of_type = get_args(initial_type)[0]
                    if issubclass(list_of_type, HikaruBase):
                        kw_args[p.name] = [list_of_type.get_empty_instance()]
                    else:
                        kw_args[p.name] = [None]
                else:
                    kw_args[p.name] = []
            elif origin in (dict, Dict):
                kw_args[p.name] = {}
            else:
                raise NotImplementedError(
                    f"Internal error! Unknown type"
                    f" {initial_type}"
                    f" for parameter {p.name} in"
                    f" {cls.__name__}. Please file a"
                    f" bug report."
                )  # pragma: no cover
    new_inst = cls(**kw_args)
    # Caching the empty instance creation args, to use next time we want to create an empty instance
    cls.cached_args = kw_args
    return new_inst
@classmethod
def _get_hints(cls) -> dict:
    """Collect (and cache on the class) type hints from every dataclass in the MRO."""
    # The 3 lines below are added, to use cached hints
    cached_hints = getattr(cls, "cached_hints", None)
    if cached_hints:
        return cached_hints
    mro = cls.mro()
    mro.reverse()
    hints = {}
    # Resolve forward references against the defining module's globals
    globs = vars(getmodule(cls))
    for c in mro:
        if is_dataclass(c):
            hints.update(get_type_hints(c, globs))
    # patching ContainerImage hint to allow the names to be None due to containerd bug
    if cls.__name__ == "Event":
        hints["eventTime"] = Optional[str]
    # Caching the class hints for later use
    cls.cached_hints = hints
    return hints
import re
import urllib.parse
from collections import defaultdict
from typing import List, Optional
import markdown2
from fpdf import FPDF
from fpdf.fonts import FontFace
try:
    from tabulate import tabulate
except ImportError:
    # Lazy-failing stub: only raises if a TableBlock actually needs rendering
    def tabulate(*args, **kwargs):
        raise ImportError("Please install tabulate to use the TableBlock")
from robusta.core.reporting import (
BaseBlock,
DividerBlock,
FileBlock,
HeaderBlock,
JsonBlock,
KubernetesDiffBlock,
ListBlock,
MarkdownBlock,
ScanReportBlock,
TableBlock,
)
class Transformer:
    """Static helpers that convert Robusta report blocks between markup formats
    (slack-style mrkdwn, GitHub markdown, HTML) and render scan reports to PDF."""
    @staticmethod
    def apply_length_limit(msg: str, max_length: int, truncator: Optional[str] = None) -> str:
        """
        Method that crops the string if it is bigger than max_length provided.
        Args:
            msg: The string that needs to be truncated.
            max_length: Max length of the string allowed
            truncator: truncator string that will be appended, if max length is exceeded.
        Examples:
            >>> print(Transformer.apply_length_limit('1234567890', 9))
            123456...
            >>> print(Transformer.apply_length_limit('1234567890', 9, "."))
            12345678.
        Returns:
            Cropped string with truncator appended at the end if length is exceeded.
            The original string otherwise
        """
        if len(msg) <= max_length:
            return msg
        truncator = truncator or "..."
        return msg[: max_length - len(truncator)] + truncator
    @staticmethod
    def to_markdown_diff(block: KubernetesDiffBlock, use_emoji_sign: bool = False) -> List[ListBlock]:
        """Render a KubernetesDiffBlock as one markdown list line per changed path."""
        # this can happen when a block.old=None or block.new=None - e.g. the resource was added or deleted
        if not block.diffs:
            return []
        divider = ":arrow_right:" if use_emoji_sign else "==>"
        _blocks = []
        _blocks.extend(ListBlock([f"*{d.formatted_path}*: {d.other_value} {divider} {d.value}" for d in block.diffs]))
        return _blocks
    @staticmethod
    def get_markdown_links(markdown_data: str) -> List[str]:
        """Return all slack-style links, i.e. <URL|TEXT> occurrences, found in *markdown_data*."""
        regex = "<.*?\\|.*?>"
        matches = re.findall(regex, markdown_data)
        links = []
        if matches:
            links = [match for match in matches if len(match) > 1]  # filter out illegal matches
        return links
    @staticmethod
    def to_github_markdown(markdown_data: str, add_angular_brackets: bool = True) -> str:
        """Transform all occurrences of slack markdown, <URL|LINK TEXT>, to github markdown [LINK TEXT](URL)."""
        # some markdown parsers doesn't support angular brackets on links
        OPENING_ANGULAR = "<" if add_angular_brackets else ""
        CLOSING_ANGULAR = ">" if add_angular_brackets else ""
        matches = Transformer.get_markdown_links(markdown_data)
        for match in matches:
            # take only the data between the first '<' and last '>'
            splits = match[1:-1].split("|")
            if len(splits) == 2:  # don't replace unexpected strings
                # percent-encode the URL path so the markdown link stays valid
                parsed_url = urllib.parse.urlparse(splits[0])
                parsed_url = parsed_url._replace(path=urllib.parse.quote_plus(parsed_url.path, safe="/"))
                replacement = f"[{splits[1]}]({OPENING_ANGULAR}{parsed_url.geturl()}{CLOSING_ANGULAR})"
                markdown_data = markdown_data.replace(match, replacement)
        # slack bold (*text*) becomes github bold (**text**)
        return re.sub(r"\*([^\*]*)\*", r"**\1**", markdown_data)
    @classmethod
    def __markdown_to_html(cls, mrkdwn_text: str) -> str:
        """Convert slack-style mrkdwn to HTML (links and bold first, then markdown2 for the rest)."""
        # replace links: from <http://url|name> to <a href="url">name</a>
        mrkdwn_links = re.findall(r"<[^\\|]*\|[^\>]*>", mrkdwn_text)
        for link in mrkdwn_links:
            link_content = link[1:-1]
            link_parts = link_content.split("|")
            mrkdwn_text = mrkdwn_text.replace(link, f'<a href="{link_parts[0]}">{link_parts[1]}</a>')
        # replace slack markdown bold: from *bold text* to <b>bold text<b> (markdown2 converts this to italic)
        mrkdwn_text = re.sub(r"\*([^\*]*)\*", r"<b>\1</b>", mrkdwn_text)
        # Note - markdown2 should be used after slack links already converted, otherwise it's getting corrupted!
        # Convert other markdown content
        return markdown2.markdown(mrkdwn_text)
    @classmethod
    def to_html(cls, blocks: List[BaseBlock]) -> str:
        """Render a list of report blocks to an HTML string (one line per block)."""
        lines = []
        for block in blocks:
            if isinstance(block, MarkdownBlock):
                if not block.text:
                    continue
                lines.append(f"{cls.__markdown_to_html(block.text)}")
            elif isinstance(block, DividerBlock):
                lines.append("-------------------")
            elif isinstance(block, JsonBlock):
                lines.append(block.json_str)
            elif isinstance(block, KubernetesDiffBlock):
                for diff in block.diffs:
                    lines.append(
                        cls.__markdown_to_html(f"*{'.'.join(diff.path)}*: {diff.other_value} ==> {diff.value}")
                    )
            elif isinstance(block, HeaderBlock):
                lines.append(f"<strong>{block.text}</strong>")
            elif isinstance(block, ListBlock):
                lines.extend(cls.__markdown_to_html(block.to_markdown().text))
            elif isinstance(block, TableBlock):
                if block.table_name:
                    lines.append(cls.__markdown_to_html(block.table_name))
                lines.append(tabulate(block.render_rows(), headers=block.headers, tablefmt="html").replace("\n", ""))
        return "\n".join(lines)
    @classmethod
    def to_standard_markdown(cls, blocks: List[BaseBlock]) -> str:
        """Render a list of report blocks to standard (GitHub-flavored) markdown."""
        lines = []
        for block in blocks:
            if isinstance(block, MarkdownBlock):
                if not block.text:
                    continue
                lines.append(f"{cls.to_github_markdown(block.text, False)}")
            elif isinstance(block, DividerBlock):
                lines.append("-------------------")
            elif isinstance(block, JsonBlock):
                lines.append(block.json_str)
            elif isinstance(block, KubernetesDiffBlock):
                for diff in block.diffs:
                    lines.append(f"**{'.'.join(diff.path)}**: {diff.other_value} ==> {diff.value}")
            elif isinstance(block, HeaderBlock):
                lines.append(f"**{block.text}**")
            elif isinstance(block, ListBlock):
                lines.extend(cls.to_github_markdown(block.to_markdown().text, False))
            elif isinstance(block, TableBlock):
                if block.table_name:
                    lines.append(cls.to_github_markdown(block.table_name, False))
                rendered_rows = block.render_rows()
                lines.append(tabulate(rendered_rows, headers=block.headers, tablefmt="presto"))
        return "\n".join(lines)
    @staticmethod
    def tableblock_to_fileblocks(blocks: List[BaseBlock], column_limit: int) -> List[FileBlock]:
        """Move overly-wide TableBlocks (>= column_limit columns) out of *blocks* into text FileBlocks.

        Mutates *blocks* in place and returns the extracted file blocks.
        """
        file_blocks: List[FileBlock] = []
        for table_block in [b for b in blocks if isinstance(b, TableBlock)]:
            if len(table_block.headers) >= column_limit:
                table_name = table_block.table_name if table_block.table_name else "data"
                table_content = table_block.to_table_string(table_max_width=250)  # bigger max width for file
                file_blocks.append(FileBlock(f"{table_name}.txt", bytes(table_content, "utf-8")))
                blocks.remove(table_block)
        return file_blocks
    @staticmethod
    def scanReportBlock_to_fileblock(block: BaseBlock) -> BaseBlock:
        """Render a ScanReportBlock to a PDF FileBlock; any other block type is returned unchanged."""
        if not isinstance(block, ScanReportBlock):
            return block
        accent_color = (140, 249, 209)
        headers_color = (63, 63, 63)
        table_color = (207, 215, 216)
        def set_normal_test_style(pdf: FPDF):
            # small black text for regular report content
            pdf.set_font("", "", 8)
            pdf.set_text_color(0, 0, 0)
        def write_report_header(title: str, end_time, score: int, grade: str):
            # title + timestamp on the left; grade/score on the right (only for non-negative scores)
            pdf.cell(pdf.w * 0.7, 10, f"**{title}** {end_time.strftime('%b %d, %y %X')}", border=0, markdown=True)
            if int(score) >= 0:
                pdf.cell(pdf.w * 0.3, 10, f"**{grade}** {score}", border=0, markdown=True)
            pdf.ln(20)
        def write_section_header(pdf: FPDF, header: str):
            # keep the section header together with the start of its table
            if pdf.will_page_break(50):
                pdf.add_page()
            pdf.ln(12)
            pdf.set_font("", "B", 12)
            # NOTE(review): passing an RGB tuple as the single argument - relies on
            # fpdf2 accepting a tuple here; verify against the pinned fpdf version
            pdf.set_text_color(headers_color)
            pdf.cell(txt=header, border=0)
            pdf.ln(12)
        def write_config(pdf: FPDF, config: str):
            pdf.set_text_color(accent_color)
            pdf.cell(txt="config", border=0)
            pdf.ln(12)
            set_normal_test_style(pdf)
            pdf.multi_cell(w=0, txt=config, border=0)
        def write_table(pdf: FPDF, rows: list[list[str]]):
            pdf.set_draw_color(table_color)
            set_normal_test_style(pdf)
            # the table is emitted by the context manager itself; nothing to do in the body
            with pdf.table(
                borders_layout="INTERNAL",
                rows=rows,
                headings_style=FontFace(color=(headers_color)),
                col_widths=(10, 25, 25, 65),
                markdown=True,
                line_height=1.5 * pdf.font_size,
            ):
                pass
        scan: ScanReportBlock = block
        pdf = FPDF(orientation="landscape", format="A4")
        pdf.add_page()
        pdf.set_font("courier", "", 18)
        pdf.set_line_width(0.1)
        pdf.c_margin = 2  # create default cell margin to add table "padding"
        title = f"{scan.type.capitalize()} report"
        write_report_header(title, scan.end_time, scan.score, scan.grade())
        write_config(pdf, scan.config)
        # group results by resource kind, then by name/namespace within each kind
        sections: dict[str, dict[str, List]] = defaultdict(lambda: defaultdict(list))
        for item in scan.results:
            sections[item.kind][f"{item.name}/{item.namespace}"].append(item)
        for kind, grouped_issues in sections.items():
            rows = [["Priority", "Name", "Namespace", "Issues"]]
            for group, scanRes in grouped_issues.items():
                n, ns = group.split("/", 1)
                issue_txt = ""
                max_priority: int = 0
                # shortest container names first; the row shows the worst (max) priority
                for res in sorted(scanRes, key=lambda x: len(x.container)):
                    issue_txt += scan.pdf_scan_row_content_format(row=res)
                    max_priority = max(max_priority, res.priority)
                rows.append([scan.pdf_scan_row_priority_format(max_priority), n, ns, issue_txt])
            write_section_header(pdf, kind)
            write_table(pdf, rows)
        return FileBlock(f"{title}.pdf", pdf.output("", "S"))
import base64
import json
import logging
import uuid
from datetime import datetime
from typing import Any, Dict
from robusta.core.reporting import (
CallbackBlock,
DividerBlock,
Enrichment,
EventsRef,
FileBlock,
Finding,
HeaderBlock,
JsonBlock,
KubernetesDiffBlock,
ListBlock,
MarkdownBlock,
PrometheusBlock,
TableBlock,
)
from robusta.core.reporting.callbacks import ExternalActionRequestBuilder
from robusta.core.sinks.transformer import Transformer
from robusta.utils.parsing import datetime_to_db_str
class ModelConversion:
    """Converters from Robusta Finding/Enrichment objects to the platform's JSON rows."""
    @staticmethod
    def to_finding_json(account_id: str, cluster_id: str, finding: Finding):
        """Build the JSON row for a Finding as stored by the Robusta platform."""
        finding_json = {
            "id": str(finding.id),
            "title": finding.title,
            "description": finding.description,
            "source": finding.source.value,
            "aggregation_key": finding.aggregation_key,
            "failure": finding.failure,
            "finding_type": finding.finding_type.value,
            "category": finding.category,
            "priority": finding.severity.name,
            "subject_type": finding.subject.subject_type.value,
            "subject_name": finding.subject.name,
            "subject_namespace": finding.subject.namespace,
            "subject_node": finding.subject.node,
            "service_key": finding.service_key,
            "cluster": cluster_id,
            "account_id": account_id,
            "video_links": [link.dict() for link in finding.video_links],
            "starts_at": datetime_to_db_str(finding.starts_at),
            "updated_at": datetime_to_db_str(datetime.now()),
        }
        # optional fields - only included when present on the finding
        if finding.creation_date:
            finding_json["creation_date"] = finding.creation_date
        if finding.ends_at:
            finding_json["ends_at"] = datetime_to_db_str(finding.ends_at)
        if finding.fingerprint:  # currently only alerts supports fingerprint, and will be resolved
            finding_json["fingerprint"] = finding.fingerprint
        return finding_json
    @staticmethod
    def to_evidence_json(
        account_id: str,
        cluster_id: str,
        sink_name: str,
        signing_key: str,
        finding_id: uuid.UUID,
        enrichment: Enrichment,
    ) -> Dict[Any, Any]:
        """Serialize an Enrichment's blocks into the platform's structured-data evidence row.

        Returns {} when no block could be converted.
        """
        structured_data = []
        for block in enrichment.blocks:
            if isinstance(block, MarkdownBlock):
                if not block.text:
                    continue
                structured_data.append(
                    {
                        "type": "markdown",
                        "data": Transformer.to_github_markdown(block.text),
                    }
                )
            elif isinstance(block, DividerBlock):
                structured_data.append({"type": "divider"})
            elif isinstance(block, FileBlock):
                # the stored type is the file extension; contents are base64-encoded
                last_dot_idx = block.filename.rindex(".")
                structured_data.append(
                    {
                        "type": block.filename[last_dot_idx + 1 :],
                        "data": str(base64.b64encode(block.contents)),
                    }
                )
            elif isinstance(block, HeaderBlock):
                structured_data.append({"type": "header", "data": block.text})
            elif isinstance(block, ListBlock):
                structured_data.append({"type": "list", "data": block.items})
            elif isinstance(block, PrometheusBlock):
                structured_data.append(
                    {"type": "prometheus", "data": block.data.dict(), "metadata": block.metadata, "version": 1.0}
                )
            elif isinstance(block, TableBlock):
                if block.table_name:
                    structured_data.append(
                        {
                            "type": "markdown",
                            "data": Transformer.to_github_markdown(block.table_name),
                        }
                    )
                structured_data.append(
                    {
                        "type": "table",
                        "data": {
                            "headers": block.headers,
                            "rows": [row for row in block.rows],
                            "column_renderers": block.column_renderers,
                        },
                    }
                )
            elif isinstance(block, KubernetesDiffBlock):
                structured_data.append(
                    {
                        "type": "diff",
                        "data": {
                            "old": block.old,
                            "new": block.new,
                            "resource_name": block.resource_name,
                            "num_additions": block.num_additions,
                            "num_deletions": block.num_deletions,
                            "num_modifications": block.num_modifications,
                            "updated_paths": [d.formatted_path for d in block.diffs],
                        },
                    }
                )
            elif isinstance(block, CallbackBlock):
                # each choice becomes a signed external-action request the UI can trigger
                callbacks = []
                for (text, callback) in block.choices.items():
                    callbacks.append(
                        {
                            "text": text,
                            "callback": ExternalActionRequestBuilder.create_for_func(
                                callback,
                                sink_name,
                                text,
                                account_id,
                                cluster_id,
                                signing_key,
                            ).json(),
                        }
                    )
                structured_data.append({"type": "callbacks", "data": callbacks})
            elif isinstance(block, JsonBlock):
                structured_data.append({"type": "json", "data": block.json_str})
            elif isinstance(block, EventsRef):
                structured_data.append({"type": "events_ref", "data": block.dict()})
            else:
                logging.error(f"cannot convert block of type {type(block)} to robusta platform format block: {block}")
                continue  # no reason to crash the entire report
        if not structured_data:
            return {}
        return {
            "issue_id": str(finding_id),
            "file_type": "structured_data",
            "data": json.dumps(structured_data),
            "account_id": account_id,
        }
import logging
from typing import Dict, Any, Optional
import requests
from robusta.core.model.k8s_operation_type import K8sOperationType
from robusta.core.reporting.base import BaseBlock, Finding, FindingSeverity, Enrichment
from robusta.core.reporting.blocks import (
HeaderBlock,
JsonBlock,
KubernetesDiffBlock,
ListBlock,
MarkdownBlock,
TableBlock,
)
from robusta.core.reporting.consts import FindingAggregationKey
from robusta.core.sinks.pagerduty.pagerduty_sink_params import PagerdutyConfigWrapper
from robusta.core.sinks.sink_base import SinkBase
class PagerdutySink(SinkBase):
    def __init__(self, sink_config: PagerdutyConfigWrapper, registry):
        """Initialize the PagerDuty sink with its Events/Change API endpoints and routing key."""
        super().__init__(sink_config.pagerduty_sink, registry)
        self.events_url = "https://events.pagerduty.com/v2/enqueue/"
        self.change_url = "https://events.pagerduty.com/v2/change/enqueue"
        self.api_key = sink_config.pagerduty_sink.api_key
@staticmethod
def __to_pagerduty_severity_type(severity: FindingSeverity):
# must be one of [critical, error, warning, info]
# Default Incident Urgency is interpreted as [HIGH, HIGH, LOW, LOW]
# https://support.pagerduty.com/docs/dynamic-notifications
if severity == FindingSeverity.HIGH:
return "critical"
elif severity == FindingSeverity.MEDIUM:
return "error"
elif severity == FindingSeverity.LOW:
return "warning"
elif severity == FindingSeverity.INFO:
return "info"
elif severity == FindingSeverity.DEBUG:
return "info"
else:
return "critical"
@staticmethod
def __to_pagerduty_status_type(title: str):
# very dirty implementation, I am deeply sorry
# must be one of [trigger, acknowledge or resolve]
if title.startswith("[RESOLVED]"):
return "resolve"
else:
return "trigger"
    @staticmethod
    def __send_changes_to_pagerduty(self, finding: Finding, platform_enabled: bool):
        """POST one PagerDuty change event per KubernetesDiffBlock found in *finding*.

        NOTE(review): declared @staticmethod yet takes `self` explicitly - the caller
        presumably passes the sink instance by hand; confirm before changing the decorator.
        """
        custom_details: dict = {}
        links = []
        if platform_enabled:
            links.append({
                "text": "🔂 See change history in Robusta",
                "href": finding.get_investigate_uri(self.account_id, self.cluster_name)
            })
        else:
            links.append({
                "text": "🔂 Enable Robusta UI to see change history",
                "href": "https://bit.ly/robusta-ui-pager-duty"
            })
        source = self.cluster_name
        custom_details["namespace"] = finding.service.namespace
        custom_details["resource"] = f"{finding.service.resource_type}/{finding.subject.name}"
        if finding.subject.node:
            custom_details["node"] = finding.subject.node
        timestamp = finding.starts_at.astimezone().isoformat()
        for enrichment in finding.enrichments:
            for block in enrichment.blocks:
                # only Kubernetes diffs translate to change events
                if not isinstance(block, KubernetesDiffBlock):
                    continue
                changes = self.__block_to_changes(block, enrichment)
                operation = changes["operation"]
                if not operation:
                    continue
                unformatted_texts = self.__to_unformatted_text_for_changes(block)
                if unformatted_texts:
                    # each diff line becomes its own "Change N" custom detail
                    change_num = 1
                    for diff_text in unformatted_texts:
                        custom_details[f"Change {change_num}"] = diff_text
                        change_num += 1
                description = finding.description
                changes_count_text = ""
                if operation == K8sOperationType.UPDATE and description:
                    custom_details["Remarks"] = description
                    change_count = changes["change_count"]
                    changes_count_text = f" ({change_count} {'change' if change_count == 1 else 'changes'})"
                elif description:
                    custom_details["Remarks"] = f"Resource {operation.value}d"
                summary = f"{finding.service.resource_type} {finding.service.namespace}/{finding.service.name} {operation.value}d in cluster {self.cluster_name}{changes_count_text}"
                # one change event is posted per diff block
                body = {
                    "routing_key": self.api_key,
                    "payload": {
                        "summary": summary,
                        "timestamp": timestamp,
                        "source": source,
                        "custom_details": custom_details
                    },
                    "links": links
                }
                headers = {"Content-Type": "application/json"}
                response = requests.post(self.change_url, json=body, headers=headers)
                if not response.ok:
                    logging.error(
                        f"Error sending message to PagerDuty: {response.status_code}, {response.reason}, {response.text}"
                    )
@staticmethod
def __send_events_to_pagerduty(self, finding: Finding, platform_enabled: bool):
custom_details: dict = {}
links = []
if platform_enabled:
links.append({
"text": "🔎 Investigate in Robusta",
"href": finding.get_investigate_uri(self.account_id, self.cluster_name)
})
if finding.add_silence_url:
links.append({
"text": "🔕 Create Prometheus Silence",
"href": finding.get_prometheus_silence_url(self.account_id, self.cluster_name)
})
else:
links.append({
"text": "🔎 Enable Robusta UI to investigate",
"href": "https://bit.ly/robusta-ui-pager-duty"
})
if finding.add_silence_url:
links.append({
"text": "🔕 Enable Robusta UI to silence alerts",
"href": "https://bit.ly/robusta-ui-pager-duty"
})
# custom fields that don't have an inherent meaning in PagerDuty itself:
custom_details["Resource"] = finding.subject.name
custom_details["Cluster running Robusta"] = self.cluster_name
custom_details["Namespace"] = finding.subject.namespace
custom_details["Node"] = finding.subject.node
custom_details["Source of the Alert"] = str(finding.source.name)
custom_details["Severity"] = PagerdutySink.__to_pagerduty_severity_type(finding.severity).upper()
custom_details["Fingerprint ID"] = finding.fingerprint
custom_details["Description"] = finding.description
custom_details[
"Caption"
] = f"{finding.severity.to_emoji()} {PagerdutySink.__to_pagerduty_severity_type(finding.severity)} - {finding.title}"
message_lines = ""
if finding.description:
message_lines = finding.description + "\n\n"
for enrichment in finding.enrichments:
for block in enrichment.blocks:
text = self.__to_unformatted_text_for_alerts(block)
if not text:
continue
message_lines += text + "\n\n"
custom_details["state_message"] = message_lines
body = {
"payload": {
"summary": finding.title,
"severity": PagerdutySink.__to_pagerduty_severity_type(finding.severity),
"source": self.cluster_name,
"component": str(finding.subject),
"group": finding.service_key,
"class": finding.aggregation_key,
"custom_details": custom_details,
},
"routing_key": self.api_key,
"event_action": PagerdutySink.__to_pagerduty_status_type(finding.title),
"dedup_key": finding.fingerprint,
"links": links,
}
headers = {"Content-Type": "application/json"}
response = requests.post(self.events_url, json=body, headers=headers)
if not response.ok:
logging.error(
f"Error sending message to PagerDuty: {response.status_code}, {response.reason}, {response.text}"
)
def write_finding(self, finding: Finding, platform_enabled: bool):
if finding.aggregation_key == FindingAggregationKey.CONFIGURATION_CHANGE_KUBERNETES_RESOURCE_CHANGE.value:
return PagerdutySink.__send_changes_to_pagerduty(self, finding=finding, platform_enabled=platform_enabled)
return PagerdutySink.__send_events_to_pagerduty(self, finding=finding, platform_enabled=platform_enabled)
@staticmethod
def __to_unformatted_text_for_alerts(block: BaseBlock) -> str:
if isinstance(block, HeaderBlock):
return block.text
elif isinstance(block, TableBlock):
return block.to_table_string()
elif isinstance(block, ListBlock):
return "\n".join(block.items)
elif isinstance(block, MarkdownBlock):
return block.text
elif isinstance(block, JsonBlock):
return block.json_str
elif isinstance(block, KubernetesDiffBlock):
return "\n".join(
map(
lambda diff: f"* {diff.formatted_path}",
block.diffs,
)
)
return ""
@staticmethod
def __to_unformatted_text_for_changes(block: KubernetesDiffBlock) -> Optional[list[str]]:
return list(map(
lambda diff: diff.formatted_path,
block.diffs,
))
# fetch the changed values from the block
@staticmethod
def __block_to_changes(block: KubernetesDiffBlock, enrichment: Enrichment) -> Dict[str, Any]:
operation = enrichment.annotations.get("operation")
change_count = 0
if operation == K8sOperationType.UPDATE:
change_count = block.num_modifications
return {
"change_count": change_count,
"operation": operation,
} | /robusta_cli-0.10.22a1-py3-none-any.whl/robusta/core/sinks/pagerduty/pagerduty_sink.py | 0.730386 | 0.197116 | pagerduty_sink.py | pypi |
from typing import List, Optional
from hikaru.model.rel_1_26 import Event, EventList
from robusta.core.reporting import EventRow, EventsBlock, TableBlock
from robusta.core.reporting.custom_rendering import RendererType, render_value
from robusta.integrations.kubernetes.api_client_utils import parse_kubernetes_datetime_to_ms
def filter_event(ev: Event, name_substring_filter: str, included_types: Optional[List[str]]) -> bool:
    """Return True when the event passes both filters.

    :param name_substring_filter: substring the involved object's name must
        contain; pass None (or "") to accept any name
    :param included_types: allowed event types, matched case-insensitively
        (e.g. ["Warning"]); None accepts every type
    """
    if name_substring_filter is not None and name_substring_filter not in ev.regarding.name:
        return False
    if included_types is None:
        return True
    allowed_types = {t.lower() for t in included_types}
    return ev.type.lower() in allowed_types
def get_resource_events_table(
    table_name: str,
    kind: str,
    name: Optional[str] = None,
    namespace: Optional[str] = None,
    name_substring: str = "",
    included_types: Optional[List[str]] = None,
    max_events: Optional[int] = None,
) -> Optional[TableBlock]:
    """Build an EventsBlock table of Kubernetes events involving a resource.

    :param table_name: display name of the resulting table
    :param kind: kind of the involved object (e.g. "Pod")
    :param name: optional name of the involved object
    :param namespace: optional namespace of the involved object
    :param name_substring: keep only events whose object name contains this substring
    :param included_types: keep only events of these types (case-insensitive)
    :param max_events: cap on the number of newest-first events kept
    :return: the events table, or None when no matching events exist
    """
    field_selector = f"regarding.kind={kind}"
    if name:
        field_selector += f",regarding.name={name}"
    if namespace:
        field_selector += f",regarding.namespace={namespace}"
    event_list: EventList = EventList.listEventForAllNamespaces(field_selector=field_selector).obj
    if not event_list.items:
        return None
    filtered_events = [ev for ev in event_list.items if filter_event(ev, name_substring, included_types)]
    if not filtered_events:
        return None
    # NOTE(review): sort assumes get_event_timestamp never returns None for
    # these events (None would raise TypeError during comparison) — unchanged
    # from the original behavior.
    sorted_events = sorted(filtered_events, key=get_event_timestamp, reverse=True)
    if max_events is not None:
        sorted_events = sorted_events[:max_events]

    def _time_ms(event: Event) -> float:
        # compute the timestamp once per event; 0 marks "no timestamp available"
        ts = get_event_timestamp(event)
        return parse_kubernetes_datetime_to_ms(ts) if ts else 0

    event_times = [_time_ms(event) for event in sorted_events]
    headers = ["reason", "type", "time", "message"]
    rows = [
        [event.reason, event.type, time_ms, event.note]
        for event, time_ms in zip(sorted_events, event_times)
    ]
    events = [
        EventRow(
            reason=event.reason,
            type=event.type,
            time=render_value(RendererType.DATETIME, time_ms),
            message=event.note,
            kind=kind.lower(),
            name=name,
            namespace=namespace,
        )
        for event, time_ms in zip(sorted_events, event_times)
    ]
    return EventsBlock(
        events=events,
        rows=rows,
        headers=headers,
        column_renderers={"time": RendererType.DATETIME},
        table_name=table_name,
        column_width=[1, 1, 1, 2],
    )
def get_event_timestamp(event: Event):
    """Best-available timestamp for an event.

    Preference order mirrors how kubectl resolves event times: last occurrence,
    then the v1 eventTime, then first occurrence, then object creation time.
    Returns None when none of those fields is set.
    """
    candidates = (
        event.deprecatedLastTimestamp,
        event.eventTime,
        event.deprecatedFirstTimestamp,
        event.metadata.creationTimestamp,
    )
    for timestamp in candidates:
        if timestamp:
            return timestamp
    return None
def get_events_list(event_type: str = None) -> EventList:
    """
    event_types are ["Normal","Warning"]
    """
    if not event_type:
        return EventList.listEventForAllNamespaces().obj
    selector = f"type={event_type}"
    return EventList.listEventForAllNamespaces(field_selector=selector).obj
def get_resource_events(
    kind: Optional[str] = None,
    name: Optional[str] = None,
    namespace: Optional[str] = None,
    name_substring: str = "",
    included_types: Optional[List[str]] = None,
) -> List[Event]:
    """List cluster events, optionally restricted to the involved object's
    kind/name/namespace, then filtered client-side by name substring and type.

    :return: matching events (possibly empty)
    """
    selectors = []
    if kind:
        selectors.append(f"regarding.kind={kind}")
    if name:
        selectors.append(f"regarding.name={name}")
    if namespace:
        selectors.append(f"regarding.namespace={namespace}")
    # join instead of string concatenation: the old code produced a selector
    # with a leading comma (e.g. ",regarding.name=x") when `kind` was omitted
    # but `name`/`namespace` were given — an invalid field selector
    field_selector = ",".join(selectors)
    event_list: EventList = EventList.listEventForAllNamespaces(field_selector=field_selector).obj
    return [ev for ev in event_list.items if filter_event(ev, name_substring, included_types)]
import inspect
from typing import Callable, Dict, List, Optional, Tuple, Type, cast
from pydantic.main import BaseModel
from robusta.core.model.events import ExecutionBaseEvent, ExecutionEventBaseParams
from robusta.utils.decorators import doublewrap
class NotAnActionException(Exception):
    """Raised when an object that is not an @action-decorated function is
    registered as a playbook action."""

    def __init__(self, obj):
        super().__init__(f"{obj} is not a playbook action")
@doublewrap
def action(func: Callable):
    """
    Decorator to mark functions as playbook actions
    """
    # the marker attribute is what Action.is_action() later checks for
    func._action_name = func.__name__
    return func
class Action:
    """Wraps a playbook action function together with metadata extracted from
    its signature: the event type it handles and its optional params model."""

    def __init__(
        self,
        func: Callable,
    ):
        if not self.is_action(func):
            raise NotAnActionException(func)
        # public name used to look the action up in the registry
        self.action_name = func.__name__
        self.func = func
        # annotation of the first parameter: the ExecutionBaseEvent subclass
        self.event_type = self.__get_action_event_type(func)
        # annotation of the optional second parameter: the params model, or None
        self.params_type = self.__get_action_params_type(func)
        self.from_params_func = None
        self.from_params_parameter_class = None
        if vars(self.event_type).get("from_params"):  # execution event has 'from_params'
            self.from_params_func = getattr(self.event_type, "from_params")
            from_params_signature = inspect.signature(self.from_params_func)
            # the first parameter of from_params describes how callers build
            # this event type from external (API) parameters
            self.from_params_parameter_class = list(from_params_signature.parameters.values())[0].annotation

    @staticmethod
    def is_action(func):
        # true only for plain functions carrying the @action marker attribute
        return inspect.isfunction(func) and getattr(func, "_action_name", None) is not None

    @staticmethod
    def __get_action_event_type(func: Callable):
        """
        Returns the event_type of a playbook action.
        E.g. given an action like:
        @action
        def some_playbook(event: PodEvent, params: MyPlaybookParams):
            pass
        This function returns the class PodEvent
        """
        func_params = iter(inspect.signature(func).parameters.values())
        event_type = next(func_params).annotation
        if not issubclass(event_type, ExecutionBaseEvent):
            raise Exception(f"Illegal action first parameter {event_type}. Must extend ExecutionBaseEvent")
        return event_type

    @staticmethod
    def __get_action_params_type(func: Callable):
        """
        Returns the parameters class for a playbook action or None if the action has no parameters
        E.g. given an action like:
        @action
        def some_playbook(event: PodEvent, params: MyPlaybookParams):
            pass
        This function returns the class MyPlaybookParams
        """
        func_params = iter(inspect.signature(func).parameters.values())
        next(func_params)  # skip the event parameter
        action_params = next(func_params, None)
        if not action_params:
            return None
        params_cls = action_params.annotation
        if not issubclass(params_cls, BaseModel):
            raise Exception(f"Illegal action second parameter {params_cls}. Action params must extend BaseModel")
        return params_cls
class ActionsRegistry:
    """Registry mapping action names to their Action metadata.

    NOTE(review): `_actions` is a class-level dict, so every registry instance
    shares a single mapping — presumably intentional (one process-wide
    registry); confirm before relying on per-instance isolation.
    """

    _actions: Dict[str, Action] = {}

    def add_action(self, func: Callable):
        """Wrap `func` as an Action and register it under its function name."""
        self._actions[func.__name__] = Action(func)

    def get_action(self, action_name: str) -> Optional[Action]:
        """Look up a registered action by name; None when unknown."""
        return self._actions.get(action_name)

    def get_external_actions(
        self,
    ) -> List[Tuple[str, Type[ExecutionEventBaseParams], Optional[Type[BaseModel]]]]:
        """Should be used to prepare calling schema for each action"""
        externally_callable = []
        for action_def in self._actions.values():
            if not action_def.from_params_func:
                continue
            externally_callable.append(
                (
                    action_def.action_name,
                    cast(Type[ExecutionEventBaseParams], action_def.from_params_parameter_class),
                    action_def.params_type,
                )
            )
        return externally_callable
import math
from collections import defaultdict, namedtuple
from datetime import datetime, timedelta
from string import Template
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import humanize
import pygal
from hikaru.model.rel_1_26 import Node
from prometrix import PrometheusQueryResult, PrometheusSeries
from pydantic import BaseModel
from robusta.core.model.base_params import (
ChartValuesFormat,
PrometheusParams,
ResourceChartItemType,
ResourceChartResourceType,
)
from robusta.core.model.env_vars import FLOAT_PRECISION_LIMIT, PROMETHEUS_REQUEST_TIMEOUT_SECONDS
from robusta.core.reporting.blocks import FileBlock
from robusta.core.reporting.custom_rendering import charts_style
ResourceKey = Tuple[ResourceChartResourceType, ResourceChartItemType]
ChartLabelFactory = Callable[[int], str]
class XAxisLine(BaseModel):
    """A horizontal reference line (e.g. a request/limit threshold) drawn
    across the full width of a chart."""

    label: str  # legend entry shown for the line
    value: float  # y-value at which the line is drawn
def __prepare_promql_query(provided_labels: Dict[Any, Any], promql_query_template: str) -> str:
    """Substitute $label placeholders in a PromQL template.

    Labels missing from `provided_labels` render as the literal "<missing>"
    (via the defaultdict) rather than being left as $placeholders.
    """
    substitutions: Dict[Any, Any] = defaultdict(lambda: "<missing>", provided_labels)
    return Template(promql_query_template).safe_substitute(substitutions)
from datetime import datetime
from typing import Any, Dict, Optional
from robusta.integrations.prometheus.utils import get_prometheus_connect
def custom_query_range(
    prometheus_params: PrometheusParams,
    query: str,
    start_time: datetime,
    end_time: datetime,
    step: str,
    params: Optional[Dict[str, Any]] = None,
) -> PrometheusQueryResult:
    """
    This function wraps prometheus custom_query_range
    """
    query_params = {} if params is None else params
    connection = get_prometheus_connect(prometheus_params)
    # fail fast with a clear error if Prometheus is unreachable
    connection.check_prometheus_connection(query_params)
    raw_result = connection.custom_query_range(
        query=query, start_time=start_time, end_time=end_time, step=step, params=query_params
    )
    return PrometheusQueryResult(data=raw_result)
def get_node_internal_ip(node: Node) -> str:
    """Return the node's InternalIP address.

    Raises:
        ValueError: if the node has no address of type "InternalIP".
    """
    internal_ip = next((addr.address for addr in node.status.addresses if addr.type == "InternalIP"), None)
    if internal_ip is None:
        # a bare next() would leak StopIteration here, which is confusing to
        # callers (and is silently swallowed inside generators, PEP 479)
        raise ValueError(f"Node {node.metadata.name} has no InternalIP address")
    return internal_ip
def run_prometheus_query(
    prometheus_params: PrometheusParams, promql_query: str, starts_at: datetime, ends_at: datetime
) -> PrometheusQueryResult:
    """Execute a PromQL range query over [starts_at, ends_at] with an
    automatically chosen step size.

    Raises:
        Exception: if either end of the time range is falsy/missing.
    """
    if not starts_at or not ends_at:
        raise Exception("Invalid timerange specified for the prometheus query.")
    if prometheus_params.prometheus_additional_labels and prometheus_params.add_additional_labels:
        # NOTE(review): this rewrites EVERY '}' in the query, i.e. it appends
        # the configured labels to each label selector — assumed intentional
        # for multi-selector queries; verify for queries containing literal '}'
        promql_query = promql_query.replace("}", __get_additional_labels_str(prometheus_params) + "}")
    query_duration = ends_at - starts_at
    resolution = get_resolution_from_duration(query_duration)
    # step in seconds so the range yields roughly `resolution` samples, never below 1s
    increment = max(query_duration.total_seconds() / resolution, 1.0)
    return custom_query_range(
        prometheus_params,
        promql_query,
        starts_at,
        ends_at,
        str(increment),
        {"timeout": PROMETHEUS_REQUEST_TIMEOUT_SECONDS},
    )
# Maps a maximum query duration to the chart resolution for that range —
# either a fixed sample count or a callable deriving it from the duration.
_RESOLUTION_DATA: Dict[timedelta, Union[int, Callable[[timedelta], int]]] = {
    timedelta(hours=1): 250,
    # NOTE: 1 minute resolution, max 1440 points
    timedelta(days=1): lambda duration: math.ceil(duration.total_seconds() / 60),
    # NOTE: 5 minute resolution, max 2016 points
    timedelta(weeks=1): lambda duration: math.ceil(duration.total_seconds() / (60 * 5)),
}
_DEFAULT_RESOLUTION = 3000


def get_resolution_from_duration(duration: timedelta) -> int:
    """Pick the number of samples to request for a query window of `duration`,
    using the smallest matching bracket in _RESOLUTION_DATA (fallback: 3000)."""
    for upper_bound in sorted(_RESOLUTION_DATA):
        if duration <= upper_bound:
            rule = _RESOLUTION_DATA[upper_bound]
            return rule if isinstance(rule, int) else rule(duration)
    return _DEFAULT_RESOLUTION
def get_target_name(series: PrometheusSeries) -> Optional[str]:
    """Most specific Kubernetes target name in the series labels, checking
    container, then pod, then node; None when none of them is present."""
    metric_labels = series.metric
    for label in ("container", "pod", "node"):
        if label in metric_labels:
            return metric_labels[label]
    return None
def get_series_job(series: PrometheusSeries) -> Optional[str]:
    """The Prometheus scrape job that produced this series, if labeled."""
    metric_labels = series.metric
    return metric_labels.get("job")
def filter_prom_jobs_results(series_list_result: Optional[List[PrometheusSeries]]) -> Optional[List[PrometheusSeries]]:
    """Deduplicate series that describe the same target across scrape jobs.

    For each distinct target (container/pod/node), keep the series scraped by
    the "kubelet" job when exactly one exists, otherwise the series whose job
    name sorts first alphabetically.

    :return: the deduplicated list; the input is returned unchanged when it is
        empty/None or has a single element.
    """
    if not series_list_result or len(series_list_result) == 1:
        return series_list_result
    target_names = {get_target_name(series) for series in series_list_result if get_target_name(series)}
    return_list: List[PrometheusSeries] = []
    # takes kubelet job if exists, return first job alphabetically if it doesn't
    for target_name in target_names:
        relevant_series = [series for series in series_list_result if get_target_name(series) == target_name]
        relevant_kubelet_metric = [series for series in relevant_series if get_series_job(series) == "kubelet"]
        if len(relevant_kubelet_metric) == 1:
            return_list.append(relevant_kubelet_metric[0])
            continue
        # get_series_job() may return None for unlabeled series; map None to ""
        # so the comparison cannot raise TypeError (None vs str). min() replaces
        # the previous sort-then-take-first, which is O(n log n) for one element.
        return_list.append(min(relevant_series, key=lambda s: get_series_job(s) or ""))
    return return_list
def create_chart_from_prometheus_query(
    prometheus_params: PrometheusParams,
    promql_query: str,
    alert_starts_at: datetime,
    include_x_axis: bool,
    graph_duration_minutes: int = 0,
    chart_title: Optional[str] = None,
    values_format: Optional[ChartValuesFormat] = None,
    lines: Optional[List[XAxisLine]] = None,
    chart_label_factory: Optional[ChartLabelFactory] = None,
    filter_prom_jobs: bool = False,
):
    """Run a PromQL range query and render the result as a pygal XY chart.

    :param alert_starts_at: when set, the window covers at least the alert's
        duration (extended to graph_duration_minutes if longer); when falsy,
        the window is the last graph_duration_minutes ending now (UTC)
    :param lines: horizontal reference lines to draw across the chart
    :param chart_label_factory: fallback label generator (by series index) for
        series whose metric labels produce an empty name
    :raises Exception: if the query does not return a "matrix" result
    """
    # default was previously a shared mutable [] — normalize None here instead
    lines = lines if lines is not None else []
    if not alert_starts_at:
        ends_at = datetime.utcnow()
        starts_at = ends_at - timedelta(minutes=graph_duration_minutes)
    else:
        ends_at = datetime.now(tz=alert_starts_at.tzinfo)
        alert_duration = ends_at - alert_starts_at
        graph_duration = max(alert_duration, timedelta(minutes=graph_duration_minutes))
        starts_at = ends_at - graph_duration
    prometheus_query_result = run_prometheus_query(prometheus_params, promql_query, starts_at, ends_at)
    if prometheus_query_result.result_type != "matrix":
        raise Exception(
            f"Unsupported query result for robusta chart, Type received: {prometheus_query_result.result_type}, type supported 'matrix'"
        )
    chart = pygal.XY(
        show_dots=True,
        style=charts_style(),
        truncate_legend=15,
        include_x_axis=include_x_axis,
        width=1280,
        height=720,
    )
    chart.x_label_rotation = 35
    chart.truncate_label = -1
    chart.x_value_formatter = lambda timestamp: datetime.fromtimestamp(timestamp).strftime("%I:%M:%S %p on %d, %b")
    value_formatters = {
        ChartValuesFormat.Plain: lambda val: str(val),
        ChartValuesFormat.Bytes: lambda val: humanize.naturalsize(val, binary=True),
        ChartValuesFormat.Percentage: lambda val: f"{(100 * val):.1f}%",
        ChartValuesFormat.CPUUsage: lambda val: f"{(1000 * val):.1f}m",
    }
    chart_values_format = values_format if values_format else ChartValuesFormat.Plain
    chart.value_formatter = value_formatters[chart_values_format]
    if chart_title:
        chart.title = chart_title
    else:
        chart.title = promql_query
    # fix a pygal bug which causes infinite loops due to rounding errors with floating points
    # TODO: change min_time time before Jan 19 3001
    min_time = 32536799999
    max_time = 0
    series_list_result = prometheus_query_result.series_list_result
    if filter_prom_jobs:
        series_list_result = filter_prom_jobs_results(series_list_result)
    for i, series in enumerate(series_list_result):
        label = get_target_name(series)
        if not label:
            label = "\n".join([v for (key, v) in series.metric.items() if key != "job"])
        # If the label is empty, try to take it from the additional_label_factory
        if label == "" and chart_label_factory is not None:
            label = chart_label_factory(i)
        values = []
        for index in range(len(series.values)):
            timestamp = series.timestamps[index]
            value = round(float(series.values[index]), FLOAT_PRECISION_LIMIT)
            values.append((timestamp, value))
        min_time = min(min_time, min(series.timestamps))
        max_time = max(max_time, max(series.timestamps))
        chart.add(label, values)
    # draw each reference line across the full observed time span
    for line in lines:
        value = [(min_time, line.value), (max_time, line.value)]
        chart.add(line.label, value)
    return chart
def __get_additional_labels_str(prometheus_params: PrometheusParams) -> str:
    """Render the configured extra labels as a PromQL selector suffix,
    i.e. ', key="value"' for each configured pair ('' when none)."""
    extra_labels = prometheus_params.prometheus_additional_labels
    if not extra_labels:
        return ""
    return "".join(f', {key}="{value}"' for key, value in extra_labels.items())
def create_graph_enrichment(
    start_at: datetime,
    labels: Dict[Any, Any],
    promql_query: str,
    prometheus_params: PrometheusParams,
    graph_duration_minutes: int,
    graph_title: Optional[str],
    chart_values_format: Optional[ChartValuesFormat],
    lines: Optional[List[XAxisLine]] = None,
    chart_label_factory: Optional[ChartLabelFactory] = None,
    filter_prom_jobs: bool = False,
) -> FileBlock:
    """Render a PromQL graph (with $label substitution) and wrap it as an SVG
    FileBlock named after the graph title or the query itself."""
    # default was previously a shared mutable [] — normalize here so downstream
    # never receives None
    lines = lines if lines is not None else []
    promql_query = __prepare_promql_query(labels, promql_query)
    chart = create_chart_from_prometheus_query(
        prometheus_params,
        promql_query,
        start_at,
        include_x_axis=True,
        graph_duration_minutes=graph_duration_minutes,
        chart_title=graph_title,
        values_format=chart_values_format,
        lines=lines,
        chart_label_factory=chart_label_factory,
        filter_prom_jobs=filter_prom_jobs,
    )
    chart_name = graph_title if graph_title else promql_query
    svg_name = f"{chart_name}.svg"
    return FileBlock(svg_name, chart.render())
def create_resource_enrichment(
    starts_at: datetime,
    labels: Dict[Any, Any],
    resource_type: ResourceChartResourceType,
    item_type: ResourceChartItemType,
    graph_duration_minutes: int,
    prometheus_params: PrometheusParams,
    lines: Optional[List[XAxisLine]] = None,
    title_override: Optional[str] = None,
) -> FileBlock:
    """Render a CPU/Memory/Disk usage graph for a pod, node, or container.

    Selects a canned PromQL query for the (resource_type, item_type)
    combination, substitutes the provided labels into it, and returns the
    rendered chart as an SVG FileBlock.

    :raises AttributeError: when the combination has no supported query
        (currently only Disk/Pod).
    """
    # default was previously a shared mutable [] — normalize None locally
    lines = lines if lines is not None else []
    ChartOptions = namedtuple("ChartOptions", ["query", "values_format"])
    combinations: Dict[ResourceKey, Optional[ChartOptions]] = {
        (ResourceChartResourceType.CPU, ResourceChartItemType.Pod): ChartOptions(
            query='sum(irate(container_cpu_usage_seconds_total{namespace="$namespace", pod=~"$pod"}[5m])) by (pod, job)',
            values_format=ChartValuesFormat.CPUUsage,
        ),
        (ResourceChartResourceType.CPU, ResourceChartItemType.Node): ChartOptions(
            query='instance:node_cpu_utilisation:rate5m{job="node-exporter", instance=~"$node_internal_ip:[0-9]+"} != 0',
            values_format=ChartValuesFormat.Percentage,
        ),
        (ResourceChartResourceType.CPU, ResourceChartItemType.Container): ChartOptions(
            query='sum(irate(container_cpu_usage_seconds_total{namespace="$namespace", pod=~"$pod", container=~"$container"}[5m])) by (container, pod, job)',
            values_format=ChartValuesFormat.CPUUsage,
        ),
        (ResourceChartResourceType.Memory, ResourceChartItemType.Pod): ChartOptions(
            query='sum(container_memory_working_set_bytes{pod=~"$pod", container!="", image!=""}) by (pod, job)',
            values_format=ChartValuesFormat.Bytes,
        ),
        (ResourceChartResourceType.Memory, ResourceChartItemType.Node): ChartOptions(
            query='instance:node_memory_utilisation:ratio{job="node-exporter", instance=~"$node_internal_ip:[0-9]+"} != 0',
            values_format=ChartValuesFormat.Percentage,
        ),
        (ResourceChartResourceType.Memory, ResourceChartItemType.Container): ChartOptions(
            query='sum(container_memory_working_set_bytes{pod=~"$pod", container=~"$container", image!=""}) by (container, pod, job)',
            values_format=ChartValuesFormat.Bytes,
        ),
        (ResourceChartResourceType.Disk, ResourceChartItemType.Pod): None,
        (ResourceChartResourceType.Disk, ResourceChartItemType.Node): ChartOptions(
            query='sum(sort_desc(1 -(max without (mountpoint, fstype) (node_filesystem_avail_bytes{job="node-exporter", fstype!="", instance=~"$node_internal_ip:[0-9]+"})/max without (mountpoint, fstype) (node_filesystem_size_bytes{job="node-exporter", fstype!="", instance=~"$node_internal_ip:[0-9]+"})) != 0))',
            values_format=ChartValuesFormat.Percentage,
        ),
    }
    combination = (resource_type, item_type)
    chosen_combination = combinations[combination]
    if not chosen_combination:
        raise AttributeError(f"The following combination for resource chart is not supported: {combination}")
    values_format_text = "Utilization" if chosen_combination.values_format == ChartValuesFormat.Percentage else "Usage"
    title = (
        title_override
        if title_override
        else f"{resource_type.name} {values_format_text} for this {item_type.name.lower()}"
    )
    # NOTE: Some queries do not produce automatic labels, so we need to provide them
    # Parameter in lambda is the number of the series in the chart to override (excluding lines)
    # It could be used if there are multiple series in the chart
    chart_label_factories: Dict[ResourceKey, ChartLabelFactory] = {
        (ResourceChartResourceType.CPU, ResourceChartItemType.Pod): lambda i: labels.get("pod", "CPU Usage"),
        (ResourceChartResourceType.CPU, ResourceChartItemType.Node): lambda i: labels.get("node", "CPU Usage"),
        (ResourceChartResourceType.CPU, ResourceChartItemType.Container): lambda i: labels.get(
            "container", "CPU Usage"
        ),
        (ResourceChartResourceType.Memory, ResourceChartItemType.Container): lambda i: labels.get(
            "container", "Memory Usage"
        ),
    }
    graph_enrichment = create_graph_enrichment(
        starts_at,
        labels,
        chosen_combination.query,
        prometheus_params=prometheus_params,
        graph_duration_minutes=graph_duration_minutes,
        graph_title=title,
        chart_values_format=chosen_combination.values_format,
        lines=lines,
        chart_label_factory=chart_label_factories.get(combination),
        filter_prom_jobs=True,
    )
    return graph_enrichment
import logging
from typing import List, Optional
from hikaru.model.rel_1_26 import Pod
from robusta.core.reporting import BaseBlock, FileBlock, MarkdownBlock
from robusta.integrations.kubernetes.custom_models import NamedRegexPattern, RegexReplacementStyle
def get_crash_report_blocks(
    pod: Pod,
    regex_replacer_patterns: Optional[NamedRegexPattern] = None,
    regex_replacement_style: Optional[RegexReplacementStyle] = None,
) -> List[BaseBlock]:
    """Collect report blocks describing the crashed containers of a pod:
    restart counts, waiting/termination reasons, and the previous run's logs.

    :param regex_replacer_patterns: optional patterns for scrubbing log content
    :param regex_replacement_style: how matched log content is replaced
    :return: one group of blocks per crashed container (may be empty)
    """
    # either status list may be omitted entirely by the API (None), so guard
    # before concatenating — the old `a + b` raised TypeError in that case
    all_statuses = (pod.status.containerStatuses or []) + (pod.status.initContainerStatuses or [])
    crashed_container_statuses = [
        container_status
        for container_status in all_statuses
        if container_status.state.waiting is not None and container_status.restartCount >= 1
    ]
    blocks: List[BaseBlock] = []
    for container_status in crashed_container_statuses:
        blocks.append(MarkdownBlock(f"*{container_status.name}* restart count: {container_status.restartCount}"))
        if container_status.state and container_status.state.waiting:
            blocks.append(
                MarkdownBlock(f"*{container_status.name}* waiting reason: {container_status.state.waiting.reason}")
            )
        if container_status.state and container_status.state.terminated:
            blocks.append(
                MarkdownBlock(
                    f"*{container_status.name}* termination reason: {container_status.state.terminated.reason}"
                )
            )
        if container_status.lastState and container_status.lastState.terminated:
            blocks.append(
                MarkdownBlock(
                    f"*{container_status.name}* termination reason: {container_status.lastState.terminated.reason}"
                )
            )
        try:
            # previous=True: fetch logs from the crashed (previous) container run
            container_log = pod.get_logs(
                container_status.name,
                previous=True,
                regex_replacer_patterns=regex_replacer_patterns,
                regex_replacement_style=regex_replacement_style,
            )
            if container_log:
                blocks.append(FileBlock(f"{pod.metadata.name}.txt", container_log))
            else:
                blocks.append(MarkdownBlock(f"Container logs unavailable for container: {container_status.name}"))
                logging.error(
                    f"could not fetch logs from container: {container_status.name}. logs were {container_log}"
                )
        except Exception:
            logging.error("Failed to get pod logs", exc_info=True)
    return blocks
import enum
import json
import logging
import re
from enum import Flag
from typing import List, Optional
from hikaru.model.rel_1_26 import ContainerStatus, Event, EventList, Pod, PodStatus
from robusta.core.reporting import BaseBlock, HeaderBlock, MarkdownBlock
class ImagePullBackoffReason(Flag):
    """Possible root causes of an ImagePullBackOff.

    Values are single-bit flags so ambiguous kubelet error messages can map to
    a union of several candidate causes (see ImagePullBackoffInvestigator).
    """

    Unknown = 0
    RepoDoesntExist = 1
    NotAuthorized = 2
    ImageDoesntExist = 4
    TagNotFound = 8
def get_image_pull_backoff_container_statuses(status: PodStatus) -> List[ContainerStatus]:
    """Containers of the pod that are currently waiting due to an image-pull failure."""
    pull_failure_reasons = ("ImagePullBackOff", "ErrImagePull")
    failing = []
    for container_status in status.containerStatuses:
        waiting_state = container_status.state.waiting
        if waiting_state is not None and waiting_state.reason in pull_failure_reasons:
            failing.append(container_status)
    return failing
def decompose_flag(flag: Flag) -> List[Flag]:
    """Split a composite Flag value into its canonical single-bit members.

    Implemented with public APIs only: the previous version used the private
    helper `enum._decompose`, which was removed in Python 3.11. Members are
    returned in declaration order; the zero-valued member is never included.
    """
    return [
        member
        for member in type(flag)
        if member.value != 0 and member.value & flag.value == member.value
    ]
def get_image_pull_backoff_blocks(pod: Pod) -> Optional[List[BaseBlock]]:
    """Build report blocks explaining each ImagePullBackOff container in the pod.

    For every failing container, emits a header plus the image name, then the
    classified reason(s) when the kubelet error matched a known template, or
    the raw error message otherwise.

    :return: the blocks (possibly empty); containers whose pull error could not
        be found in the pod's events are skipped and logged instead.
    """
    blocks: List[BaseBlock] = []
    pod_name = pod.metadata.name
    namespace = pod.metadata.namespace
    image_pull_backoff_container_statuses = get_image_pull_backoff_container_statuses(pod.status)
    # fetches the pod's events once; reused for every failing container
    investigator = ImagePullBackoffInvestigator(pod_name, namespace)
    for container_status in image_pull_backoff_container_statuses:
        investigation = investigator.investigate(container_status)
        blocks.extend(
            [
                HeaderBlock(f"ImagePullBackOff in container {container_status.name}"),
                MarkdownBlock(f"*Image:* {container_status.image}"),
            ]
        )
        # TODO: this happens when there is a backoff but the original events containing the actual error message are already gone
        # and all that remains is a backoff event without a detailed error message - maybe we should identify that case and
        # print "backoff - too many failed image pulls" or something like that
        if investigation is None:
            # dump the pod's events to the log so the unmatched case can be diagnosed
            events = [
                {
                    "type": event.type,
                    "reason": event.reason,
                    "source.component": event.deprecatedSource.component,
                    "message": event.note,
                }
                for event in investigator.pod_events.items
            ]
            logging.info(
                "could not find the image pull error in the kubernetes events. All the relevant events follow, so we can figure out why"
            )
            logging.info(json.dumps(events, indent=4))
            continue
        reason = investigation.reason
        error_message = investigation.error_message
        if reason != ImagePullBackoffReason.Unknown:
            # a matched template may imply several candidate causes (flag union)
            reasons = decompose_flag(reason)
            if len(reasons) == 1:
                blocks.extend(
                    [
                        MarkdownBlock(f"*Reason:* {reason}"),
                    ]
                )
            else:
                line_separated_reasons = "\n".join([f"{r}" for r in reasons])
                blocks.extend(
                    [
                        MarkdownBlock(f"*Possible reasons:*\n{line_separated_reasons}"),
                    ]
                )
        else:
            # no template matched — surface the raw kubelet error text instead
            blocks.append(MarkdownBlock(f"*Error message:* {container_status.name}:\n{error_message}"))
    return blocks
class ImagePullOffInvestigation:
    """Outcome of analyzing a container's image-pull failure events."""

    error_message: str  # kubelet error text with the standard prefix stripped
    reason: ImagePullBackoffReason  # classified cause(s); Unknown if no template matched

    def __init__(self, error_message: str, reason: ImagePullBackoffReason):
        self.error_message = error_message
        self.reason = reason
class ImagePullBackoffInvestigator:
    """Matches a pod's kubelet image-pull error events against known error
    templates (for containerd and docker runtimes) to classify the failure."""

    # Each entry maps a full-match regex over the kubelet error text (after
    # prefix stripping) to the ImagePullBackoffReason flags it implies; several
    # flags are ORed when the runtime's message is ambiguous between causes.
    configs = [
        # Containerd
        {
            "err_template": 'failed to pull and unpack image ".*?": failed to resolve reference ".*?": .*?: not found',
            "reason": ImagePullBackoffReason.RepoDoesntExist
            | ImagePullBackoffReason.ImageDoesntExist
            | ImagePullBackoffReason.TagNotFound,
        },
        {
            "err_template": (
                'failed to pull and unpack image ".*?": failed to resolve reference ".*?": '
                "pull access denied, repository does not exist or may require authorization: server message: "
                "insufficient_scope: authorization failed"
            ),
            "reason": ImagePullBackoffReason.NotAuthorized | ImagePullBackoffReason.ImageDoesntExist,
        },
        {
            "err_template": (
                'failed to pull and unpack image ".*?": failed to resolve reference ".*?": '
                "failed to authorize: failed to fetch anonymous token: unexpected status: 403 Forbidden"
            ),
            "reason": ImagePullBackoffReason.NotAuthorized,
        },
        # Docker
        {
            "err_template": (
                "Error response from daemon: pull access denied for .*?, "
                "repository does not exist or may require 'docker login': denied: requested access to the resource is denied"
            ),
            "reason": ImagePullBackoffReason.NotAuthorized | ImagePullBackoffReason.ImageDoesntExist,
        },
        {
            "err_template": "Error response from daemon: manifest for .*? not found: manifest unknown: manifest unknown",
            "reason": ImagePullBackoffReason.TagNotFound,
        },
        {
            "err_template": (
                'Error response from daemon: Head ".*?": denied: '
                'Permission "artifactregistry.repositories.downloadArtifacts" denied on resource ".*?" \\(or it may not exist\\)'
            ),
            "reason": ImagePullBackoffReason.NotAuthorized,
        },
        {
            "err_template": 'Error response from daemon: manifest for .*? not found: manifest unknown: Failed to fetch ".*?"',
            "reason": ImagePullBackoffReason.ImageDoesntExist | ImagePullBackoffReason.TagNotFound,
        },
    ]

    def __init__(self, pod_name: str, namespace: str):
        self.pod_name = pod_name
        self.namespace = namespace
        # fetch the pod's events once up front; investigate() iterates them per container
        self.pod_events: EventList = EventList.listNamespacedEvent(
            self.namespace, field_selector=f"regarding.name={self.pod_name}"
        ).obj

    def investigate(self, container_status: ContainerStatus) -> Optional[ImagePullOffInvestigation]:
        """Find the container's kubelet pull-error event and classify it.

        :return: the investigation result, or None when no matching event exists
            (e.g. the original detailed error events have already expired).
        """
        for pod_event in self.pod_events.items:
            error_message = self.get_kubelet_image_pull_error_from_event(pod_event, container_status.image)
            logging.debug(f"for {pod_event} got message: {error_message}")
            if error_message is None:
                continue
            reason = self.get_reason_from_kubelet_image_pull_error(error_message)
            logging.debug(f"reason is: {reason}")
            return ImagePullOffInvestigation(error_message=error_message, reason=reason)
        return None

    @staticmethod
    def get_kubelet_image_pull_error_from_event(pod_event: Event, image_name: str) -> Optional[str]:
        """Extract the runtime error text from a kubelet "Failed" warning event
        about pulling `image_name`, stripping the standard rpc-error prefix.

        :return: the stripped error message, or None when the event is not a
            kubelet image-pull failure for this image.
        """
        if pod_event.type != "Warning":
            return None
        if pod_event.reason != "Failed":
            return None
        if pod_event.deprecatedSource.component != "kubelet":
            return None
        # the kubelet wraps the runtime error with one of these rpc prefixes
        prefixes = [
            f'Failed to pull image "{image_name}": rpc error: code = Unknown desc = ',
            f'Failed to pull image "{image_name}": rpc error: code = NotFound desc = ',
        ]
        for prefix in prefixes:
            if pod_event.note.startswith(prefix):
                return pod_event.note[len(prefix) :]
        return None

    def get_reason_from_kubelet_image_pull_error(self, kubelet_image_pull_error: str) -> ImagePullBackoffReason:
        """Classify a stripped kubelet pull error against the known templates;
        Unknown when no template fully matches."""
        for config in self.configs:
            err_template = config["err_template"]
            reason = config["reason"]
            if re.fullmatch(err_template, kubelet_image_pull_error) is not None:
                return reason
        return ImagePullBackoffReason.Unknown
import logging
from collections import defaultdict
from concurrent.futures.process import ProcessPoolExecutor
from typing import Dict, List, Optional, Union
from hikaru.model.rel_1_26 import Deployment, DaemonSet, StatefulSet, Job, Pod, ReplicaSet, Volume, Container
from kubernetes import client
from kubernetes.client import (
V1Container,
V1DaemonSet,
V1DaemonSetList,
V1Deployment,
V1DeploymentList,
V1Job,
V1JobList,
V1NodeList,
V1ObjectMeta,
V1Pod,
V1PodList,
V1ReplicaSetList,
V1StatefulSet,
V1StatefulSetList,
V1Volume,
)
from pydantic import BaseModel
from robusta.core.discovery import utils
from robusta.core.model.cluster_status import ClusterStats
from robusta.core.model.env_vars import DISCOVERY_BATCH_SIZE, DISCOVERY_MAX_BATCHES, DISCOVERY_PROCESS_TIMEOUT_SEC, \
DISABLE_HELM_MONITORING
from robusta.core.model.helm_release import HelmRelease
from robusta.core.model.jobs import JobInfo
from robusta.core.model.namespaces import NamespaceInfo
from robusta.core.model.services import ContainerInfo, ServiceConfig, ServiceInfo, VolumeInfo
from robusta.utils.cluster_provider_discovery import cluster_provider
import prometheus_client
discovery_errors_count = prometheus_client.Counter("discovery_errors", "Number of discovery process failures.")
discovery_process_time = prometheus_client.Summary(
"discovery_process_time",
"Total discovery process time (seconds)",
)
class DiscoveryResults(BaseModel):
    """Aggregated snapshot produced by a single discovery pass over the cluster."""
    services: List[ServiceInfo] = []
    nodes: Optional[V1NodeList] = None  # raw node list from the API; None if node discovery did not run
    node_requests: Dict = {}  # node name -> list of resource requests of pods scheduled on it
    jobs: List[JobInfo] = []
    namespaces: List[NamespaceInfo] = []
    helm_releases: List[HelmRelease] = []
    class Config:
        # V1NodeList is a kubernetes-client class, not a pydantic model
        arbitrary_types_allowed = True
class Discovery:
executor = ProcessPoolExecutor(max_workers=1) # always 1 discovery process
@staticmethod
def __create_service_info(
meta: V1ObjectMeta,
kind: str,
containers: List[V1Container],
volumes: List[V1Volume],
total_pods: int,
ready_pods: int,
is_helm_release: bool = False,
) -> ServiceInfo:
container_info = [ContainerInfo.get_container_info(container) for container in containers] if containers else []
volumes_info = [VolumeInfo.get_volume_info(volume) for volume in volumes] if volumes else []
config = ServiceConfig(labels=meta.labels or {}, containers=container_info, volumes=volumes_info)
version = getattr(meta, "resource_version", None) or getattr(meta, "resourceVersion", None)
resource_version = int(version) if version else 0
return ServiceInfo(
resource_version=resource_version,
name=meta.name,
namespace=meta.namespace,
service_type=kind,
service_config=config,
ready_pods=ready_pods,
total_pods=total_pods,
is_helm_release=is_helm_release,
)
@staticmethod
def create_service_info(obj: Union[Deployment, DaemonSet, StatefulSet, Pod, ReplicaSet]) -> ServiceInfo:
return Discovery.__create_service_info(
obj.metadata,
obj.kind,
extract_containers(obj),
extract_volumes(obj),
extract_total_pods(obj),
extract_ready_pods(obj),
is_helm_release=is_release_managed_by_helm(annotations=obj.metadata.annotations, labels=obj.metadata.labels))
@staticmethod
def discovery_process() -> DiscoveryResults:
pods_metadata: List[V1ObjectMeta] = []
node_requests = defaultdict(list) # map between node name, to request of pods running on it
active_services: List[ServiceInfo] = []
# discover micro services
try:
# discover deployments
# using k8s api `continue` to load in batches
continue_ref: Optional[str] = None
for _ in range(DISCOVERY_MAX_BATCHES):
deployments: V1DeploymentList = client.AppsV1Api().list_deployment_for_all_namespaces(
limit=DISCOVERY_BATCH_SIZE, _continue=continue_ref
)
active_services.extend(
[
Discovery.__create_service_info(
deployment.metadata,
"Deployment",
extract_containers(deployment),
extract_volumes(deployment),
extract_total_pods(deployment),
extract_ready_pods(deployment),
is_helm_release=is_release_managed_by_helm(annotations=deployment.metadata.annotations,
labels=deployment.metadata.labels)
)
for deployment in deployments.items
]
)
continue_ref = deployments.metadata._continue
if not continue_ref:
break
# discover statefulsets
continue_ref = None
for _ in range(DISCOVERY_MAX_BATCHES):
statefulsets: V1StatefulSetList = client.AppsV1Api().list_stateful_set_for_all_namespaces(
limit=DISCOVERY_BATCH_SIZE, _continue=continue_ref
)
active_services.extend(
[
Discovery.__create_service_info(
statefulset.metadata,
"StatefulSet",
extract_containers(statefulset),
extract_volumes(statefulset),
extract_total_pods(statefulset),
extract_ready_pods(statefulset),
is_helm_release=is_release_managed_by_helm(annotations=statefulset.metadata.annotations,
labels=statefulset.metadata.labels)
)
for statefulset in statefulsets.items
]
)
continue_ref = statefulsets.metadata._continue
if not continue_ref:
break
# discover daemonsets
continue_ref = None
for _ in range(DISCOVERY_MAX_BATCHES):
daemonsets: V1DaemonSetList = client.AppsV1Api().list_daemon_set_for_all_namespaces(
limit=DISCOVERY_BATCH_SIZE, _continue=continue_ref
)
active_services.extend(
[
Discovery.__create_service_info(
daemonset.metadata,
"DaemonSet",
extract_containers(daemonset),
extract_volumes(daemonset),
extract_total_pods(daemonset),
extract_ready_pods(daemonset),
is_helm_release=is_release_managed_by_helm(annotations=daemonset.metadata.annotations,
labels=daemonset.metadata.labels)
)
for daemonset in daemonsets.items
]
)
continue_ref = daemonsets.metadata._continue
if not continue_ref:
break
# discover replicasets
continue_ref = None
for _ in range(DISCOVERY_MAX_BATCHES):
replicasets: V1ReplicaSetList = client.AppsV1Api().list_replica_set_for_all_namespaces(
limit=DISCOVERY_BATCH_SIZE, _continue=continue_ref
)
active_services.extend(
[
Discovery.__create_service_info(
replicaset.metadata,
"ReplicaSet",
extract_containers(replicaset),
extract_volumes(replicaset),
extract_total_pods(replicaset),
extract_ready_pods(replicaset),
is_helm_release=is_release_managed_by_helm(annotations=replicaset.metadata.annotations,
labels=replicaset.metadata.labels)
)
for replicaset in replicasets.items
if not replicaset.metadata.owner_references and replicaset.spec.replicas > 0
]
)
continue_ref = replicasets.metadata._continue
if not continue_ref:
break
# discover pods
continue_ref = None
for _ in range(DISCOVERY_MAX_BATCHES):
pods: V1PodList = client.CoreV1Api().list_pod_for_all_namespaces(
limit=DISCOVERY_BATCH_SIZE, _continue=continue_ref
)
for pod in pods.items:
pods_metadata.append(pod.metadata)
if not pod.metadata.owner_references and not is_pod_finished(pod):
active_services.append(
Discovery.__create_service_info(
pod.metadata,
"Pod",
extract_containers(pod),
extract_volumes(pod),
extract_total_pods(pod),
extract_ready_pods(pod),
is_helm_release=is_release_managed_by_helm(annotations=pod.metadata.annotations,
labels=pod.metadata.labels)
)
)
pod_status = pod.status.phase
if pod_status in ["Running", "Unknown", "Pending"] and pod.spec.node_name:
node_requests[pod.spec.node_name].append(utils.k8s_pod_requests(pod))
continue_ref = pods.metadata._continue
if not continue_ref:
break
except Exception as e:
logging.error(
"Failed to run periodic service discovery",
exc_info=True,
)
raise e
# discover nodes - no need for batching. Number of nodes is not big enough
try:
current_nodes: V1NodeList = client.CoreV1Api().list_node()
except Exception as e:
logging.error(
"Failed to run periodic nodes discovery",
exc_info=True,
)
raise e
# discover jobs
active_jobs: List[JobInfo] = []
try:
continue_ref: Optional[str] = None
for _ in range(DISCOVERY_MAX_BATCHES):
current_jobs: V1JobList = client.BatchV1Api().list_job_for_all_namespaces(
limit=DISCOVERY_BATCH_SIZE, _continue=continue_ref
)
for job in current_jobs.items:
job_pods = []
job_labels = {}
if job.spec.selector:
job_labels = job.spec.selector.match_labels
elif job.metadata.labels:
job_name = job.metadata.labels.get("job-name", None)
if job_name:
job_labels = {"job-name": job_name}
if job_labels: # add job pods only if we found a valid selector
job_pods = [
pod_meta.name
for pod_meta in pods_metadata
if (
(job.metadata.namespace == pod_meta.namespace)
and (job_labels.items() <= (pod_meta.labels or {}).items())
)
]
active_jobs.append(JobInfo.from_api_server(job, job_pods))
continue_ref = current_jobs.metadata._continue
if not continue_ref:
break
except Exception as e:
logging.error(
"Failed to run periodic jobs discovery",
exc_info=True,
)
raise e
helm_releases_map: dict[str, HelmRelease] = {}
if not DISABLE_HELM_MONITORING:
# discover helm state
try:
continue_ref: Optional[str] = None
for _ in range(DISCOVERY_MAX_BATCHES):
secrets = client.CoreV1Api().list_secret_for_all_namespaces(label_selector=f"owner=helm",
_continue=continue_ref)
if not secrets.items:
break
for secret_item in secrets.items:
release_data = secret_item.data.get("release", None)
if not release_data:
continue
try:
decoded_release_row = HelmRelease.from_api_server(secret_item.data['release'])
# we use map here to deduplicate and pick only the latest release data
helm_releases_map[decoded_release_row.get_service_key()] = decoded_release_row
except Exception as e:
logging.error(f"an error occured while decoding helm releases: {e}")
continue_ref = secrets.metadata._continue
if not continue_ref:
break
except Exception as e:
logging.error(
"Failed to run periodic helm discovery",
exc_info=True,
)
raise e
# discover namespaces
try:
namespaces: List[NamespaceInfo] = [
NamespaceInfo.from_api_server(namespace) for namespace in client.CoreV1Api().list_namespace().items
]
except Exception as e:
logging.error(
"Failed to run periodic namespaces discovery",
exc_info=True,
)
raise e
return DiscoveryResults(
services=active_services,
nodes=current_nodes,
node_requests=node_requests,
jobs=active_jobs,
namespaces=namespaces,
helm_releases=list(helm_releases_map.values())
)
@staticmethod
@discovery_errors_count.count_exceptions()
@discovery_process_time.time()
def discover_resources() -> DiscoveryResults:
try:
future = Discovery.executor.submit(Discovery.discovery_process)
return future.result(timeout=DISCOVERY_PROCESS_TIMEOUT_SEC)
except Exception as e:
# We've seen this and believe the process is killed due to oom kill
# The process pool becomes not usable, so re-creating it
logging.error("Discovery process internal error")
Discovery.executor.shutdown()
Discovery.executor = ProcessPoolExecutor(max_workers=1)
logging.info("Initialized new discovery pool")
raise e
@staticmethod
def discover_stats() -> ClusterStats:
deploy_count = -1
sts_count = -1
dms_count = -1
rs_count = -1
pod_count = -1
node_count = -1
job_count = -1
try:
deps: V1DeploymentList = client.AppsV1Api().list_deployment_for_all_namespaces(limit=1, _continue=None)
remaining = deps.metadata.remaining_item_count or 0
deploy_count = remaining + len(deps.items)
except Exception:
logging.error("Failed to count deployments", exc_info=True)
try:
sts: V1StatefulSetList = client.AppsV1Api().list_stateful_set_for_all_namespaces(limit=1, _continue=None)
remaining = sts.metadata.remaining_item_count or 0
sts_count = remaining + len(sts.items)
except Exception:
logging.error("Failed to count statefulsets", exc_info=True)
try:
dms: V1DaemonSetList = client.AppsV1Api().list_daemon_set_for_all_namespaces(limit=1, _continue=None)
remaining = dms.metadata.remaining_item_count or 0
dms_count = remaining + len(dms.items)
except Exception:
logging.error("Failed to count daemonsets", exc_info=True)
try:
rs: V1ReplicaSetList = client.AppsV1Api().list_replica_set_for_all_namespaces(limit=1, _continue=None)
remaining = rs.metadata.remaining_item_count or 0
rs_count = remaining + len(rs.items)
except Exception:
logging.error("Failed to count replicasets", exc_info=True)
try:
pods: V1PodList = client.CoreV1Api().list_pod_for_all_namespaces(limit=1, _continue=None)
remaining = pods.metadata.remaining_item_count or 0
pod_count = remaining + len(pods.items)
except Exception:
logging.error("Failed to count pods", exc_info=True)
try:
nodes: V1NodeList = client.CoreV1Api().list_node(limit=1, _continue=None)
remaining = nodes.metadata.remaining_item_count or 0
node_count = remaining + len(nodes.items)
except Exception:
logging.error("Failed to count nodes", exc_info=True)
try:
jobs: V1JobList = client.BatchV1Api().list_job_for_all_namespaces(limit=1, _continue=None)
remaining = jobs.metadata.remaining_item_count or 0
job_count = remaining + len(jobs.items)
except Exception:
logging.error("Failed to count jobs", exc_info=True)
k8s_version: str = None
try:
k8s_version = client.VersionApi().get_code().git_version
except Exception:
logging.exception("Failed to get k8s server version")
return ClusterStats(
deployments=deploy_count,
statefulsets=sts_count,
daemonsets=dms_count,
replicasets=rs_count,
pods=pod_count,
nodes=node_count,
jobs=job_count,
provider=cluster_provider.get_cluster_provider(),
k8s_version=k8s_version
)
# This section below contains utility related to k8s python api objects (rather than hikaru)
def extract_containers(resource) -> List[V1Container]:
    """Extract containers from a kubernetes-client (non-hikaru) workload object.

    Unknown kinds and extraction failures yield an empty list.
    """
    try:
        if isinstance(resource, (V1Deployment, V1DaemonSet, V1StatefulSet, V1Job)):
            return resource.spec.template.spec.containers
        if isinstance(resource, V1Pod):
            return resource.spec.containers
        return []
    except Exception:  # may fail if one of the attributes is None
        logging.error(f"Failed to extract containers from {resource}", exc_info=True)
        return []
# This section below contains utility related to k8s python api objects (rather than hikaru)
def extract_containers_k8(resource) -> List[Container]:
    """Extract containers from a hikaru workload object.

    Counterpart of extract_containers for hikaru classes; unknown kinds and
    extraction failures yield an empty list.
    """
    try:
        if isinstance(resource, (Deployment, DaemonSet, StatefulSet, Job)):
            return resource.spec.template.spec.containers
        if isinstance(resource, Pod):
            return resource.spec.containers
        return []
    except Exception:  # may fail if one of the attributes is None
        logging.error(f"Failed to extract containers from {resource}", exc_info=True)
        return []
def is_pod_ready(pod) -> bool:
    """Return True when the pod reports a "Ready" condition with status "True".

    Accepts either a kubernetes-client V1Pod or a hikaru Pod; any other object,
    or a pod without conditions yet, is reported as not ready.
    """
    conditions = []
    if isinstance(pod, (V1Pod, Pod)):
        # status.conditions is optional and may be None on freshly-created pods;
        # the original iterated it unguarded, raising TypeError in that case
        conditions = pod.status.conditions or []
    for condition in conditions:
        if condition.type == "Ready":
            return condition.status.lower() == "true"
    return False
def is_pod_finished(pod) -> bool:
    """Return True when the pod ran to completion (phase Succeeded or Failed).

    Such pods have all containers terminated and are pending garbage collection.
    Non-pod objects and pods without a phase yet are reported as not finished.
    """
    try:
        if isinstance(pod, (V1Pod, Pod)):
            # all containers in the pod have terminated, this pod should be removed by GC
            return pod.status.phase.lower() in ["succeeded", "failed"]
        # previously fell through and implicitly returned None for non-pod input
        return False
    except AttributeError:  # phase is an optional field
        return False
def extract_ready_pods(resource) -> int:
    """Return the number of ready pods backing the given workload object.

    Handles both hikaru objects (camelCase status fields) and kubernetes-client
    objects (snake_case status fields); unknown kinds count as 0.
    """
    try:
        if isinstance(resource, (Deployment, StatefulSet)):
            return resource.status.readyReplicas or 0
        if isinstance(resource, DaemonSet):
            return resource.status.numberReady or 0
        if isinstance(resource, (Pod, V1Pod)):
            return 1 if is_pod_ready(resource) else 0
        if isinstance(resource, (V1Deployment, V1StatefulSet)):
            return resource.status.ready_replicas or 0
        if isinstance(resource, V1DaemonSet):
            return resource.status.number_ready or 0
        return 0
    except Exception:  # fields may not exist if all the pods are not ready - example: deployment crashpod
        logging.error(f"Failed to extract ready pods from {resource}", exc_info=True)
        return 0
def extract_total_pods(resource) -> int:
    """Return the desired/total pod count for the given workload object.

    Works with both hikaru and kubernetes-client objects; unknown kinds yield 0,
    and extraction failures fall back to 1.
    """
    try:
        if isinstance(resource, (Deployment, StatefulSet, V1Deployment, V1StatefulSet)):
            # spec.replicas may legitimately be 0; an absent value defaults to 1
            return resource.spec.replicas if resource.spec.replicas is not None else 1
        if isinstance(resource, DaemonSet):
            return resource.status.desiredNumberScheduled or 0
        if isinstance(resource, V1DaemonSet):
            return resource.status.desired_number_scheduled or 0
        if isinstance(resource, (Pod, V1Pod)):
            return 1
        return 0
    except Exception:
        logging.error(f"Failed to extract total pods from {resource}", exc_info=True)
        return 1
def is_release_managed_by_helm(labels: Optional[dict], annotations: Optional[dict]) -> bool:
    """Heuristically decide whether a resource is managed by Helm.

    True when the standard managed-by label says "Helm", or when any label or
    annotation key starts with a helm prefix. Any error during the check is
    logged and treated as "not helm-managed".
    """
    helm_prefixes = ("helm.", "meta.helm.")
    try:
        if labels:
            if labels.get("app.kubernetes.io/managed-by") == "Helm":
                return True
            if any(key.startswith(helm_prefixes) for key in labels):
                return True
        if annotations and any(key.startswith(helm_prefixes) for key in annotations):
            return True
    except Exception:
        logging.error(
            f"Failed to check if deployment was done via helm -> labels: {labels} | annotations: {annotations}")
    return False
def extract_volumes(resource) -> List[V1Volume]:
    """Extract volumes from a kubernetes-client (non-hikaru) workload object.

    Unknown kinds and extraction failures yield an empty list.
    """
    try:
        if isinstance(resource, (V1Deployment, V1DaemonSet, V1StatefulSet, V1Job)):
            return resource.spec.template.spec.volumes
        if isinstance(resource, V1Pod):
            return resource.spec.volumes
        return []
    except Exception:  # may fail if one of the attributes is None
        logging.error(f"Failed to extract volumes from {resource}", exc_info=True)
        return []
def extract_volumes_k8(resource) -> List[Volume]:
    """Extract volumes from k8s python api object (not hikaru)"""
    # NOTE(review): despite the docstring, this variant matches *hikaru* classes
    # (Deployment/DaemonSet/StatefulSet/Job/Pod), mirroring extract_volumes above - confirm intent.
    try:
        volumes = []
        if (
            isinstance(resource, Deployment)
            or isinstance(resource, DaemonSet)
            or isinstance(resource, StatefulSet)
            or isinstance(resource, Job)
        ):
            volumes = resource.spec.template.spec.volumes
        elif isinstance(resource, Pod):
            volumes = resource.spec.volumes
        return volumes
    except Exception:  # may fail if one of the attributes is None
        logging.error(f"Failed to extract volumes from {resource}", exc_info=True)
        return [] | /robusta_cli-0.10.22a1-py3-none-any.whl/robusta/core/discovery/discovery.py | 0.692434 | 0.153486 | discovery.py | pypi |
from typing import Callable, List, Optional
from kubernetes import client
from pydantic import BaseModel
from robusta.utils.error_codes import ActionException, ErrorCodes
class ResourceLister(BaseModel):
    """Pair of kubernetes client list functions for one resource kind."""
    list_all: Callable  # lists the resource across the whole cluster
    list_namespaced: Optional[Callable]  # lists within one namespace; None for cluster-scoped kinds
# Registry of supported resource kinds (lowercase) to their list functions.
LISTERS = {  # TODO add ingress and cronjobs once upgrading the k8s client version (when updating hikaru)
    # NOTE(review): "ingress" already appears below - the TODO above looks half-done (cronjobs still missing)
    "node": ResourceLister(
        list_all=client.CoreV1Api().list_node,
    ),
    "deployment": ResourceLister(
        list_all=client.AppsV1Api().list_deployment_for_all_namespaces,
        list_namespaced=client.AppsV1Api().list_namespaced_deployment,
    ),
    "statefulset": ResourceLister(
        list_all=client.AppsV1Api().list_stateful_set_for_all_namespaces,
        list_namespaced=client.AppsV1Api().list_namespaced_stateful_set,
    ),
    "job": ResourceLister(
        list_all=client.BatchV1Api().list_job_for_all_namespaces,
        list_namespaced=client.BatchV1Api().list_namespaced_job,
    ),
    "daemonset": ResourceLister(
        list_all=client.AppsV1Api().list_daemon_set_for_all_namespaces,
        list_namespaced=client.AppsV1Api().list_namespaced_daemon_set,
    ),
    "persistentvolume": ResourceLister(
        list_all=client.CoreV1Api().list_persistent_volume,
    ),
    "persistentvolumeclaim": ResourceLister(
        list_all=client.CoreV1Api().list_persistent_volume_claim_for_all_namespaces,
        list_namespaced=client.CoreV1Api().list_namespaced_persistent_volume_claim,
    ),
    "service": ResourceLister(
        list_all=client.CoreV1Api().list_service_for_all_namespaces,
        list_namespaced=client.CoreV1Api().list_namespaced_service,
    ),
    "networkpolicy": ResourceLister(
        list_all=client.NetworkingV1Api().list_network_policy_for_all_namespaces,
        list_namespaced=client.NetworkingV1Api().list_namespaced_network_policy,
    ),
    "configmap": ResourceLister(
        list_all=client.CoreV1Api().list_config_map_for_all_namespaces,
        list_namespaced=client.CoreV1Api().list_namespaced_config_map,
    ),
    "ingress": ResourceLister(
        list_all=client.NetworkingV1Api().list_ingress_for_all_namespaces,
        list_namespaced=client.NetworkingV1Api().list_namespaced_ingress,
    ),
}
class ResourceNameLister:
    """Lists names of supported k8s resources, optionally scoped to a namespace."""

    @staticmethod
    def list_resource_names(kind: str, namespace: str = None) -> List[str]:
        """Return the names of all resources of `kind` (optionally within `namespace`).

        Raises ActionException when the kind is unsupported, or when a namespace
        is given for a kind that only supports cluster-wide listing.
        """
        kind_key = kind.lower()  # compute once; previously re-lowered on every use
        lister = LISTERS.get(kind_key)  # idiomatic lookup instead of `not x in d.keys()`
        if lister is None:
            raise ActionException(
                error=ErrorCodes.RESOURCE_NOT_SUPPORTED, msg=f"Listing names of {kind} is not supported"
            )
        if namespace:
            namespace_lister = lister.list_namespaced
            if not namespace_lister:
                raise ActionException(
                    error=ErrorCodes.ILLEGAL_ACTION_PARAMS,
                    msg=f"Listing names of {kind} for a specific namespace is not supported",
                )
            resources = namespace_lister(namespace=namespace)
        else:
            resources = lister.list_all()
        return [resource.metadata.name for resource in resources.items]
from typing import Dict, List, Optional, Union
from hikaru.model.rel_1_26 import Container, Volume
from kubernetes.client import V1Container, V1Volume
from pydantic import BaseModel
class EnvVar(BaseModel):
    """A single name/value environment variable of a container."""
    name: str
    value: str
class Resources(BaseModel):
    """Container resource limits and requests, e.g. {"cpu": "100m"}."""
    limits: Dict[str, str]
    requests: Dict[str, str]
    def __eq__(self, other):
        # only limits and requests participate in equality
        if not isinstance(other, Resources):
            return NotImplemented
        return self.limits == other.limits and self.requests == other.requests
class ContainerInfo(BaseModel):
    """Normalized view of a container spec: image, literal env vars, resources and ports."""
    name: str
    image: str
    env: List[EnvVar]
    resources: Resources
    ports: List[int] = []
    @staticmethod
    def get_container_info(container: V1Container) -> "ContainerInfo":
        """Build a ContainerInfo from a kubernetes-client V1Container (snake_case fields)."""
        # keep only env vars carrying literal values (entries without a value are dropped)
        env = (
            [EnvVar(name=env.name, value=env.value) for env in container.env if env.name and env.value]
            if container.env
            else []
        )
        limits = container.resources.limits if container.resources.limits else {}
        requests = container.resources.requests if container.resources.requests else {}
        resources = Resources(limits=limits, requests=requests)
        ports = [p.container_port for p in container.ports] if container.ports else []
        return ContainerInfo(name=container.name, image=container.image, env=env, resources=resources, ports=ports)
    @staticmethod
    def get_container_info_k8(container: Container) -> "ContainerInfo":
        """Build a ContainerInfo from a hikaru Container (camelCase fields, e.g. containerPort)."""
        env = (
            [EnvVar(name=env.name, value=env.value) for env in container.env if env.name and env.value]
            if container.env
            else []
        )
        limits = container.resources.limits if container.resources.limits else {}
        requests = container.resources.requests if container.resources.requests else {}
        resources = Resources(limits=limits, requests=requests)
        ports = [p.containerPort for p in container.ports] if container.ports else []
        return ContainerInfo(name=container.name, image=container.image, env=env, resources=resources, ports=ports)
    def __eq__(self, other):
        # NOTE(review): ports are not compared here - confirm that is intended
        if not isinstance(other, ContainerInfo):
            return NotImplemented
        return (
            self.name == other.name
            and self.image == other.image
            and self.resources == other.resources
            and sorted(self.env, key=lambda x: x.name) == sorted(other.env, key=lambda x: x.name)
        )
class VolumeInfo(BaseModel):
    """A pod volume; carries PVC claim info when the volume is PVC-backed."""
    name: str
    persistent_volume_claim: Optional[Dict[str, str]]
    @staticmethod
    def get_volume_info(volume: Union[V1Volume, Volume]) -> "VolumeInfo":
        # NOTE(review): hikaru's Volume exposes `persistentVolumeClaim` (camelCase),
        # so this hasattr check appears to match only V1Volume - verify intended.
        if hasattr(volume, "persistent_volume_claim") and hasattr(volume.persistent_volume_claim, "claim_name"):
            return VolumeInfo(
                name=volume.name, persistent_volume_claim={"claim_name": volume.persistent_volume_claim.claim_name}
            )
        return VolumeInfo(name=volume.name)
class ServiceConfig(BaseModel):
    """The discovered configuration of a service: labels, containers and volumes."""
    labels: Dict[str, str]
    containers: List[ContainerInfo]
    volumes: List[VolumeInfo]
    def __eq__(self, other):
        if not isinstance(other, ServiceConfig):
            return NotImplemented
        # pydantic comparison bug of nested lists and dicts not in the same order,
        # so containers/volumes are compared sorted by name.
        # BUGFIX: labels were compared with `!=`, which made two identical
        # configs always compare unequal (and configs differing only in labels
        # potentially compare equal).
        return (
            sorted(self.containers, key=lambda x: x.name) == sorted(other.containers, key=lambda x: x.name)
            and sorted(self.volumes, key=lambda x: x.name) == sorted(other.volumes, key=lambda x: x.name)
            and self.labels == other.labels
        )
class ServiceInfo(BaseModel):
    """A discovered k8s workload ("service") plus its configuration snapshot."""
    resource_version: int = 0
    name: str
    service_type: str  # k8s kind, e.g. "Deployment" / "Pod"
    namespace: str
    classification: str = "None"
    deleted: bool = False
    service_config: Optional[ServiceConfig]
    ready_pods: int = 0
    total_pods: int = 0
    is_helm_release: Optional[bool]
    def get_service_key(self) -> str:
        """Unique key of the service within a cluster: namespace/kind/name."""
        return f"{self.namespace}/{self.service_type}/{self.name}"
    def __eq__(self, other):
        # NOTE(review): resource_version is excluded from equality - confirm intended.
        if not isinstance(other, ServiceInfo):
            return NotImplemented
        return (
            self.name == other.name and
            self.service_type == other.service_type and
            self.namespace == other.namespace and
            self.classification == other.classification and
            self.is_helm_release == other.is_helm_release and
            self.deleted == other.deleted and
            self.service_config == other.service_config and
            self.ready_pods == other.ready_pods and
            self.total_pods == other.total_pods
        ) | /robusta_cli-0.10.22a1-py3-none-any.whl/robusta/core/model/services.py | 0.850887 | 0.217358 | services.py | pypi |
import copy
import logging
import uuid
from collections import defaultdict
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional
from pydantic import BaseModel
from robusta.core.reporting.base import (
BaseBlock,
Finding,
FindingSeverity,
FindingSource,
FindingSubject,
FindingSubjectType,
VideoLink,
)
from robusta.core.sinks import SinkBase
from robusta.integrations.scheduled.playbook_scheduler import PlaybooksScheduler
class EventType(Enum):
    """Source/category that triggered a playbook execution."""
    KUBERNETES_TOPOLOGY_CHANGE = 1
    PROMETHEUS = 2
    MANUAL_TRIGGER = 3
    SCHEDULED_TRIGGER = 4
class ExecutionEventBaseParams(BaseModel):
    """Base action parameters: optional list of sink names to route findings to."""
    named_sinks: Optional[List[str]] = None
class ExecutionContext(BaseModel):
    """Identity of the account/cluster this execution runs in."""
    account_id: str
    cluster_name: str
# Right now:
# 1. this is a dataclass but we need to make all fields optional in subclasses because of https://stackoverflow.com/questions/51575931/
# 2. this can't be a pydantic BaseModel because of various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557)
# once the pydantic PR that addresses those issues is merged, this should be a pydantic class
# (note that we need to integrate with dataclasses because of hikaru)
@dataclass
class ExecutionBaseEvent:
    """Base event for playbook executions: carries per-sink findings and sink routing."""
    # Collection of findings that should be sent to each sink.
    # This collection is shared between different playbooks that are triggered by the same event.
    sink_findings: Dict[str, List[Finding]] = field(default_factory=lambda: defaultdict(list))
    # Target sinks for this execution event. Each playbook may have a different list of target sinks.
    named_sinks: Optional[List[str]] = None
    all_sinks: Optional[Dict[str, SinkBase]] = None
    # Response returned to caller. For admission or manual triggers for example
    response: Dict[str, Any] = None  # type: ignore
    stop_processing: bool = False
    _scheduler: Optional[PlaybooksScheduler] = None
    _context: Optional[ExecutionContext] = None
    def set_context(self, context: ExecutionContext):
        self._context = context
    def get_context(self) -> ExecutionContext:
        return self._context
    def set_scheduler(self, scheduler: PlaybooksScheduler):
        self._scheduler = scheduler
    def get_scheduler(self) -> PlaybooksScheduler:
        return self._scheduler
    def create_default_finding(self) -> Finding:
        """Create finding default fields according to the event type"""
        return Finding(title="Robusta notification", aggregation_key="Generic finding key")
    def set_all_sinks(self, all_sinks: Dict[str, SinkBase]):
        self.all_sinks = all_sinks
    def get_all_sinks(self):
        return self.all_sinks
    def __prepare_sinks_findings(self):
        # Ensure every target sink has at least one (default) finding to attach content to.
        finding_id: uuid.UUID = uuid.uuid4()
        for sink in self.named_sinks:
            if len(self.sink_findings[sink]) == 0:
                sink_finding = self.create_default_finding()
                sink_finding.id = finding_id  # share the same finding id between different sinks
                self.sink_findings[sink].append(sink_finding)
    def add_video_link(self, video_link: VideoLink):
        # Attach a video link to the first (active) finding of every target sink.
        self.__prepare_sinks_findings()
        for sink in self.named_sinks:
            self.sink_findings[sink][0].add_video_link(video_link, True)
    def add_enrichment(
        self,
        enrichment_blocks: List[BaseBlock],
        annotations=None,
    ):
        # Attach enrichment blocks to the first (active) finding of every target sink.
        self.__prepare_sinks_findings()
        for sink in self.named_sinks:
            self.sink_findings[sink][0].add_enrichment(enrichment_blocks, annotations, True)
    def add_finding(self, finding: Finding, suppress_warning: bool = False):
        finding.dirty = True  # Warn if new enrichments are added to this finding directly
        first = True  # no need to clone the finding on the first sink. Use the orig finding
        for sink in self.named_sinks:
            if (len(self.sink_findings[sink]) > 0) and not suppress_warning:
                logging.warning(f"Overriding active finding for {sink}. new finding: {finding}")
            if not first:
                # subsequent sinks get their own deep copy, so sinks can't mutate each other's finding
                finding = copy.deepcopy(finding)
            self.sink_findings[sink].insert(0, finding)
            first = False
    def override_finding_attributes(
        self, title: Optional[str] = None, description: Optional[str] = None, severity: FindingSeverity = None
    ):
        # Override the given attributes on every finding of every target sink.
        for sink in self.named_sinks:
            for finding in self.sink_findings[sink]:
                if title:
                    finding.title = title
                if description:
                    finding.description = description
                if severity:
                    finding.severity = severity
    @staticmethod
    def from_params(params: ExecutionEventBaseParams) -> Optional["ExecutionBaseEvent"]:
        return ExecutionBaseEvent(named_sinks=params.named_sinks)
    def get_subject(self) -> FindingSubject:
        return FindingSubject(name="Unresolved", subject_type=FindingSubjectType.TYPE_NONE)
    @classmethod
    def get_source(cls) -> FindingSource:
        return FindingSource.NONE | /robusta_cli-0.10.22a1-py3-none-any.whl/robusta/core/model/events.py | 0.843734 | 0.210239 | events.py | pypi |
from io import StringIO
# Canonical example values per JSON-schema primitive type.
_DEFAULT_EXAMPLES = {
    "string": "string",
    "integer": 1,
    "number": 1.0,
    "boolean": True,
    "array": [],
}
# Canonical example values per string `format`.
_DEFAULT_STRING_EXAMPLES = {
    "date": "2020-01-01",
    "date-time": "2020-01-01T01:01:01Z",
    "password": "********",
    "byte": "QG1pY2hhZWxncmFoYW1ldmFucw==",
    "ipv4": "127.0.0.1",
    "ipv6": "::1",
}


def example_from_schema(schema):
    """
    Generates an example request/response body from the provided schema.

    Optional object properties are included only when they declare an explicit
    ``example``; ``example``/``default``/``enum`` values take precedence over
    generated ones, and ``minLength``/``maxLength``/``minimum``/``maximum``
    constraints are honored.

    >>> schema = {
    ...     "type": "object",
    ...     "required": ["id", "name"],
    ...     "properties": {
    ...         "id": {
    ...             "type": "integer",
    ...             "format": "int64"
    ...         },
    ...         "name": {
    ...             "type": "string",
    ...             "example": "John Smith"
    ...         },
    ...         "tag": {
    ...             "type": "string"
    ...         }
    ...     }
    ... }
    >>> example = example_from_schema(schema)
    >>> assert example == {
    ...     "id": 1,
    ...     "name": "John Smith"
    ... }
    """
    if "example" in schema:
        return schema["example"]
    elif "default" in schema:
        return schema["default"]
    elif "oneOf" in schema:
        return example_from_schema(schema["oneOf"][0])
    elif "anyOf" in schema:
        return example_from_schema(schema["anyOf"][0])
    elif "allOf" in schema:
        # Combine schema examples
        example = {}
        for sub_schema in schema["allOf"]:
            example.update(example_from_schema(sub_schema))
        return example
    elif "enum" in schema:
        return schema["enum"][0]
    elif "type" not in schema:
        # Any type
        return _DEFAULT_EXAMPLES["integer"]
    elif schema["type"] == "object" or "properties" in schema:
        example = {}
        required = schema.get("required", [])
        for prop, prop_schema in schema.get("properties", {}).items():
            has_example = "example" in prop_schema
            # skip optional properties unless they have an explicit example
            if prop not in required and not has_example:
                continue
            example[prop] = example_from_schema(prop_schema)
        return example
    elif schema["type"] == "array":
        items = schema["items"]
        min_length = schema.get("minItems", 0)
        max_length = schema.get("maxItems", max(min_length, 2))
        assert min_length <= max_length
        # Try generate at least 2 example array items
        gen_length = min(2, max_length) if min_length <= 2 else min_length
        example_items = []
        if items == {}:
            # Any-type arrays
            example_items.extend(_DEFAULT_EXAMPLES.values())
        elif isinstance(items, dict) and "oneOf" in items:
            # Mixed-type arrays: take the first alternative.
            alternatives = items["oneOf"]
            if all(isinstance(alt, str) for alt in alternatives):
                # legacy form: a list of plain type names
                example_items.append(_DEFAULT_EXAMPLES[sorted(alternatives)[0]])
            else:
                # standard JSON-schema form: a list of sub-schemas.
                # (previously indexed _DEFAULT_EXAMPLES with sorted(items["oneOf"]),
                # which raises TypeError because schema dicts are unorderable)
                example_items.append(example_from_schema(alternatives[0]))
        else:
            example_items.append(example_from_schema(items))
        # Generate array containing example_items and satisfying min_length and max_length
        return [example_items[i % len(example_items)] for i in range(gen_length)]
    elif schema["type"] == "string":
        example_string = _DEFAULT_STRING_EXAMPLES.get(schema.get("format", None), _DEFAULT_EXAMPLES["string"])
        min_length = schema.get("minLength", 0)
        max_length = schema.get("maxLength", max(min_length, len(example_string)))
        gen_length = min(len(example_string), max_length) if min_length <= len(example_string) else min_length
        assert 0 <= min_length <= max_length
        if min_length <= len(example_string) <= max_length:
            return example_string
        else:
            # repeat/truncate the canonical example to satisfy the length bounds
            example_builder = StringIO()
            for i in range(gen_length):
                example_builder.write(example_string[i % len(example_string)])
            example_builder.seek(0)
            return example_builder.read()
    elif schema["type"] in ("integer", "number"):
        example = _DEFAULT_EXAMPLES[schema["type"]]
        if "minimum" in schema and "maximum" in schema:
            # Take average
            example = schema["minimum"] + (schema["maximum"] - schema["minimum"]) / 2
        elif "minimum" in schema and example <= schema["minimum"]:
            example = schema["minimum"] + 1
        elif "maximum" in schema and example >= schema["maximum"]:
            example = schema["maximum"] - 1
        return float(example) if schema["type"] == "number" else int(example)
    else:
        return _DEFAULT_EXAMPLES[schema["type"]]
import inspect
import json
import logging
from pydantic import BaseModel
from pydantic.fields import ModelField
from robusta.utils.docs import Docstring
class DocumentedModel(BaseModel):
    """
    A pydantic.BaseModel subclass that lets you document models through their
    docstrings rather than through Field(..., description="foo").

    Docs are written in the class docstring; behind the scenes the actual
    Fields() get updated, so pydantic's introspection and schema-generation
    work like normal and include those docs.
    """

    # warning: __init_subclass__ only works on Python 3.6 and above
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        docstring = inspect.getdoc(cls)
        if docstring is None:
            return
        cls.__apply_docstring(docstring)

    @classmethod
    def __apply_docstring(cls, docstring):
        """
        Update pydantic fields according to the docstring so that:

        1. individual fields can be documented with ":var fieldname: description"
        2. examples can be given with ":example fieldname: value"
        3. field-level docs (:var: and :example:) are stripped from the root docs
        """
        parsed = Docstring(docstring)
        for entry in parsed.fields:
            target = entry.field_target
            if target not in cls.__fields__:
                logging.warning(
                    f"The class {cls.__name__} has documentation for the `{target}` field, but it doesn't exist"
                )
                continue

            model_field: ModelField = cls.__fields__[target]
            if entry.field_type == "example":
                model_field.field_info.extra["example"] = cls.__parse_example(entry.field_value)
            if entry.field_type == "var":
                existing = model_field.field_info.description
                if existing:
                    logging.warning(
                        f"Overriding existing field description '{existing}' with '{entry.field_value}'"
                    )
                model_field.field_info.description = entry.field_value

        # Keep only the root description on the class itself.
        cls.__doc__ = parsed.description

    @staticmethod
    def __parse_example(example: str):
        """Interpret an example as JSON when possible; otherwise keep the raw string."""
        try:
            return json.loads(example)
        except json.JSONDecodeError:
            return example
# ROBUSTA
### Author: [Eitan Hemed](mailto:Eitan.Hemed@gmail.com)
robusta is a statistics package in Python3 providing an interface to
many common statistical analyses, performed through [R](https://www.r-project.org/)
and [RPY2](https://github.com/rpy2/rpy2).
**PLEASE NOTE** robusta is under active development and is supplied as-is with no guarantees.
## Installation
Install with pip using `pip install robusta-stats`, see also [Installation](https://eitanhemed.github.io/robusta/_build/html/Installation.html).
## Documentation
See [here](https://eitanhemed.github.io/robusta/_build/html/index.html).
## Usage
#### For the most recent, thorough tutorial in the different features of robusta, head on to [Google Colab](https://colab.research.google.com/drive/1jmwYpEGcpFr4CF6ZA5HMiQ2LcHbZqzO_?usp=sharing).
Some of the features are shown below.
### Importing the library and loading data
This could take ~15 seconds as many R libraries are imported under the hood. If you begin with an empty R environment, the first time you import robusta it should take at least a couple of minutes, as R dependencies will be installed.
```python
import robusta as rst
```
First, define a helper function used to pretty-print output of dataframes when converting the notebook to .md ([credit](https://gist.github.com/rgerkin/af5b27a0e30531c30f2bf628aa41a553)).
```python
from tabulate import tabulate
import IPython.display as d
def md_print_df(df):
md = tabulate(df, headers='keys', tablefmt='pipe')
md = md.replace('| |','| %s |' % (df.index.name if df.index.name else ''))
return d.Markdown(md)
```
First off, we need data. Using robusta we can import R built-in and some imported datasets. You can get a full list of the datasets, similarly to calling to `data()` with no input arguments in R.
```python
md_print_df(rst.get_available_datasets().tail())
```
| | Package | Item | Description |
|----:|:----------|:----------------------|:---------------------------------------------------------------------------------------------|
| 284 | ARTool | Higgins1990Table5 | Split-plot Experiment Examining Effect of Moisture and Fertilizer on Dry Matter in Peat Pots |
| 285 | ARTool | Higgins1990Table1.art | Aligned Rank Transformed Version of Higgins1990Table1 |
| 286 | ARTool | Higgins1990Table1 | Synthetic 3x3 Factorial Randomized Experiment |
| 287 | ARTool | ElkinABC | Synthetic 2x2x2 Within-Subjects Experiment |
| 288 | ARTool | ElkinAB | Synthetic 2x2 Within-Subjects Experiment |
We can import a dataset using `rst.load_dataset`
```python
iris = rst.load_dataset('iris')
md_print_df(iris.head())
```
| | dataset_rownames | Sepal.Length | Sepal.Width | Petal.Length | Petal.Width | Species |
|---:|-------------------:|---------------:|--------------:|---------------:|--------------:|:----------|
| 0 | 1 | 5.1 | 3.5 | 1.4 | 0.2 | setosa |
| 1 | 2 | 4.9 | 3 | 1.4 | 0.2 | setosa |
| 2 | 3 | 4.7 | 3.2 | 1.3 | 0.2 | setosa |
| 3 | 4 | 4.6 | 3.1 | 1.5 | 0.2 | setosa |
| 4 | 5 | 5 | 3.6 | 1.4 | 0.2 | setosa |
### Running statistical analyses
Analyses are performed using designated model objects that also store the analysis results. The model objects are returned through calls to the function API. In this example we create a model (`m`) object by calling `t2samples`. `m` will be used to fit the statistical model, returning the `results` object.
Here is a paired-samples t-test using the Students' sleep dataset previously loaded:
```python
# Create the model
m = rst.groupwise.T2Samples(
data=rst.load_dataset('sleep'), independent='group',
dependent='extra', subject='ID', paired=True, tail='less')
# Dataframe format of the results
md_print_df(m.report_table())
```
| | t | df | p-value | Cohen-d Low | Cohen-d | Cohen-d High |
|---:|---------:|-----:|-----------:|--------------:|----------:|---------------:|
| 1 | -4.06213 | 9 | 0.00141645 | -2.11801 | -1.28456 | -0.414622 |
```python
# Textual report of the results - copy and paste into your results section!
m.report_text()
```
't(9) = -4.06, p = 0.001'
We can reset the models in order to update the model parameters and re-fit them. In this example, we run the same model as an independent samples t-test:
```python
m.reset(paired=False, assume_equal_variance=True, refit=True)
md_print_df(m.report_table())
```
| | t | df | p-value | Cohen-d Low | Cohen-d | Cohen-d High |
|---:|---------:|-----:|----------:|--------------:|----------:|---------------:|
| 1 | -1.86081 | 18 | 0.0395934 | -1.73882 | -0.832181 | 0.0954595 |
#### Bayesian t-tests
`bayes_t2samples` and `bayes_t1sample` allow you to calculate Bayes factors or sample from the posterior distribution:
```python
m = rst.groupwise.BayesT2Samples(
data=rst.load_dataset('mtcars'), subject='dataset_rownames',
dependent='mpg', independent='am', prior_scale=0.5,
paired=False)
md_print_df(m.report_table())
```
| | model | bf | error |
|---:|:------------|--------:|------------:|
| 0 | Alt., r=0.5 | 71.3861 | 7.97835e-07 |
```python
# Test different null intervals and prior values:
m.reset(prior_scale=0.1, null_interval=[0, 0.5], refit=True)
print(f'{m.report_text()}\n\n')
md_print_df(m.report_table())
```
Alt., r=0.1 [BF1:0 = 18.64, Error = 0.001%]
| | model | bf | error |
|---:|:------------|--------:|------------:|
| 0 | Alt., r=0.1 | 18.6411 | 2.33663e-05 |
#### Analysis of variance
use `Anova` to run between, within or mixed-design ANOVA, we load the anxiety dataset for the next demonstrations.
For non-parametric ANOVAs see `KruskalWallisTest`, `FriedmanTest` and `AlignedRanksTest`
```python
# Load the dataset and modify it from a 'wide' to 'long' format dataframe
anxiety = rst.load_dataset('anxiety').set_index(['id', 'group']
).filter(regex='^t[1-3]$').stack().reset_index().rename(
columns={0: 'score',
'level_2': 'time'})
md_print_df(anxiety.head())
```
| | id | group | time | score |
|---:|-----:|:--------|:-------|--------:|
| 0 | 1 | grp1 | t1 | 14.1 |
| 1 | 1 | grp1 | t2 | 14.4 |
| 2 | 1 | grp1 | t3 | 14.1 |
| 3 | 2 | grp1 | t1 | 14.5 |
| 4 | 2 | grp1 | t2 | 14.6 |
```python
m = rst.groupwise.Anova(
data=anxiety, subject='id',
dependent='score', between='group', within='time')
md_print_df(m.report_table())
```
R[write to console]: Contrasts set to contr.sum for the following variables: group
| | Term | p-value | Partial Eta-Squared | F | df1 | df2 |
|---:|:-----------|----------:|----------------------:|-------:|------:|------:|
| 1 | group | 0.019 | 0.172 | 4.35 | 2 | 42 |
| 2 | time | 0.001 | 0.904 | 394.91 | 1.79 | 75.24 |
| 3 | group:time | 0.001 | 0.84 | 110.19 | 3.58 | 75.24 |
Similarly, we run the model using only the between-subject term (`group`). As the model was already generated, we can simply drop the within-subject term:
```python
m.reset(within=None, refit=True)
md_print_df(m.report_table())
```
R[write to console]: Contrasts set to contr.sum for the following variables: group
| | Term | p-value | Partial Eta-Squared | F | df1 | df2 |
|---:|:-------|----------:|----------------------:|-----:|------:|------:|
| 1 | group | 0.019 | 0.172 | 4.35 | 2 | 42 |
R and many other statistical packages (e.g., [statsmodels](https://www.statsmodels.org/stable/index.html)) support a formula interface to fit statistical models. Here it is shown that a model can also be specified by the formula kwargs rather than specifying `dependent`, `between` etc. The formula indicates that the score column is regressed by the time variable, with observations nested within the id column.
```python
m.reset(formula='score~time|id', refit=True)
md_print_df(m.report_table())
```
| | Term | p-value | Partial Eta-Squared | F | df1 | df2 |
|---:|:-------|----------:|----------------------:|------:|------:|------:|
| 1 | time | 0.001 | 0.601 | 66.23 | 1.15 | 50.55 |
We can also run a similar, bayesian ANOVA using `BayesAnova` comparing the
specified terms to the null model:
```python
m = rst.groupwise.BayesAnova(data=anxiety, within='time',
dependent='score', subject='id')
md_print_df(m.report_table())
```
| | model | bf | error |
|---:|:--------|--------:|------------:|
| 0 | time | 496.129 | 7.82496e-05 |
## Work in progress and planned features
robusta includes several other features that are either under development or planned for the future.
<ins>Currently under work</ins>
- Regressions and correlations modules
<ins>Planned</ins>
- Sequential analysis plots (inspired by [JASP](https://jasp-stats.org/))
## How to contribute
All help is welcome, but currently there are no specific guidelines. Please contact [Eitan Hemed](mailto:Eitan.Hemed@gmail.com)
| /robusta_stats-0.0.4.tar.gz/robusta_stats-0.0.4/README.md | 0.536556 | 0.98045 | README.md | pypi |
import typing
import warnings
import numpy as np
import pandas as pd
from . import results
from .. import pyr
from ..misc import base
# Public API of this module.
__all__ = ['ChiSquare', 'Correlation', 'PartCorrelation',
           'PartialCorrelation', 'BayesCorrelation']
# Correlation coefficient methods accepted by Correlation and its subclasses.
CORRELATION_METHODS = ('pearson', 'spearman', 'kendall')
DEFAULT_CORRELATION_METHOD = 'pearson'
# Result columns produced by BayesFactor that carry no statistical content.
REDUNDANT_BAYES_RESULT_COLS = ['time', 'code']
# R's NULL - lets BayesFactor::correlationBF use its own default interval.
DEFAULT_CORRELATION_NULL_INTERVAL = pyr.rinterface.NULL
class _PairwiseCorrelation(base.BaseModel):
    """
    Base class for statistical tests relating two variables.

    Parameters
    ----------
    x : [str, array-like]
        Input to test. Either a string naming a column of `data`, or an
        array-like object of observations.
    y : [str, array-like]
        Input to test. Either a string naming a column of `data`, or an
        array-like object of observations.
    data : pd.DataFrame, optional
        Dataframe of data to test, with the columns specified in `x` and `y`.
    fit : bool, optional
        Whether to run the statistical test upon object creation. Default is True.
    nan_action : str, optional
        Policy for missing values: 'raise' (default), 'drop', 'mean' or
        'median'.

    Raises
    ------
    ValueError
        If x and y (strings) are not names of columns in data.
        If x and y (array-like) are not of the same length.
    """

    def __init__(self,
                 # TODO - modify the 'x' and 'y' types to be non-mapping iterables
                 x: typing.Iterable = None,
                 y: typing.Iterable = None,
                 data: typing.Optional[pd.DataFrame] = None,
                 fit: bool = True,
                 nan_action: str = 'raise',
                 **kwargs):
        self.data = data
        self.x = x
        self.y = y
        self.nan_action = nan_action
        self._results = None
        self._fitted = False

        super().__init__()

        if fit:
            self.fit()

    def _pre_process(self):
        """Pre-process the input arguments prior to fitting the model."""
        self._set_model_controllers()
        self._select_input_data()
        self._validate_input_data()
        self._transform_input_data()

    def _select_input_data(self):
        """Assemble a two-column dataframe from `data`/`x`/`y`."""
        _data = None

        if self.data is None:
            if isinstance(self.x, str) and isinstance(self.y, str):
                raise ValueError('Specify dataframe and enter `x` and `y`'
                                 ' as strings')
            # NOTE(review): `.size` assumes numpy-like inputs - TODO confirm.
            if self.x.size == 0 or self.y.size == 0:
                raise ValueError('`x` or ``y` are empty')
            if self.x.size != self.y.size:
                raise ValueError(
                    'Possibly `x` and ``y` are not of the same length')
            _data = pd.DataFrame(columns=['x', 'y'],
                                 data=np.array([self.x, self.y]).T)
        elif (isinstance(self.data, pd.DataFrame)
              and isinstance(self.x, str) and isinstance(self.y, str)):
            if {self.x, self.y}.issubset(set(self.data.columns)):
                _data = self.data[[self.x, self.y]].copy()
            else:
                raise KeyError(f"Either `x` or ({self.x}),`y` ({self.y})"
                               f" are not columns in data")

        if _data is None:  # Failed to parse data from input
            raise ValueError('Either enter `data` as a pd.DataFrame'
                             'and `x` and `y` as two column names, or enter'
                             '`x` and `y` as np.arrays')

        self._input_data = _data

    def _validate_input_data(self):
        """Apply the missing-value policy selected by `nan_action`.

        BUGFIX: the original referenced the bare name `nan_action` (NameError),
        hid a 'mean'/'median'/'mode' guard behind an unreachable
        `== 'replace'` condition, and silently ignored any other value.
        """
        if self._input_data.isnull().values.any():
            if self.nan_action == 'raise':
                raise ValueError('NaN in data, either specify action or '
                                 'remove')
            elif self.nan_action == 'drop':
                self._input_data.dropna(inplace=True)
            elif self.nan_action in ('mean', 'median'):
                # Fill each column's NaNs with that column's statistic
                # (DataFrame.apply('mean') yields a per-column Series).
                self._input_data.fillna(
                    self._input_data.apply(self.nan_action), inplace=True)
            else:
                # 'mode' returns a DataFrame from apply() and cannot be used
                # with fillna directly; anything else is simply unknown.
                raise NotImplementedError(
                    f'nan_action {self.nan_action!r} is not implemented')

    def fit(self):
        """
        Fit the statistical model.

        Returns
        -------
        None

        Raises
        ------
        RuntimeError
            If model was already fitted, raises RuntimeError.
        """
        if self._fitted is True:
            raise RuntimeError("Model was already run. Use `reset()` method"
                               " prior to calling `fit()` again!")
        self._fitted = True
        self._pre_process()
        self._analyze()

    def _analyze(self):
        # Concrete subclasses perform their R call here.
        raise NotImplementedError

    def _transform_input_data(self):
        # Hook for subclasses; no transformation by default.
        pass

    def _set_model_controllers(self):
        # Hook for subclasses; nothing to configure by default.
        pass

    def reset(self, refit=True, **kwargs):
        """
        Updates the Model object state and removes current test results.

        Parameters
        ----------
        refit : bool
            Whether to fit the statistical test after resetting parameters. Default is True.
        kwargs
            Any keyword arguments of parameters to be updated.

        Returns
        -------
        None
        """
        # What else?
        vars(self).update(**kwargs)
        self._fitted = False
        self._results = None

        if refit is True:
            self.fit()
# @custom_inherit.doc_inherit(_PairwiseCorrelationModel, "numpy_with_merge")
class ChiSquare(_PairwiseCorrelation):
    """Run a frequentist Chi-Square \u03C7\u00B2 test of independence.

    .. _Implemented R function stats::t.test: https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/chisq.test

    Parameters
    ----------
    apply_correction : bool
        Whether to apply Yates' correction (True) or not (False). Default is
        False.

    Warns
    -----
    RuntimeWarning
        When the cross-tabulation of the data contains cells with fewer than
        5 observations and `apply_correction` is False.
    """

    def __init__(self, apply_correction: bool = False,
                 **kwargs):
        self.apply_correction = apply_correction
        super().__init__(**kwargs)

    def _select_input_data(self):
        # Build the two-column frame, then collapse it into a contingency table.
        super()._select_input_data()
        self._crosstab_data()

    def _validate_input_data(self):
        self._test_crosstab_frequencies()
        super()._validate_input_data()

    def _test_crosstab_frequencies(self):
        """Warn when expected-frequency assumptions are likely violated."""
        smallest_cell = self.crosstabulated_data.min().min()
        if smallest_cell < 5 and not self.apply_correction:
            warnings.warn(
                'Less than 5 observations in some cell(s). Assumptions'
                'may be violated. Use `apply_correction=True`')

    def _crosstab_data(self):
        """Cross-tabulate `x` against `y` and store the contingency table."""
        x_values, y_values = self._input_data.values.T
        self.crosstabulated_data = pd.crosstab(x_values, y_values)

    def _analyze(self):
        """Run R's stats::chisq.test on the contingency table."""
        r_result = pyr.rpackages.stats.chisq_test(
            self.crosstabulated_data,
            correct=self.apply_correction)
        self._results = results.ChiSquareResults(r_result)
# @custom_inherit.doc_inherit(_PairwiseCorrelationModel, "numpy_with_merge")
class Correlation(_PairwiseCorrelation):
    """Calculate correlation coefficient in one of several methods.

    Parameters
    ----------
    method : str
        Type of correlation coefficient. Possible values are
        'pearson', 'spearman' or 'kendall'. Default is 'pearson'.
    alternative : str
        Alternative hypothesis for the test - 'two.sided' (default),
        'less' or 'greater'.
    """

    def __init__(self, method: str = 'pearson', alternative: str = 'two.sided',
                 **kwargs):
        self.method = method
        self.tail = alternative
        super().__init__(**kwargs)

    def _validate_input_data(self):
        if self.method not in CORRELATION_METHODS:
            raise ValueError('Invalid correlation coefficient method - specify'
                             ' either `pearson`, `spearman` or `kendall`')
        super()._validate_input_data()

    def _analyze(self):
        # BUGFIX: R's stats::cor.test has no `tail` argument; the directional
        # hypothesis is specified with `alternative`, so the value previously
        # passed as `tail=` never reached the test.
        self._results = results.CorrelationResults(
            pyr.rpackages.stats.cor_test(
                *self._input_data.values.T,
                method=self.method,
                alternative=self.tail,
            ))
# @custom_inherit.doc_inherit(Correlation, "numpy_with_merge")
class _TriplewiseCorrelation(Correlation):
    """
    A base class for correlation between two variables while controlling for
    some or all of the effect of a third variable. Used as base for
    PartCorrelation and PartialCorrelation.

    Parameters
    ----------
    z : Union[str, array-like]
        The control variable for the correlation between x and y. Either the
        name of a column in data or an array-like object of values.

    Raises
    ------
    ValueError
        If x, y and z (strings) are not names of columns in data.
        If x, y and z (array-like) are not of the same length.
        if x, y, and z are not of the same type.
    """

    def __init__(self, z: typing.Union[str, typing.Iterable], **kwargs):
        self.z = z
        super().__init__(**kwargs)

    def _select_input_data(self):
        # Assemble a three-column dataframe from `data`/`x`/`y`/`z`; mirrors
        # _PairwiseCorrelation._select_input_data with the added control term.
        _data = None
        if self.data is None:
            # Without a dataframe, none of the three inputs may be a string.
            if sum([isinstance(i, str) for i in [self.x, self.y, self.z]]) == 3:
                raise ValueError('Specify dataframe and enter `x`, `y` and `z`'
                                 ' as strings.')
            try:
                _data = pd.DataFrame(columns=['x', 'y', 'z'],
                                     data=np.array([self.x, self.y, self.z]).T)
            except ValueError:
                raise ValueError('`x`, `y` and `z` are not of the same length')
        elif sum([isinstance(i, str) for i in [self.x, self.y, self.z]]) == 3:
            # With a dataframe, all three inputs must name existing columns.
            try:
                _data = self.data[[self.x, self.y, self.z]].copy()
            except KeyError:
                raise KeyError(f"Either `x` ({self.x}),`y` ({self.y}) or `z`"
                               f" {self.z} are not columns in data")
        if _data is None:
            raise ValueError('Either enter `data` as a pd.DataFrame'
                             'and `x`, `y` and `z` as two column names, or enter'
                             '`x`, `y` and `z` as np.arrays')
        self._input_data = _data

    def _analyze(self):
        # Concrete triple-wise correlations implement their own R call.
        raise NotImplementedError
# @custom_inherit.doc_inherit(_TriplewiseCorrelation, "numpy_with_merge")
class PartialCorrelation(_TriplewiseCorrelation):
    """
    Calculate a partial correlation.

    The partial correlation is the correlation between `x` and `y` after the
    association of `z` with *both* `x` and `y` has been controlled for.

    R implementation - https://www.rdocumentation.org/packages/ppcor/versions/1.1/topics/pcor.test
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _analyze(self):
        # Delegate to ppcor::pcor.test and wrap the returned R object.
        x_values, y_values, z_values = self._input_data.values.T
        r_result = pyr.rpackages.ppcor.pcor_test(
            x_values, y_values, z_values, method=self.method)
        self._results = results.PartialCorrelationResults(r_result)
# @custom_inherit.doc_inherit(_TriplewiseCorrelation, "numpy_with_merge")
class PartCorrelation(_TriplewiseCorrelation):
    """
    Calculate a part (semi-partial) correlation.

    The part correlation is the correlation between `x` and `y` after the
    association between `y` and `z` alone has been controlled for.

    R implementation - https://www.rdocumentation.org/packages/ppcor/versions/1.1/topics/spcor.test
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _analyze(self):
        # Delegate to ppcor::spcor.test and wrap the returned R object.
        x_values, y_values, z_values = self._input_data.values.T
        r_result = pyr.rpackages.ppcor.spcor_test(
            x_values, y_values, z_values, method=self.method)
        self._results = results.PartCorrelationResults(r_result)
# TODO - see what other
# @custom_inherit.doc_inherit(_PairwiseCorrelationModel, "numpy_with_merge")
class BayesCorrelation(_PairwiseCorrelation):
    """
    Calculates Bayes factor or returns posterior samples for correlation.

    R implementation - https://www.rdocumentation.org/packages/BayesFactor/versions/0.9.12-4.2/topics/correlationBF

    Parameters
    ----------
    rscale_prior : Union[str, float]
        Controls the scale of the Cauchy prior distribution. Default is
        'medium'. It is also possible to pass 'wide' or 'ultrawide' (matching
        the values of :math:`\\frac{\\sqrt{2}}{2}, 1, \\sqrt{2}`,
        respectively) or an explicit float.
    null_interval : array-like, optional
        Predicted interval for correlation coefficient to test against the
        null hypothesis. Optional values for 'simple' tests are
        [-1, 1] (H1: r != 0), [-1, 0] (H1: r < 0) and [0, 1] (H1: r > 0).
        Default (None) lets BayesFactor use its own default interval.
    sample_from_posterior : bool, optional
        If True return samples from the posterior, if False returns Bayes
        factor. Default is False.

    Notes
    -----
    It is better practice to confine null_interval specification to a narrower
    range than the one used by default, if you have a prior belief
    regarding the expected effect size.
    """

    def __init__(self,
                 rscale_prior: typing.Union[str, float] = 'medium',
                 null_interval: typing.Optional[typing.List[int]] = None,
                 sample_from_posterior: bool = False, **kwargs):
        self.rscale_prior = rscale_prior
        self.sample_from_posterior = sample_from_posterior
        # BUGFIX: the original only assigned the attribute when null_interval
        # was None, so passing an explicit interval crashed _analyze() with
        # AttributeError.
        if null_interval is None:
            self.null_interval = DEFAULT_CORRELATION_NULL_INTERVAL
        else:
            self.null_interval = null_interval
        super().__init__(**kwargs)

    def _analyze(self):
        # BUGFIX: BayesFactor::correlationBF names the prior-scale argument
        # `rscale`; the previous `rscale_prior=` keyword was swallowed by
        # R's `...` and the default prior was silently used instead.
        self._results = results.BayesCorrelationResults(
            pyr.rpackages.BayesFactor.correlationBF(
                *self._input_data.values.T,
                nullInterval=self.null_interval,
                rscale=self.rscale_prior,
                posterior=self.sample_from_posterior,
            ))
import abc
from .. import pyr
from ..misc import utils
class BaseModel(metaclass=abc.ABCMeta):
    """Abstract skeleton shared by all robusta model objects.

    Concrete models implement the pre-processing hooks and `_analyze`;
    `fit` chains them together and returns the results object.
    """

    def __init__(self, **kwargs):
        # Subclasses are responsible for storing their own parameters.
        pass

    def reset(self, **kwargs):
        """
        Re-set all parameters of the model. Intended to be called by users.

        @param kwargs:
        @return: None
        """
        pass

    @abc.abstractmethod
    def _pre_process(self):
        # Default pre-processing pipeline; subclasses may extend via super().
        self._set_model_controllers()
        self._select_input_data()
        self._validate_input_data()
        self._transform_input_data()

    @abc.abstractmethod
    def _set_model_controllers(self):
        pass

    @abc.abstractmethod
    def _select_input_data(self):
        pass

    @abc.abstractmethod
    def _transform_input_data(self):
        pass

    @abc.abstractmethod
    def _validate_input_data(self):
        pass

    @abc.abstractmethod
    def _analyze(self):
        pass

    @abc.abstractmethod
    def fit(self):
        """
        Run the model defined by the input.

        @return: the results object created from the (R) results object.
        """
        self._pre_process()
        return self._analyze()
# A problem with the model class and fit class would be that you'd have to
# explicitly pass some information to the results objects (e.g., formula,
# group vs. repeated variables, etc.).
class BaseResults:
    """Wraps an R results object and exposes it as a tidy pandas DataFrame."""

    # Mapping of R output column names to the names exposed to the user.
    columns_rename = {}
    # Ordered subset of columns returned by get_df(); subclasses override.
    returned_columns = []

    def __init__(self, r_results, **kwargs):
        self.r_results = r_results
        self.results_df = self._reformat_r_output_df()

    def get_df(self):
        """Return a defensive copy of the formatted results dataframe."""
        return self.results_df.copy()

    def _tidy_results(self):
        # generics::tidy coerces the R model object into a tidy R dataframe.
        return pyr.rpackages.generics.tidy(self.r_results)

    def _get_r_output_df(self):
        return utils.convert_df(self._tidy_results())

    def _reformat_r_output_df(self):
        """Rename and subset the tidied R output per the class attributes.

        BUGFIX(cleanup): this method was defined twice in the original; the
        first, dead definition (which returned None) has been removed.
        """
        df = self._get_r_output_df().copy()
        df.rename(columns=self.columns_rename, inplace=True)
        return df[self.returned_columns]
import numpy as np
import pandas as pd
import rpy2
from .. import pyr
from ..misc import utils
import rpy2.robjects as ro
def load_dataset(dataset_name, package_name=None):
    """Fetch one of the datasets shipped with the loaded R packages.

    Works similarly to R's 'utils::data'.

    Parameters
    ----------
    dataset_name : [None, str]
        Name of requested data set. Default is None (returns data frame
        of all available data sets).
    package_name : [None, str]
        Whether to look for the data set from a specific R package or from
        all available packages.

    Returns
    -------
    pd.DataFrame
        Either a data frame of the requested data set or data frame of all
        available data sets.
    """
    # All of the resolution/conversion work happens in the private helper.
    dataset = _load(dataset_name, package_name)
    return dataset
def get_available_datasets():
    """Return a dataframe listing every dataset available from the loaded R packages."""
    info = pyr.rpackages.utils.data()
    column_names = ['Package', 'LibPath', 'Item', 'Description']
    # utils::data returns a flat vector; reshape it to 4 rows and reverse the
    # column order so it lines up with `column_names` after transposing.
    raw = np.array(info[2])
    table = np.fliplr(raw.reshape(4, len(info[2]) // 4))
    return (pd.DataFrame(data=table.T, columns=column_names)
            .drop(columns=['LibPath']))
def _load(dataset_name: str, package_name: str = None):
    """
    Load an R dataset and retrieve it as a pandas dataframe.

    Row-names (similar to a pandas object's index) are included on purpose as
    they may be an important identifier of the sample (e.g., car model in the
    mtcars dataset).

    @type dataset_name: str
    @rtype pd.core.frame.DataFrame
    """
    if package_name is None:
        # Resolve which installed package provides the dataset by scanning
        # the complete data listing.
        available = get_available_datasets()
        available = available.loc[available['Item'] == dataset_name,
                                  'Package']
        if available.shape[0] == 0:
            # TODO Not really a viable case but we should test for this.
            raise RuntimeError('No matching data set was found.')
        if available.shape[0] > 1:
            # TODO Not really a viable case but we should test for this.
            raise RuntimeError(f'More than one data set was found for the entry {dataset_name}. '
                               'Use the `package_name` argument to specify the dataset you want to load: '
                               f"{', '.join(available.values)}")
        else:
            package_name = available.item()

    # TODO - REFACTOR THIS
    # Fetch the dataset inside a local converter context so rpy2's pandas
    # conversion rules apply to the returned R object.
    # NOTE(review): convert_df is applied twice with 'dataset_rownames' -
    # presumably the first call materializes the R rownames as a column and
    # the second normalizes the result; confirm against utils.convert_df.
    with ro.conversion.localconverter(ro.default_converter + ro.pandas2ri.converter):
        return utils.convert_df(utils.convert_df(pyr.rpackages.data(
            getattr(pyr.rpackages, package_name)).fetch(
            dataset_name)[dataset_name], 'dataset_rownames'), 'dataset_rownames')
import numpy as np
from sklearn.linear_model._base import LinearModel
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.preprocessing import StandardScaler
# https://gist.github.com/agramfort/2351057
__all__ = ['NNGRegressor']
def non_negative_garotte(X, y, alpha, tol=1e-6, max_iter=1000):
    """Fit Breiman's non-negative garrote.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Design matrix.
    y : array-like, shape (n_samples,)
        Target values.
    alpha : float
        L1 penalty applied to the non-negative shrinkage coefficients.
    tol : float, optional (default: 1e-6)
        Optimization tolerance, passed through to the Lasso solver.
    max_iter : int, optional (default: 1000)
        Maximum solver iterations.

    Returns
    -------
    coef : ndarray
        Garrote coefficients on the original scale (coef_ols * shrink_coef).
    shrink_coef : ndarray
        Non-negative shrinkage factors.
    rss : float
        Residual sum of squares of the garrote fit.
    """
    # Ordinary Least Squares coefficients
    coef_ols = LinearRegression(fit_intercept=False).fit(X, y).coef_

    # Scale each column by its OLS coefficient; the garrote then estimates
    # non-negative per-feature shrinkage factors on this rescaled design.
    X = X * coef_ols[np.newaxis, :]

    # Shrunken betas. Note: `normalize=False` was sklearn's default and the
    # keyword was removed in sklearn >= 1.2, so it is omitted here.
    shrink_coef = Lasso(alpha=alpha, fit_intercept=False,
                        positive=True, tol=tol, max_iter=max_iter).fit(X, y).coef_

    coef = coef_ols * shrink_coef

    # Residual Sum of Squares of the garrote fit. X is already scaled by
    # coef_ols here, so the fitted values are X @ shrink_coef; the original
    # used `coef`, which applied coef_ols twice.
    rss = np.sum((y - np.dot(X, shrink_coef)) ** 2)

    return coef, shrink_coef, rss
class NNGRegressor(LinearModel):
    """Non-Negative Garrote Regressor

    Code source : https://gist.github.com/agramfort/2351057

    Ref:
    Breiman, L. (1995), "Better Subset Regression Using the Nonnegative
    Garrote," Technometrics, 37, 373-384. [349,351]

    Parameters
    ----------
    alpha : float, optional (default 1e-3)
        Constant that multiplies the L1 term. alpha = 0 is equivalent to an
        ordinary least square, solved by the LinearRegression object; in that
        case use LinearRegression directly.

    fit_intercept : boolean, optional (default True)
        Whether to calculate the intercept for this model. If set to False, no
        intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional (default False)
        This parameter is ignored when fit_intercept is set to False. If True,
        the regressors X will be normalized before regression by subtracting
        the mean and dividing by the l2-norm. If you wish to standardize,
        please use sklearn.preprocessing.StandardScaler before calling fit on
        an estimator with normalize=False.

    tol : float, optional (default: 1e-4)
        The tolerance for the optimization: if the updates are smaller than
        tol, the optimization code checks the dual gap for optimality and
        continues until it is smaller than tol.

    max_iter : int, optional (default: 1000)
        The maximum number of iterations.

    copy_X : boolean, optional (default True)
        If True, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    coef_ : array, shape (n_features, ) or (n_targets, n_features)
        Estimated coefficients for the linear regression problem.

    intercept_ : array
        Independent term in the linear model.
    """

    def __init__(self, alpha=1e-3, fit_intercept=True, normalize=False,
                 tol=1e-4, max_iter=1000, copy_X=True):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        # BUGFIX(cleanup): `self.tol` was assigned twice in the original.
        self.tol = tol

    def fit(self, X, y):
        """Fit the non-negative garrote model.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
        y : array-like, shape = (n_samples, )

        Returns
        -------
        self
        """
        # NOTE(review): LinearModel._preprocess_data dropped the `normalize`
        # positional argument in sklearn >= 1.2 - confirm target version.
        X, y, X_mean, y_mean, X_std = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)

        self.coef_, self.shrink_coef_, self.rss_ = non_negative_garotte(
            X, y, alpha=self.alpha, tol=self.tol, max_iter=self.max_iter)

        self._set_intercept(X_mean, y_mean, X_std)
        return self
import pandas as pd
import numpy as np
from sklearn.base import ClassifierMixin, RegressorMixin
from sklearn.linear_model._base import LinearModel
from sklearn.metrics import get_scorer
from sklearn.base import ClassifierMixin, RegressorMixin
from sklearn.linear_model._base import LinearModel
from sklearn.metrics import get_scorer
from tqdm import tqdm_notebook as tqdm
__all__ = [
'CaruanaRegressor',
'CaruanaClassifier',
]
class _BaseCaruana(LinearModel):
    '''Caruana Ensemble Selection for Regression/Classification

    Paper: https://www.cs.cornell.edu/~caruana/ctp/ct.papers/caruana.icml04.icdm06long.pdf

    Parameters
    ----------
    scoring : str
        Objective for optimization.

    iters : int (default=100)
        Number of models in ensemble.

    init_iters : int (default=10)
        Number of core models in ensemble, which selected from whole set
        of models at the beginning. Values from range 5-25 are prefered.
        Set 0 for basic algorithm.

    colsample : float or int (default=0.5)
        Number of models, sampled on each iteration. Must be from range (0, 1].
        Set 1.0 for basic algorithm.

    replace : bool (default=True)
        Whether to reuse models, already added to the ensemble (recommended).
        Set False for basic algorithm.

    random_state : int, RandomState instance, or None (default=None)
        Pseudo-random number generator to control the subsample of models.

    n_jobs : int or None (default=None)
        The number of jobs to use for the computation.
        `None` means 1. `-1` means using all processors.
        NOTE(review): currently accepted but not used by `fit`.

    tqdm : bool (default=False)
        Whether to show progress bar.

    Attributes
    ----------
    weights_ : list of int
        Number of times each model was used.

    y_avg_ : float
        Target bias
    '''

    def __init__(self, scoring, iters=100, init_iters=10, colsample=0.5,
                 replace=True, random_state=None, n_jobs=-1, tqdm=False):
        self.iters = iters
        self.init_iters = init_iters
        self.scoring = scoring
        self.colsample = colsample
        self.replace = replace
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.tqdm = tqdm

    def fit(self, X, y):
        """
        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            Stacked predictions.

        y : DataFrame or Series, shape [n_samples, ] or [n_samples, n_classes]
            Target variable

        Returns
        -------
        self
        """
        # Check data
        # BUGFIX: was `is 'classifier'` - identity comparison against a
        # string literal is implementation-dependent; use equality.
        if self._estimator_type == 'classifier':
            self.classes_ = np.unique(y)

        self.scorer = get_scorer(self.scoring)
        self.weights_ = np.zeros(X.shape[1])
        self.y_avg_ = y.mean()

        # BUGFIX: honour `random_state` (it was previously ignored and the
        # global numpy RNG was used for the model subsample).
        rng = np.random.RandomState(self.random_state)

        msg = "<init_iters> must be no more than <iters>"
        assert self.init_iters <= self.iters, msg

        if not self.replace:
            # BUGFIX: message previously said "(if replace=True)" although
            # this constraint only applies when models cannot be reused.
            msg = "<iters> must be no more than X.shape[1] (if replace=False)"
            assert self.iters <= X.shape[1], msg

        # Initial subset: score every single-model ensemble and keep the
        # `init_iters` best models as the core of the ensemble.
        scores = {}
        for k in range(X.shape[1]):
            self.weights_[k] += 1
            scores[k] = self.score(X, y)
            self.weights_[k] -= 1

        scores = pd.Series(scores).sort_values(ascending=False)
        scores = scores[:self.init_iters]
        self.weights_[scores.index] += 1

        # Core Algorithm: greedily add (with optional replacement) the model
        # that most improves the ensemble score, on a random column subsample.
        i_range = range(self.init_iters, self.iters)
        if self.tqdm:
            i_range = tqdm(i_range, initial=self.init_iters, total=self.iters)

        for i in i_range:
            k_range = np.arange(X.shape[1])
            if not self.replace:
                # Without replacement, only never-used models are candidates.
                k_range = k_range[self.weights_ == 0]
            if self.colsample < 1.0:
                p = 1 + int(len(k_range) * self.colsample)
                k_range = rng.choice(k_range, p, replace=False)

            best_score = None
            best_k = -1
            for k in k_range:
                self.weights_[k] += 1
                score = self.score(X, y)
                self.weights_[k] -= 1
                if best_k < 0 or best_score < score:
                    best_score = score
                    best_k = k

            self.weights_[best_k] += 1

        return self

    def score(self, X, y):
        """Score the current ensemble blend with the configured scorer."""
        return self.scorer(self, X, y)

    def _blend(self, X):
        # Weighted average of the stacked predictions; the intercept supplies
        # the target mean while the ensemble is still empty.
        return X.dot(self.coef_).values + self.intercept_

    @property
    def coef_(self):
        """Normalized ensemble weights (all zeros until a model is added)."""
        if self.weights_.any():
            return np.array(self.weights_) / np.sum(self.weights_)
        else:
            return self.weights_

    @property
    def intercept_(self):
        """Target mean while the ensemble is empty, 0 otherwise."""
        return 0.0 if self.coef_.any() else self.y_avg_
class CaruanaRegressor(_BaseCaruana, RegressorMixin):
    """Caruana ensemble selection for regression tasks."""

    def predict(self, X):
        """Return the weighted blend of the stacked predictions in X."""
        return self._blend(X)
class CaruanaClassifier(_BaseCaruana, ClassifierMixin):
    """Caruana ensemble selection for binary classification.

    The blend of stacked predictions is interpreted as the probability of
    the positive class.
    """
    # BUG FIX: the original final line carried fused dataset metadata
    # ("| /robusta-0.0.1.tar.gz/... | pypi |") that broke the syntax.

    def predict_proba(self, X):
        """Return class probabilities, shape (n_samples, 2)."""
        # Blend gives P(class 1); stack with its complement for sklearn API
        y = self._blend(X)
        return np.stack([1 - y, y], axis=-1)

    def predict(self, X):
        """Return hard labels by thresholding P(class 1) at 0.5."""
        y = self.predict_proba(X)
        return np.rint(y[:, 1]).astype(int)
import pandas as pd
import numpy as np
from sklearn.base import ClassifierMixin, RegressorMixin
from sklearn.linear_model._base import LinearModel
from sklearn.metrics import get_scorer
from scipy.optimize import minimize
from robusta.preprocessing import QuantileTransformer
from robusta.pipeline import make_pipeline
__all__ = [
'BlendRegressor',
'BlendClassifier',
]
class _BaseBlend(LinearModel):
    """Base class for weighted blending of stacked predictions.

    Finds (or accepts) a convex combination of model predictions and
    exposes it through the linear-model interface (<coef_>, <intercept_>).
    """

    def __init__(self, avg_type='mean', scoring=None, opt_func=None, **opt_kws):
        self.avg_type = avg_type
        self.scoring = scoring
        self.opt_func = opt_func
        self.opt_kws = opt_kws

    def fit(self, X, y, weights=None):
        """Fit blending weights, optionally optimizing them.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            Stacked predictions.

        y : DataFrame or Series, shape [n_samples, ] or [n_samples, n_classes]
            Target variable

        weights : array-like or None (default=None)
            Initial weights. If None, all models start equally weighted.

        Returns
        -------
        self
        """
        # BUG FIX: '==' instead of 'is' for string comparison (identity of
        # string literals is a CPython interning artifact, not a guarantee)
        if self._estimator_type == 'classifier':
            self.classes_ = np.unique(y)

        self.avg = check_avg_type(self.avg_type)
        self.n_features_ = X.shape[1]
        self.set_weights(weights)

        if self.scoring:
            self.scorer = get_scorer(self.scoring)
            objective = lambda w: -self.set_weights(w).score(X, y)

            # BUG FIX: do not overwrite the constructor parameters
            # <opt_func>/<opt_kws> during fit — that breaks sklearn's
            # get_params/clone contract. Use locals instead.
            opt_func = self.opt_func
            opt_kws = self.opt_kws
            if opt_func is None:
                opt_func = minimize
                opt_kws = dict(x0=self.get_weights(), method='SLSQP',
                               options={'maxiter': 1000},
                               bounds=[(0., 1.)] * self.n_features_,
                               constraints=[{'type': 'eq',
                                             'fun': lambda w: np.sum(w) - 1}])

            self.result_ = opt_func(objective, **opt_kws)
            self.set_weights(self.result_['x'])

        return self

    def set_weights(self, weights):
        # Normalize weights to sum to 1; None means uniform weights
        if weights is None:
            self.coef_ = np.ones(self.n_features_) / self.n_features_
        else:
            self.coef_ = np.array(weights) / np.sum(weights)
        return self

    def get_weights(self):
        return self.coef_

    def score(self, X, y):
        return self.scorer(self, X, y)

    @property
    def _is_fitted(self):
        # Fitted after optimization, or immediately when no scoring is set
        return (hasattr(self, 'result_') or not self.scoring)

    @property
    def intercept_(self):
        return .0

    def _blend(self, X):
        # Weighted average of the stacked predictions
        return self.avg(X, self.coef_).values
class BlendRegressor(_BaseBlend, RegressorMixin):
    '''Weighted-average blending estimator for regression.

    Parameters
    ----------
    avg_type : string or callable (default='mean')
        Weighted average to apply:

            - 'mean': Arithmetic mean
            - 'hmean': Harmonic mean
            - 'gmean': Geometric mean

        A callable may also be given, with signature f(X, weights) -> y.

    scoring : string or None (default=None)
        Objective for weight optimization. When None, every model gets the
        same weight; otherwise the optimal blending weights are searched.
        Differentiable scorings are preferred.

    opt_func : function (default=None)
        Optimizer; its first argument is the objective to minimize, i.e.
        f(objective, **opt_kws). When omitted while <scoring> is set,
        scipy's <minimize> with method 'SLSQP' is used. Must return a dict
        whose key 'x' holds the optimal weights.

    opt_kws : dict, optional
        Keyword arguments forwarded to <opt_func>.

    Attributes
    ----------
    coef_ : Series, shape (n_features, )
        Estimated blending weights.

    n_iters_ : int
        Number of evaluations

    result_ : dict
        Evaluation results
    '''

    def predict(self, X):
        """Return the blended prediction for X."""
        return self._blend(X)
class BlendClassifier(_BaseBlend, ClassifierMixin):
    '''Weighted-average blending estimator for (binary) classification.

    Parameters
    ----------
    avg_type : string or callable (default='mean')
        Weighted average to apply:

            - 'mean': Arithmetic mean
            - 'hmean': Harmonic mean
            - 'gmean': Geometric mean

        A callable may also be given, with signature f(X, weights) -> y.

    scoring : string or None (default=None)
        Objective for weight optimization. When None, every model gets the
        same weight; otherwise the optimal blending weights are searched.
        Differentiable scorings are preferred.

    opt_func : function (default=None)
        Optimizer; its first argument is the objective to minimize, i.e.
        f(objective, **opt_kws). When omitted while <scoring> is set,
        scipy's <minimize> with method 'SLSQP' is used. Must return a dict
        whose key 'x' holds the optimal weights.

    opt_kws : dict, optional
        Keyword arguments forwarded to <opt_func>.

    Attributes
    ----------
    coef_ : Series, shape (n_features, )
        Estimated blending weights.

    n_iters_ : int
        Number of evaluations

    result_ : dict
        Evaluation results
    '''

    def predict_proba(self, X):
        """Return class probabilities, shape (n_samples, 2)."""
        # The blend is interpreted as P(class 1)
        pos = self._blend(X)
        return np.stack([1 - pos, pos], axis=-1)

    def predict(self, X):
        """Return hard labels by thresholding P(class 1) at 0.5."""
        proba = self.predict_proba(X)
        return np.rint(proba[:, 1]).astype(int)
# Weighted-average strategies: each maps (X, weights) -> blended prediction.
AVG_TYPES = {
    'mean': lambda X, w: X.dot(w),                    # arithmetic mean
    'hmean': lambda X, w: 1/(X**-1).dot(w),           # harmonic mean
    'gmean': lambda X, w: np.exp(np.log(X).dot(w)),   # geometric mean
}


def check_avg_type(avg_type):
    """Return the averaging function registered under <avg_type>.

    Parameters
    ----------
    avg_type : string
        One of 'mean', 'hmean', 'gmean'.

    Returns
    -------
    callable
        Function with signature f(X, weights) -> blended prediction.

    Raises
    ------
    ValueError
        If <avg_type> is not a known strategy.
    """
    avg_types = list(AVG_TYPES.keys())
    if avg_type in avg_types:
        return AVG_TYPES[avg_type]
    else:
        # BUG FIX: the original message formatted the allowed list into the
        # quoted value slot and left "Allowed values:" empty. Show both the
        # offending value and the allowed options. (The original final line
        # also carried fused dataset metadata breaking the syntax.)
        raise ValueError("Invalid value '{}' for <avg_type>. Allowed values: "
                         "{}".format(avg_type, avg_types))
from sklearn.model_selection import ParameterSampler, ParameterGrid
from sklearn.base import BaseEstimator, clone
import optuna, hyperopt
import scipy
from time import time
from collections.abc import Iterable
from numbers import Number
import pandas as pd
import numpy as np
from ._verbose import _print_last
from ._plot import _plot_progress
from robusta.testing import extract_param_space
from robusta.crossval import crossval_score
class BaseOptimizer(BaseEstimator):
    '''Hyper-parameters Optimizer

    Evaluates candidate parameter sets with cross-validation and keeps a
    full history of trials. Subclasses implement the search strategy in
    <_fit>, calling <eval_params> once per candidate.

    Parameters
    ----------
    estimator : estimator object
        The object to use to fit the data.

    cv : int, cross-validation generator or an iterable
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

            - None, to use the default 3-fold cross validation,
            - integer, to specify the number of folds in a `(Stratified)KFold`,
            - :term:`CV splitter`,
            - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.

    scoring : string, callable or None, optional, default: None
        A string or a scorer callable object / function with signature
        ``scorer(estimator, X, y)`` which should return only a single value.
        If None, the estimator's default scorer (if available) is used.

    param_space : dict or None (default=None)
        Parameters bounds:

            - 'uniform': tuple (a, b)
            - 'quniform': tuple (a, b, q)
            - 'quniform_int': tuple of integers (a, b, 1)
            - 'loguniform': tuple (a, b, 'log')
            - 'choice': set {A, B, C, ...}
            - 'const': any single value (int, float, string, None, ...)

        If <param_space> set to None, use automatic parameters setting.

    warm_start : bool (default: False)
        If True, continue optimization after last <fit> call. If False, reset
        trials history and start new optimization.

    max_time : int or NoneType (default: None)
        Stop optimization after given number of seconds.
        None means no time limitation.

    max_iter : int or NoneType (default: None)
        Stop optimization after given number of evaluations (iterations).
        None means no iterations limitation.

    n_jobs : int or None, optional (default=None)
        The number of jobs to use for inner cross-validation. None means 1.

    verbose : int, optional (default: 1)
        Verbosity level:
        0: No output
        1: Print time, iters, score & eta
        2: Also print trial's parameters
        3: Also print cv output for each fold

    n_digits : int (default=4)
        Verbose score(s) precision

    debug : bool (default: False)
        If True, re-raise exceptions from failed trials instead of
        recording them with status 'fail'.

    Attributes
    ----------
    trials_ : DataFrame
        Params, score, time & cv results per trial:

            - 'params' : dict — evaluated parameters
            - 'score', 'score_std' : float — mean CV score & its std
            - 'scores' : array — per-fold scores
            - 'time' : float — fitting duration (# of sec)
            - 'status' : string — 'ok' or 'fail'

    best_iter_ : int
        Index of best trial

    best_score_ : float
        Best score

    best_params_ : dict
        Best parameters

    best_estimator_ : estimator
        Unfitted clone of <estimator> with the best parameters set

    n_iters_ : int
        Total number of trials

    total_time_ : float
        Total optimization time (sum of per-trial durations)
    '''

    def __init__(self, estimator, cv=5, scoring=None, param_space=None,
                 warm_start=False, max_time=None, max_iter=None, n_jobs=None,
                 verbose=1, n_digits=4, debug=False):
        self.estimator = estimator
        self.param_space = param_space
        self.warm_start = warm_start
        self.cv = cv
        self.scoring = scoring
        self.max_time = max_time
        self.max_iter = max_iter
        self.verbose = verbose
        self.n_digits = n_digits
        self.n_jobs = n_jobs
        self.debug = debug

    def eval_params(self, params, X, y, groups=None):
        """Evaluate one parameter set via cross-validation.

        Records a trial in <trials_> whether the evaluation succeeds or
        fails, and returns the mean CV score (NaN on failure).
        """
        self._check_max_iter()
        self._check_max_time()

        tic = time()

        try:
            params = fix_params(params, self.param_space_)
            estimator = clone(self.estimator).set_params(**params)

            scores = crossval_score(estimator, self.cv, X, y, groups,
                                    self.scoring, n_jobs=self.n_jobs,
                                    verbose=0)

            trial = {
                'params': params,
                'status': 'ok',
                'time': time() - tic,
                'score': np.mean(scores),
                'score_std': np.std(scores),
                'scores': scores,
            }

            self._append_trial(trial)
            _print_last(self)

            return trial['score']

        except KeyboardInterrupt:
            # Propagate user interrupts untouched (also raised internally
            # by the iteration/time-limit checks).
            raise

        except Exception:
            # BUG FIX: the failure record was previously placed after an
            # unconditional re-raise inside the KeyboardInterrupt handler,
            # so it was unreachable and failed trials were never logged,
            # while the bare except silently returned None. Record the
            # failure here (or re-raise in debug mode).
            if self.debug:
                raise

            trial = {
                'params': params,
                'status': 'fail',
                'time': time() - tic,
            }

            self._append_trial(trial)
            _print_last(self)

            return np.nan

    def _append_trial(self, trial):
        # pd.concat instead of the deprecated/removed DataFrame.append
        row = pd.DataFrame([trial])
        self.trials_ = pd.concat([self.trials_, row], ignore_index=True)

    @property
    def best_iter_(self):
        # Index of the trial with the highest mean score
        return self.trials_['score'].idxmax()

    @property
    def best_score_(self):
        return self.trials_['score'][self.best_iter_]

    @property
    def best_params_(self):
        return self.trials_['params'][self.best_iter_]

    @property
    def best_estimator_(self):
        # Unfitted clone configured with the best found parameters
        return clone(self.estimator).set_params(**self.best_params_)

    @property
    def n_iters_(self):
        return len(self.trials_) if hasattr(self, 'trials_') else 0

    @property
    def total_time_(self):
        return self.trials_['time'].sum() if hasattr(self, 'trials_') else .0

    @property
    def predict(self):
        # NOTE(review): this duplicates <total_time_> and looks like a
        # copy-paste mistake — a property named <predict> returning the
        # elapsed time is almost certainly unintended. Kept unchanged for
        # interface compatibility; confirm and remove or reimplement.
        return self.trials_['time'].sum() if hasattr(self, 'trials_') else .0

    def _check_max_iter(self):
        # Raises KeyboardInterrupt to unwind the search loop uniformly
        if hasattr(self, 'max_iter') and self.max_iter:
            if self.max_iter <= self.n_iters_:
                if self.verbose: print('Iterations limit exceed!')
                raise KeyboardInterrupt

    def _check_max_time(self):
        # Raises KeyboardInterrupt to unwind the search loop uniformly
        if hasattr(self, 'max_time') and self.max_time:
            if self.max_time <= self.total_time_:
                if self.verbose: print('Time limit exceed!')
                raise KeyboardInterrupt

    def fit(self, X, y, groups=None):
        """Run the optimization over (X, y).

        Resolves the parameter space, optionally resets the trials history
        (unless warm-starting), then delegates to the subclass <_fit>.
        """
        # Check if params set to auto
        self.param_space_ = self.param_space
        if not self.param_space_:
            self.param_space_ = extract_param_space(self.estimator, verbose=self.verbose)

        # Define new space
        if not self.warm_start or not hasattr(self, 'btypes'):
            self.btypes = get_bound_types(self.param_space_)
            self.space = self._get_space(self.param_space_)

        # Reset trials
        if not self.warm_start or not hasattr(self, 'trials_'):
            self.trials_ = pd.DataFrame()

        # Custom core fit (implemented by subclasses)
        self._fit(X, y, groups)

        return self

    def plot(self, **kwargs):
        """Plot optimization progress (delegates to _plot_progress)."""
        _plot_progress(self, **kwargs)
def get_bound_types(space):
    '''Classify each parameter's search-space bounds.

    Recognized bound kinds:

        - 'uniform': uniform distribution, tuple (a, b)
        - 'quniform': uniform with step q, tuple (a, b, q)
        - 'quniform_int': uniform with integer step, tuple (a, b, int)
        - 'loguniform': log-uniform, tuple (a, b, 'log')
        - 'choice': set of options {A, B, C, ...}
        - 'const': any single value

    Args
    ----
        space : dict
            Parameter name -> bounds specification

    Returns
    -------
        btypes : dict
            Parameter name -> bound kind (one of the strings above)

    Raises
    ------
        ValueError
            On an iterable specification that is neither a set nor a
            well-formed tuple.
    '''
    def classify(bounds):
        # Strings and scalars are constants
        if isinstance(bounds, str) or not isinstance(bounds, Iterable):
            return 'const'
        if isinstance(bounds, set):
            return 'choice'
        if isinstance(bounds, tuple):
            if len(bounds) == 2:
                return 'uniform'
            if len(bounds) == 3:
                step = bounds[2]
                if step == 'log':
                    return 'loguniform'
                if isinstance(step, int):
                    return 'quniform_int'
                if isinstance(step, Number):
                    return 'quniform'
        # Any other iterable (list, malformed tuple, ...) is rejected
        raise ValueError('Unknown bounds type: {}'.format(bounds))

    return {param: classify(bounds) for param, bounds in space.items()}
def fix_params(params, space):
    '''
    Normalize parameters value according to defined space:

        - 'quniform'/'quniform_int': round param value with defined step
        - 'const': replace parameter's value with defined constant

    Args
    ----
        params : dict
            Parameters

        space : dict
            Boundaries

    Returns
    -------
        fixed_params : dict
            Normalized parameters
    '''
    # Copy so the caller's dict is never mutated
    params = dict(params)
    btypes = get_bound_types(space)

    for param, bounds in space.items():
        if btypes[param] in ['quniform', 'quniform_int']:
            a, b, q = bounds
            params[param] = qround(params[param], a, b, q)
        # BUG FIX: '==' instead of 'is' for string comparison (identity of
        # string literals is a CPython interning artifact, not a guarantee)
        elif btypes[param] == 'const':
            params[param] = bounds

    return params
def ranking(ser):
    '''Dense-rank values, best first.

    Args
    ----
        ser : Series of float
            Values for ranking. Missing values are treated as the worst
            (they are filled with the series minimum before ranking).

    Returns
    -------
        rnk : Series of int
            Dense ranks (1: highest value, N: lowest)
    '''
    filled = ser.fillna(ser.min())
    return filled.rank(method='dense', ascending=False).astype(int)
def qround(x, a, b, q, decimals=4):
    '''Clip x into [a, b] and snap it down to the grid a, a+q, a+2q, ...

    Args
    ----
        x : int or float
            Input value. x must be in [a, b].
            If x < a, x set to a. If x > b, x set to b.

        a, b : int or float
            Boundaries. b must be greater than a. Otherwise b set to a.

        q : int or float
            Step value. If q and a are both integer, x set to integer too.

        decimals : int, optional (default: 4)
            Number of decimal places to round to.

    Returns
    -------
        x_new : int or float
            Rounded value
    '''
    # BUG FIX: the original final line carried fused dataset metadata
    # ("| /robusta-0.0.1.tar.gz/... | pypi |") that broke the syntax.

    # Clip x into [a, b] (guarding against b < a)
    b = max(a, b)
    x = min(max(x, a), b)

    # Snap down to the nearest grid point; round to trim float noise
    x = a + ((x - a)//q)*q
    x = round(x, decimals)

    # Integer grid -> integer result
    if isinstance(a + q, int):
        x = int(x)

    return x
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, clone
from robusta.importance import get_importance
__all__ = ['TransformedTargetRegressor']
class TransformedTargetRegressor(BaseEstimator, RegressorMixin):
    """Meta-estimator to regress on a transformed target.

    Useful for applying a non-linear transformation in regression problems.

    The computation during ``fit`` is::

        regressor.fit(X, func(y))

    The computation during ``predict`` is::

        inverse_func(regressor.predict(X))

    Parameters
    ----------
    regressor : object, default=LinearRegression()
        Regressor object such as derived from ``RegressorMixin``. This
        regressor will automatically be cloned each time prior to fitting.

    func : function, optional
        Function to apply to ``y`` before passing to ``fit``. If ``func``
        is ``None``, the identity function is used.

    inverse_func : function, optional
        Function to apply to the prediction of the regressor, returning
        predictions to the space of the original training labels. If
        ``None``, the identity function is used.

    Attributes
    ----------
    regressor_ : object
        Fitted regressor.
    """
    # BUG FIX: the original final line carried fused dataset metadata
    # ("| /robusta-0.0.1.tar.gz/... | pypi |") that broke the syntax.

    def __init__(self, regressor=None, func=None, inverse_func=None):
        self.regressor = regressor
        self.func = func
        self.inverse_func = inverse_func

    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : DataFrame, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : Series, shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
        """
        # NOTE(review): <sample_weight> is accepted but never forwarded to
        # the underlying regressor — confirm whether this is intentional.

        # BUG FIX: <return_df> was only assigned when X had an index, so
        # predict() crashed with AttributeError on plain arrays.
        self.return_df = hasattr(X, 'index')
        self.y_name = getattr(y, 'name', None)

        # BUG FIX: func=None is documented as identity but used to crash
        if self.func is not None:
            y = self.func(y)

        self.regressor_ = clone(self.regressor).fit(X, y)
        return self

    def predict(self, X):
        """Predict using the base regressor, applying inverse.

        The regressor is used to predict and the ``inverse_func`` is applied
        before returning the prediction.

        Parameters
        ----------
        X : DataFrame, shape (n_samples, n_features)
            Samples to predict for.

        Returns
        -------
        y_pred : Series or ndarray, shape (n_samples,)
            Target values (a Series when X was a DataFrame at fit time).
        """
        y = self.regressor_.predict(X)

        # BUG FIX: inverse_func=None is documented as identity
        if self.inverse_func is not None:
            y = self.inverse_func(y)

        if self.return_df:
            y = pd.Series(y, name=self.y_name, index=X.index)
        return y

    @property
    def feature_importances_(self):
        # Delegate to the fitted underlying regressor
        return self.regressor_.feature_importances_

    @property
    def coef_(self):
        # Delegate to the fitted underlying regressor
        return self.regressor_.coef_
import pandas as pd
import numpy as np
from joblib import Parallel, delayed
from sklearn.utils.metaestimators import _BaseComposition
from sklearn.base import TransformerMixin, clone
from sklearn.pipeline import _name_estimators
__all__ = ['FeatureUnion', 'make_union']
class FeatureUnion(_BaseComposition, TransformerMixin):
    '''Concatenates results of multiple transformer objects.

    This estimator applies a list of transformer objects in parallel to the
    input data, then concatenates the results. This is useful to combine
    several feature extraction mechanisms into a single transformer.

    Parameters of the transformers may be set using its name and the parameter
    name separated by a '__'. A transformer may be replaced entirely by
    setting the parameter with its name to another transformer,
    or removed by setting to 'drop' or ``None``.

    Parameters
    ----------
    transformers : list of (string, transformer) tuples
        List of transformer objects to be applied to the data. The first
        half of each tuple is the name of the transformer.

    n_jobs : int or None, optional (default=-1)
        The number of jobs to run in parallel. None means 1.

    Attributes
    ----------
    named_transformers_ : Bunch object, a dictionary with attribute access
        Access the fitted transformer by name.
    '''
    def __init__(self, transformers, n_jobs=None, **kwargs):
        # NOTE(review): unknown keyword arguments are silently discarded
        # here — confirm this is intentional (it can hide caller typos).
        self.transformers = transformers
        self.n_jobs = n_jobs

    def fit(self, X, y=None):
        """Fit all transformers using X.

        Parameters
        ----------
        X : DataFrame of shape [n_samples, n_features]
            Input data, of which specified subsets are used to fit the transformers.

        y : array-like, shape (n_samples, ...), optional
            Targets for supervised learning.

        Returns
        -------
        self : FeatureUnion
            This estimator
        """
        names = [name for name, _ in self.transformers]
        # Clone each transformer before fitting so the prototypes in
        # <transformers> are never mutated; fits run in parallel via joblib.
        transformers = Parallel(n_jobs=self.n_jobs)(
            delayed(self._fit)(clone(transformer), X, y)
            for _, transformer in self.transformers)

        self.named_transformers_ = dict(zip(names, transformers))
        return self

    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.

        Parameters
        ----------
        X : DataFrame of shape [n_samples, n_features]
            Input data, of which specified subsets are used to fit the transformers.

        Returns
        -------
        Xt : DataFrame, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xt_list = Parallel(n_jobs=self.n_jobs)(
            delayed(self._transform)(transformer, X)
            for transformer in self.named_transformers_.values())

        # Column-wise concatenation; output column order follows the
        # order of <transformers>.
        Xt = pd.concat(Xt_list, axis=1)
        return Xt

    def fit_transform(self, X, y=None):
        """Fit & transform X separately by each transformer, concatenate results.

        Parameters
        ----------
        X : DataFrame of shape [n_samples, n_features]
            Input data, of which specified subsets are used to fit the transformers.

        Returns
        -------
        Xt : DataFrame, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        names = [name for name, _ in self.transformers]
        # Each parallel task returns (fitted_transformer, transformed_X)
        paths = Parallel(n_jobs=self.n_jobs)(
            delayed(self._fit_transform)(clone(transformer), X, y)
            for _, transformer in self.transformers)

        transformers, Xt_list = zip(*paths)

        self.named_transformers_ = dict(zip(names, transformers))
        Xt = pd.concat(Xt_list, axis=1)
        return Xt

    def _fit_transform(self, transformer, X, y):
        # Helper for joblib: fit & transform in one task
        Xt = transformer.fit_transform(X, y)
        return transformer, Xt

    def _fit(self, transformer, X, y):
        # Helper for joblib: fit only
        return transformer.fit(X, y)

    def _transform(self, transformer, X):
        # Helper for joblib: transform only
        return transformer.transform(X)

    def get_params(self, deep=True):
        # Expose nested transformer params via sklearn's '__' convention
        return self._get_params('transformers', deep=deep)

    def set_params(self, **kwargs):
        # Set nested transformer params via sklearn's '__' convention
        self._set_params('transformers', **kwargs)
        return self
def make_union(*transformers, **kwargs):
    """Construct a FeatureUnion from the given transformers.

    This is a shorthand for the FeatureUnion constructor; it does not require,
    and does not permit, naming the transformers. Instead, they will be given
    names automatically based on their types. It also does not allow weighting.

    Parameters
    ----------
    *transformers : list of estimators

    n_jobs : int or None, optional (default=None)
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Returns
    -------
    f : FeatureUnion
    """
    # BUG FIX: the original final line carried fused dataset metadata
    # ("| /robusta-0.0.1.tar.gz/... | pypi |") that broke the syntax.
    n_jobs = kwargs.pop('n_jobs', None)
    if kwargs:
        # We do not currently support `transformer_weights` as we may want to
        # change its type spec in make_union
        raise TypeError('Unknown keyword arguments: "{}"'.format(list(kwargs.keys())[0]))
    return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs)
import pandas as pd
import numpy as np
from sklearn.utils.metaestimators import _BaseComposition
from sklearn.base import TransformerMixin, clone
from sklearn.pipeline import _name_estimators
from ..preprocessing import Identity, ColumnSelector
__all__ = ['ColumnTransformer', 'make_column_transformer']
class ColumnTransformer(_BaseComposition, TransformerMixin):
    '''Applies transformers to columns of an array or pandas DataFrame.

    This estimator allows different columns or column subsets of the input
    to be transformed separately and the features generated by each transformer
    will be concatenated to form a single feature space. This is useful for
    combining several feature extraction mechanisms or transformations
    into a single transformer.

    Parameters
    ----------
    transformer_list : list
        List of (string, transformer, columns) tuples (implementing fit/transform).

    remainder : {'drop', 'pass'} or estimator, default 'drop'
        By default, only the specified columns in `transformer_list` are
        transformed and combined in the output, and the non-specified
        columns are dropped. (default of ``'drop'``).
        By specifying ``remainder='pass'``, all remaining columns that
        were not specified in `transformer_list` will be automatically passed
        through. This subset of columns is concatenated with the output of
        the transformers.
        By setting ``remainder`` to be an estimator, the remaining
        non-specified columns will use the ``remainder`` estimator. The
        estimator must support `fit` and `transform`.

    Attributes
    ----------
    transformers_ : list
        The collection of fitted transformers as tuples of
        (name, fitted_transformer, columns). If there are remaining columns,
        the final element is ('remainder', transformer, remaining_columns_).

    named_transformers_ : dict
        Access the fitted transformer by name.

    remaining_columns_ : list of strings
        Columns not claimed by any transformer, in original X order.
    '''

    def __init__(self, transformer_list, remainder='drop', **kwargs):
        # NOTE(review): unknown keyword arguments are silently discarded
        # here — confirm this is intentional (it can hide caller typos).
        self.transformer_list = transformer_list
        self.remainder = remainder

    @property
    def _transformers(self):
        """
        Internal list of transformer only containing the name and
        transformers, dropping the columns. This is for the implementation
        of get_params via BaseComposition._get_params which expects lists
        of tuples of len 2.
        """
        return [(name, trans) for name, trans, _ in self.transformer_list]

    def fit(self, X, y=None):
        """Fit all transformers using X.

        Parameters
        ----------
        X : DataFrame of shape [n_samples, n_features]
            Input data, of which specified subsets are used to fit the transformers.

        y : array-like, shape (n_samples, ...), optional
            Targets for supervised learning.

        Returns
        -------
        self : ColumnTransformer
            This estimator
        """
        self.transformers_ = []
        self.named_transformers_ = {}
        used_columns = set()

        for name, transformer, cols in self.transformer_list:
            # Clone so repeated fits never share state with the prototype
            fitted_transformer = clone(transformer).fit(X[cols], y)
            self.named_transformers_[name] = fitted_transformer
            self.transformers_.append((name, fitted_transformer, cols))
            used_columns.update(cols)

        # BUG FIX: the former set difference produced an arbitrary,
        # non-deterministic remainder order; preserve X's column order.
        self.remaining_columns_ = [c for c in X.columns if c not in used_columns]

        if self.remaining_columns_:
            name, cols = 'remainder', self.remaining_columns_

            if hasattr(self.remainder, 'fit') and hasattr(self.remainder, 'transform'):
                fitted_transformer = clone(self.remainder).fit(X[cols], y)
            # BUG FIX: '==' instead of 'is' for string comparison (identity
            # of string literals is a CPython interning artifact)
            elif self.remainder == 'pass':
                fitted_transformer = Identity().fit(X[cols], y)
            elif self.remainder == 'drop':
                fitted_transformer = ColumnSelector(cols=[]).fit(X[cols], y)
            else:
                raise ValueError('Unknown type for remainder. Must be "drop", "pass" or estimator.')

            self.transformers_.append((name, fitted_transformer, cols))

        return self

    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.

        Parameters
        ----------
        X : DataFrame of shape [n_samples, n_features]
            Input data, of which specified subsets are used to fit the transformers.

        Returns
        -------
        Xt : DataFrame, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xt_list = [transformer.transform(X[cols])
                   for name, transformer, cols in self.transformers_]
        return pd.concat(Xt_list, axis=1)

    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Parameters
        ----------
        deep : boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        return self._get_params('_transformers', deep=deep)

    def set_params(self, **kwargs):
        """Set the parameters of this estimator.

        Valid parameter keys can be listed with ``get_params()``.

        Returns
        -------
        self
        """
        self._set_params('_transformers', **kwargs)
        return self
def _get_transformer_list(estimators):
    """
    Construct (name, transformer, columns) tuples from a list of
    (transformer, columns) pairs, auto-naming the transformers.
    """
    # FIX: removed the unused local `message` the original declared but
    # never used in any error path.
    transformers, columns = zip(*estimators)
    names, _ = zip(*_name_estimators(transformers))

    transformer_list = list(zip(names, transformers, columns))
    return transformer_list
def make_column_transformer(*transformers, **kwargs):
    """Construct a ColumnTransformer from the given transformers.

    This is a shorthand for the ColumnTransformer constructor; it does not
    require, and does not permit, naming the transformers. Instead, they will
    be given names automatically based on their types. It also does not allow
    weighting with ``transformer_weights``.

    Parameters
    ----------
    *transformers : tuples of transformers and column selections

    remainder : {'drop', 'pass'} or estimator, default 'drop'
        By default, only the specified columns in `transformers` are
        transformed and combined in the output, and the non-specified
        columns are dropped. (default of ``'drop'``).
        By specifying ``remainder='pass'``, all remaining columns that
        were not specified in `transformers` will be automatically passed
        through. This subset of columns is concatenated with the output of
        the transformers.
        By setting ``remainder`` to be an estimator, the remaining
        non-specified columns will use the ``remainder`` estimator. The
        estimator must support `fit` and `transform`.

    sparse_threshold : float, default = 0.3
        If the transformed output consists of a mix of sparse and dense data,
        it will be stacked as a sparse matrix if the density is lower than this
        value. Use ``sparse_threshold=0`` to always return dense.

    n_jobs : int or None, optional (default=None)
        Number of jobs to run in parallel.

    Returns
    -------
    ct : ColumnTransformer
    """
    # BUG FIX: the original final line carried fused dataset metadata
    # ("| /robusta-0.0.1.tar.gz/... | pypi |") that broke the syntax.

    # transformer_weights keyword is not passed through because the user
    # would need to know the automatically generated names of the transformers
    n_jobs = kwargs.pop('n_jobs', None)
    remainder = kwargs.pop('remainder', 'drop')
    sparse_threshold = kwargs.pop('sparse_threshold', 0.3)
    if kwargs:
        raise TypeError('Unknown keyword arguments: "{}"'.format(list(kwargs.keys())[0]))
    transformer_list = _get_transformer_list(transformers)
    # NOTE(review): this package's ColumnTransformer does not declare
    # <n_jobs> or <sparse_threshold>; they are swallowed by its **kwargs and
    # ignored. Confirm whether they should be supported or removed here.
    return ColumnTransformer(transformer_list, n_jobs=n_jobs, remainder=remainder,
                             sparse_threshold=sparse_threshold)
import pandas as pd
import numpy as np
from sklearn.model_selection import check_cv
from sklearn.exceptions import NotFittedError
from sklearn.base import clone, is_classifier
from robusta.importance import get_importance
from robusta.crossval import crossval
from .base import _Selector
# Original: sklearn.feature_selection.SelectFromModel
class SelectFromModel(_Selector):
    """Meta-transformer for selecting features based on importance weights.

    Parameters
    ----------
    estimator : object
        The base estimator from which the transformer is built.
        This can be both a fitted (if cv='prefit') or a non-fitted estimator.
        The estimator must have either a <feature_importances_> or <coef_>
        attribute after fitting.

    threshold : string, float, optional (default None)
        The threshold value to use for feature selection. Features whose
        importance is greater or equal are kept while the others are
        discarded. If "median" (resp. "mean"), then the <threshold> value is
        the median (resp. the mean) of the feature importances. A scaling
        factor (e.g., "1.25*mean") may also be used. If None, drop features
        only based on <max_features>.

    max_features : int, float or None, optional (default 0.5)
        The maximum number of features selected scoring above <threshold>.
        If float, interpreted as proportion of all features.
        To disable <threshold> and only select based on <max_features>,
        set <threshold> to -np.inf.

    cv : int, cross-validation generator, iterable or "prefit"
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to disable cross-validation and train single estimator
          on whole dataset (default).
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        - "prefit" string constant.

        If "prefit" is passed, it is assumed that <estimator> has been
        fitted already and <fit> function will raise error.

    Attributes
    ----------
    estimator_ : list of fitted estimators, or single fitted estimator
        If <cv> is 'prefit'. If <cv> is None, return single estimator.
        Otherwise return list of fitted estimators, length (n_folds, ).

    feature_importances_ : Series of shape (n_features, )
        Feature importances, extracted from estimator(s)

    threshold_ : float
        The threshold value used for feature selection

    max_features_ : int
        Maximum number of features for feature selection

    use_cols_ : list of str
        Columns to select
    """

    def __init__(self, estimator, cv=None, threshold=None, max_features=None):
        self.estimator = estimator
        self.threshold = threshold
        self.max_features = max_features
        self.cv = cv

    def fit(self, X, y, groups=None):
        """Fit the underlying estimator(s), one per CV fold if <cv> is set."""
        # BUG FIX: string comparison previously used 'is' (identity), which
        # only worked by accident of CPython string interning and raises a
        # SyntaxWarning on modern Python. Use '==' instead.
        if self.cv == 'prefit':
            raise NotFittedError("Since 'cv=prefit', call transform directly")

        elif self.cv is None:
            # Single estimator trained on the whole dataset
            self.estimator_ = clone(self.estimator).fit(X, y)

        else:
            self.estimator_ = []

            # BUG FIX: previously passed the freshly-created empty list
            # 'self.estimator_' to is_classifier(), which always returned
            # False and therefore never chose stratified splitting for
            # classifiers. The template estimator is the right argument.
            cv = check_cv(self.cv, y, is_classifier(self.estimator))

            for trn, _ in cv.split(X, y, groups):
                X_trn, y_trn = X.iloc[trn], y.iloc[trn]
                estimator = clone(self.estimator).fit(X_trn, y_trn)
                self.estimator_.append(estimator)

        return self

    @property
    def feature_importances_(self):
        """Mean feature importance across the fitted estimator(s)."""
        imps = []

        # BUG FIX: '==' instead of 'is' (see fit)
        if self.cv == 'prefit':
            estimators = [self.estimator]
        elif self.cv is None:
            estimators = [self.estimator_]
        else:
            estimators = self.estimator_

        for estimator in estimators:
            imp = get_importance(estimator)
            imps.append(imp)

        # Column-wise concat, then average importances across estimators
        return pd.concat(imps, axis=1).mean(axis=1)

    def get_features(self):
        """Return names of features passing BOTH the <threshold> and the
        <max_features> (rank-based) criteria."""
        imp = self.feature_importances_

        self.threshold_ = _check_threshold(imp, self.threshold)
        threshold_mask = (imp >= self.threshold_)

        self.max_features_ = _check_max_features(imp, self.max_features)
        ranking_mask = (imp.rank(ascending=False) <= self.max_features_)

        use_cols = imp.index[threshold_mask & ranking_mask]
        return list(use_cols)
def _check_max_features(importances, max_features):
"""Interpret the max_features value"""
n_features = len(importances)
if max_features is None:
max_features = n_features
elif isinstance(max_features, int):
max_features = min(n_features, max_features)
elif isinstance(max_features, float):
max_features = int(n_features * max_features)
return max_features
def _check_threshold(importances, threshold):
"""Interpret the threshold value"""
if threshold is None:
threshold = -np.inf
elif isinstance(threshold, str):
if "*" in threshold:
scale, reference = threshold.split("*")
scale = float(scale.strip())
reference = reference.strip()
if reference == "median":
reference = np.median(importances)
elif reference == "mean":
reference = np.mean(importances)
else:
raise ValueError("Unknown reference: " + reference)
threshold = scale * reference
elif threshold == "median":
threshold = np.median(importances)
elif threshold == "mean":
threshold = np.mean(importances)
else:
raise ValueError("Expected threshold='mean' or threshold='median' "
"got %s" % threshold)
else:
threshold = float(threshold)
return threshold | /robusta-0.0.1.tar.gz/robusta-0.0.1/selector/from_model.py | 0.811116 | 0.509947 | from_model.py | pypi |
import pandas as pd
import numpy as np
from sklearn.utils.random import check_random_state
from sklearn.exceptions import NotFittedError
from robusta.utils import logmsg
from .base import _WrappedSelector, _WrappedGroupSelector, _check_k_features
class GreedSelector(_WrappedSelector):
    '''Greed Forward/Backward Feature Selector

    Parameters
    ----------
    estimator : object
        The base estimator from which the transformer is built.

    cv : int, cross-validation generator or iterable
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

    scoring : string, callable or None, optional, default: None
        A string or a scorer callable object / function with signature
        ``scorer(estimator, X, y)`` which should return only a single value.
        If None, the estimator's default scorer (if available) is used.

    forward : boolean (default=True)
        Whether to start from empty set or full set of features:

        - If <forward> is True, add feature on each step
        - If <forward> is False, drop feature on each step

    floating : boolean (default=False)
        Whether to produce step back on each round (if increases score!):

        - If <forward> is True, drop feature on each step
        - If <forward> is False, add feature on each step

    k_features : int or float (default=0.5)
        Number of features to select. If float, interpreted as percentage
        of total # of features:

        - If <forward> is True, <k_features> is maximum # of features.
        - If <forward> is False, <k_features> is minimum # of features.

    max_time : float or None
        Maximum time (in seconds). None for no limits. Use Ctrl+C for
        KeyboardInterrupt to stop optimization in this case.

    use_best : bool (default=True)
        Whether to use subset with best score or last selected subset.

    random_state : int or None (default=0)
        Random seed for permutations in PermutationImportance.
        Ignored if <importance_type> set to 'inbuilt'.

    n_jobs : int or None, optional (default=-1)
        The number of jobs to run in parallel. None means 1.

    verbose : int, optional (default=1)
        Verbosity level

    n_digits : int (default=4)
        Verbose score(s) precision
    '''
    def __init__(self, estimator, cv=5, scoring=None, forward=True, floating=False,
                 k_features=0.5, max_time=None, use_best=True, random_state=0,
                 n_jobs=None, verbose=1, n_digits=4, cv_kwargs={}):

        self.estimator = estimator
        self.k_features = k_features
        self.forward = forward
        self.floating = floating
        #self.max_candidates = max_candidates # TODO
        self.max_time = max_time
        self.use_best = use_best

        self.cv = cv
        self.cv_kwargs = cv_kwargs
        self.scoring = scoring

        self.random_state = random_state
        self.n_jobs = n_jobs

        self.verbose = verbose
        self.n_digits = n_digits

    def fit(self, X, y, groups=None):
        """Run the greedy selection from scratch."""
        self._fit_start(X)
        self._fit(X, y, groups)
        return self

    def partial_fit(self, X, y, groups=None):
        """Continue a previous selection run, keeping earlier trials."""
        self._fit_start(X, partial=True)
        self._fit(X, y, groups)
        return self

    def _fit_start(self, X, partial=False):
        self._set_features(X)
        self.k_features_ = _check_k_features(self.k_features, self.n_features_, 'k_features')

        if not partial:
            self.rstate_ = check_random_state(self.random_state)

            # Forward search starts from the empty subset,
            # backward search from the full feature set
            self.subset_ = self.features_.copy()
            if self.forward:
                self.subset_.set_subset([])

            self._reset_trials()

        return self

    def _eval_candidates(self, updates, append, skip_seen, X, y, groups):
        """Evaluate all one-feature modifications of the current subset.

        Returns (best_subset, best_score); best_subset is None when every
        candidate failed or was skipped.
        """
        best_score = -np.inf
        best_subset = None

        for feature in updates:

            # Include/Exclude a single feature
            if append:
                candidate = self.subset_.append(feature)
            else:
                candidate = self.subset_.remove(feature)
            candidate.parents = (self.subset_, )

            # Skip subsets already evaluated (floating step only)
            if skip_seen and candidate in self.trials_:
                continue

            try:
                self.eval_subset(candidate, X, y, groups)
            except KeyboardInterrupt:
                raise
            # Best-effort: a candidate whose evaluation fails is skipped.
            # BUG FIX: was a bare 'except:', which also swallowed
            # SystemExit and masked programming errors.
            except Exception:
                continue

            if candidate.score > best_score:
                best_score = candidate.score
                best_subset = candidate

        return best_subset, best_score

    def _fit(self, X, y, groups):
        from time import time  # local import: only the loop needs timing

        # Target predicate: stop once the subset reaches <k_features_>
        # (from above when forward, from below when backward)
        if self.forward:
            is_final = lambda subset: len(subset) >= self.k_features_
        else:
            is_final = lambda subset: len(subset) <= self.k_features_

        start_time = time()

        self.eval_subset(self.subset_, X, y, groups)
        self.score_ = self.subset_.score

        while not is_final(self.subset_):
            try:
                # STEP 1. Step Forward/Backward
                if self.verbose:
                    logmsg('STEP {}'.format('FORWARD' if self.forward else 'BACKWARD'))

                if self.forward:
                    updates = self.features_.remove(*self.subset_)
                else:
                    updates = self.subset_

                subset, score = self._eval_candidates(updates, append=self.forward,
                                                      skip_seen=False,
                                                      X=X, y=y, groups=groups)

                # BUG FIX: previously a None subset (all candidates failed)
                # was assigned unconditionally and crashed on the next len()
                if subset is None:
                    break
                self.subset_ = subset
                self.score_ = score

                # BUG FIX: <max_time> was accepted but never enforced
                if self.max_time is not None and time() - start_time > self.max_time:
                    break

                if not self.floating or is_final(self.subset_):
                    continue

                # STEP 2. Step Backward/Forward (floating step)
                if self.verbose:
                    logmsg('STEP {}'.format('BACKWARD' if self.forward else 'FORWARD'))

                if not self.forward:
                    updates = self.features_.remove(*self.subset_)
                else:
                    updates = self.subset_

                subset, score = self._eval_candidates(updates, append=not self.forward,
                                                      skip_seen=True,
                                                      X=X, y=y, groups=groups)

                # Keep the floating step only if it does not hurt the score
                if subset is None or score < self.score_:
                    continue

                self.subset_ = subset
                self.score_ = score

            except KeyboardInterrupt:
                # Graceful stop, as the docstring promises for Ctrl+C
                break

        return self

    def get_subset(self):
        """Return the best (or last) evaluated subset, depending on <use_best>."""
        if (self.use_best is True) and hasattr(self, 'best_subset_'):
            return self.best_subset_
        elif (self.use_best is False) and len(self.subset_) > 0:
            # BUG FIX: previously returned 'self.last_subset_', an attribute
            # that is never defined anywhere (AttributeError at runtime).
            # The last selected subset lives in 'self.subset_'.
            return self.subset_
        else:
            model_name = self.__class__.__name__
            raise NotFittedError(f'{model_name} is not fitted')
class GroupGreedSelector(_WrappedGroupSelector, GreedSelector):
    # Group-wise variant of GreedSelector. All behavior comes from the
    # _WrappedGroupSelector mixin; presumably features are added/removed
    # one GROUP at a time (cf. FeatureSubset's group mode) — confirm
    # against the base module.
    pass
import pandas as pd
import numpy as np
from sklearn.utils.random import check_random_state
from sklearn.exceptions import NotFittedError
from .base import _WrappedSelector, _WrappedGroupSelector
class RandomSelector(_WrappedSelector):
    '''Random feature selector for sampling and evaluating randomly choosen
    feature subsets of specified size.

    Parameters
    ----------
    estimator : object
        The base estimator from which the transformer is built.

    cv : int, cross-validation generator or iterable
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

    scoring : string, callable or None, optional, default: None
        A string or a scorer callable object / function with signature
        ``scorer(estimator, X, y)`` which should return only a single value.
        If None, the estimator's default scorer (if available) is used.

    min_features, max_features : int or float
        Minimum & maximum number of features. If float, interpreted as
        percentage of total number of features. <max_features> must be greater
        or equal to the <min_features>.

    max_iter : int or None
        Maximum number of iterations (per call to fit/partial_fit). None
        for no limits. Use <max_time> or Ctrl+C for KeyboardInterrupt to
        stop optimization in this case.

    max_time : float or None
        Maximum time (in seconds). None for no limits. Use <max_iter>
        or Ctrl+C for KeyboardInterrupt to stop optimization in this case.

    weights : {'binomal', 'uniform'}
        Probability for subset sizes:

        - 'uniform': each # of features selected with equal probability
        - 'binomal': each # of features selected with probability, which
          proportional to # of different subsets of given size (binomal
          coefficient nCk, where n - total # of features, k - subset size)

    random_state : int
        Random state for subsets generator

    n_jobs : int or None, optional (default=-1)
        The number of jobs to run in parallel. None means 1.

    verbose : int, optional (default=1)
        Verbosity level

    Attributes
    ----------
    features_ : list of string
        Feature names

    n_features_ : int
        Total number of features

    min_features_, max_features_ : int
        Minimum and maximum subsets size

    weights_ : Series
        Subset sizes weights (not normalized)

    rstate_ : object
        Random state instance

    trials_ : DataFrame
        All evaluated subsets:

        - 'subset': subset of feature names
        - 'score': average cross-validation score
        - 'time': fitting time

    best_iter_: int
        Best trial's index

    best_score_: float
        Best trial's score

    best_subset_: list of string
        Best subset of features

    total_time_: float
        Total optimization time (seconds)
    '''
    def __init__(self, estimator, cv=5, scoring=None, max_iter=20, max_time=None,
                 min_features=0.5, max_features=0.9, weights='uniform', n_jobs=-1,
                 random_state=0, verbose=1, n_digits=4, cv_kwargs={}):

        self.estimator = estimator
        self.min_features = min_features
        self.max_features = max_features
        self.max_iter = max_iter
        self.max_time = max_time
        self.weights = weights

        self.cv = cv
        self.cv_kwargs = cv_kwargs
        self.scoring = scoring

        self.random_state = random_state
        self.n_jobs = n_jobs

        self.verbose = verbose
        self.n_digits = n_digits

    def fit(self, X, y, groups=None):
        """Sample and evaluate random subsets from scratch."""
        self._fit_start(X)
        self._fit(X, y, groups)
        return self

    def partial_fit(self, X, y, groups=None):
        """Continue sampling, keeping earlier trials and random state."""
        self._fit_start(X, partial=True)
        self._fit(X, y, groups)
        return self

    def _fit(self, X, y, groups=None):
        from time import time  # local import: only the loop needs timing

        start_time = time()
        n_iters = 0

        # BUG FIX: the loop was previously 'while True', silently ignoring
        # both <max_iter> (default 20!) and <max_time> — only Ctrl+C could
        # ever stop it. Both limits are now enforced per call.
        while (self.max_iter is None) or (n_iters < self.max_iter):

            if (self.max_time is not None) and (time() - start_time > self.max_time):
                break

            try:
                # Draw a subset size k with probability ~ weights_, then
                # a uniformly random subset of that size
                k = weighted_choice(self.weights_, self.rstate_)
                subset = self.features_.sample(size=k, random_state=self.rstate_)
                self.eval_subset(subset, X, y, groups)
                n_iters += 1

            except KeyboardInterrupt:
                break

        return self

    def _fit_start(self, X, partial=False):

        if not partial:
            self._reset_trials()

        if not partial and hasattr(self, 'random_state'):
            self.rstate_ = check_random_state(self.random_state)

        self._set_features(X)

        weights_vals = ['uniform', 'binomal']

        if self.weights == 'binomal':
            self.weights_ = binomal_weights(self.min_features_,
                                            self.max_features_,
                                            self.n_features_)
        elif self.weights == 'uniform':
            self.weights_ = uniform_weights(self.min_features_,
                                            self.max_features_)
        else:
            raise ValueError(f'<weights> must be from {weights_vals}')

        return self

    def get_subset(self):
        """Return the best evaluated subset, raising if not fitted."""
        if hasattr(self, 'best_subset_'):
            return self.best_subset_
        else:
            model_name = self.__class__.__name__
            raise NotFittedError(f'{model_name} is not fitted')
class GroupRandomSelector(_WrappedGroupSelector, RandomSelector):
    # Group-wise variant of RandomSelector. All behavior comes from the
    # _WrappedGroupSelector mixin; presumably subsets are sampled at the
    # feature-GROUP level (cf. FeatureSubset's group mode) — confirm
    # against the base module.
    pass
fact = lambda x: x*fact(x-1) if x else 1
def nCk(n, k):
    """Return the binomial coefficient C(n, k) = n! / (k! * (n-k)!).

    Uses math.factorial (exact integer arithmetic, implemented in C)
    instead of the module-level recursive 'fact' helper, so it cannot
    hit the recursion limit for large n.
    """
    from math import factorial  # local import: keeps module deps unchanged
    return factorial(n) // factorial(k) // factorial(n - k)
def binomal_weights(k_min, k_max, n):
    """Series of binomial coefficients C(n, k) for each subset size k in
    [k_min, k_max], indexed by k.

    Used as unnormalized sampling weights so that each subset size is
    drawn proportionally to how many distinct subsets of that size exist.
    """
    sizes = range(k_min, k_max + 1)
    counts = [nCk(n, size) for size in sizes]
    return pd.Series(counts, index=sizes).sort_index()
def uniform_weights(k_min, k_max):
    """Series of equal (unit) weights for every subset size in
    [k_min, k_max], indexed by size.

    Used as unnormalized sampling weights giving each subset size the
    same probability of being drawn.
    """
    sizes = range(k_min, k_max + 1)
    return pd.Series(1, index=sizes).sort_index()
def weighted_choice(weights, rstate):
    """Draw one index from <weights> (a pd.Series of non-negative
    weights) with probability proportional to its weight.

    <rstate> is a numpy RandomState-like object providing .uniform().
    """
    # Pick a point in [0, total) and walk the cumulative weights until
    # it is covered
    remaining = rstate.uniform() * weights.sum()
    for index, weight in weights.items():
        remaining -= weight
        if remaining <= 0:
            return index
import numpy as np
import pandas as pd
from sklearn.utils.random import check_random_state
from deap import creator, base, tools, algorithms
from robusta.utils import logmsg, get_ranks, secfmt
from .base import _WrappedGroupSelector, _WrappedSelector
from ._plot import _plot_progress, _plot_subset
__all__ = ['GeneticSelector', 'GroupGeneticSelector']
def cxUniform(ind1, ind2, indpb=0.5, random_state=None, drop_attrs=('score',)):
    """Uniform crossover: each mask position of the children is kept from
    its own parent with probability <indpb>, otherwise swapped.

    Parameters
    ----------
    ind1, ind2 : individual
        Parents; must expose <mask>, <copy()> and <set_mask()>.
    indpb : float
        Per-position probability of keeping (not swapping) the bit.
    random_state : int, RandomState instance or None
        Controls the randomness of the swaps.
    drop_attrs : iterable of str
        Cached attributes to invalidate on the children (default: the
        memoized 'score'). BUG FIX: the default was a mutable list
        (shared-mutable-default pitfall); changed to a tuple.

    Returns
    -------
    child1, child2 : individual
        New individuals with <parents> set to (ind1, ind2).
    """
    rstate = check_random_state(random_state)

    mask1, mask2 = [], []
    for x, y in zip(ind1.mask, ind2.mask):
        if rstate.rand() < indpb:
            mask1.append(x)
            mask2.append(y)
        else:
            mask1.append(y)
            mask2.append(x)

    child1 = ind1.copy().set_mask(mask1)
    child2 = ind2.copy().set_mask(mask2)

    child1.parents = (ind1, ind2)
    child2.parents = (ind1, ind2)

    # Invalidate cached attributes inherited via copy()
    for attr in drop_attrs:
        for child in [child1, child2]:
            if hasattr(child, attr):
                delattr(child, attr)

    return child1, child2
def cxOnePoint(ind1, ind2, indpb=0.5, random_state=None, drop_attrs=('score',)):
    """One-point crossover over a RANDOM PERMUTATION of the positions:
    positions before the cut keep their parent's bit, positions after it
    are swapped.

    NOTE(review): bits are read at position i (``ind1.mask[i]`` /
    ``ind2.mask[j]``, mixed i/j indexing) but written at the permuted
    position j — so this is not a classic in-order one-point crossover.
    Presumably intentional for order-free feature subsets; confirm
    against GeneticSelector usage.

    Parameters
    ----------
    ind1, ind2 : individual
        Parents; must expose <mask>, <n_features>, <copy()>, <set_mask()>.
    indpb : float
        Unused here (kept for a uniform operator signature).
    random_state : int, RandomState instance or None
        Controls the permutation and the cut point.
    drop_attrs : iterable of str
        Cached attributes to invalidate on the children. BUG FIX: the
        default was a mutable list; changed to a tuple.

    Returns
    -------
    child1, child2 : individual
    """
    rstate = check_random_state(random_state)

    n = ind1.n_features
    argsort = rstate.permutation(n)
    a = rstate.randint(n)  # cut point

    mask1 = np.zeros((n,), dtype=bool)
    mask2 = np.zeros((n,), dtype=bool)

    for i in range(n):
        j = argsort[i]
        x = ind1.mask[i]
        y = ind2.mask[j]
        if a <= i:
            mask1[j] = x
            mask2[j] = y
        else:
            mask1[j] = y
            mask2[j] = x

    child1 = ind1.copy().set_mask(mask1)
    child2 = ind2.copy().set_mask(mask2)

    child1.parents = (ind1, ind2)
    child2.parents = (ind1, ind2)

    # Invalidate cached attributes inherited via copy()
    for attr in drop_attrs:
        for child in [child1, child2]:
            if hasattr(child, attr):
                delattr(child, attr)

    return child1, child2
def cxTwoPoint(ind1, ind2, indpb=0.5, random_state=None, drop_attrs=('score',)):
    """Two-point crossover over a RANDOM PERMUTATION of the positions:
    bits inside the segment [a, b] keep their parent, the rest are
    swapped.

    NOTE(review): as in cxOnePoint, bits are read with mixed i/j
    indexing and written at the permuted position j — not a classic
    in-order two-point crossover. Presumably intentional for order-free
    feature subsets; confirm against GeneticSelector usage.

    Parameters
    ----------
    ind1, ind2 : individual
        Parents; must expose <mask>, <n_features>, <copy()>, <set_mask()>.
    indpb : float
        Unused here (kept for a uniform operator signature).
    random_state : int, RandomState instance or None
        Controls the permutation and the two cut points.
    drop_attrs : iterable of str
        Cached attributes to invalidate on the children. BUG FIX: the
        default was a mutable list; changed to a tuple.

    Returns
    -------
    child1, child2 : individual
    """
    rstate = check_random_state(random_state)

    n = ind1.n_features
    argsort = rstate.permutation(n)

    a = rstate.randint(n)
    b = rstate.randint(n)
    a, b = min(a, b), max(a, b)  # ordered segment bounds

    mask1 = np.zeros((n,), dtype=bool)
    mask2 = np.zeros((n,), dtype=bool)

    for i in range(n):
        j = argsort[i]
        x = ind1.mask[i]
        y = ind2.mask[j]
        if a <= i <= b:
            mask1[j] = x
            mask2[j] = y
        else:
            mask1[j] = y
            mask2[j] = x

    child1 = ind1.copy().set_mask(mask1)
    child2 = ind2.copy().set_mask(mask2)

    child1.parents = (ind1, ind2)
    child2.parents = (ind1, ind2)

    # Invalidate cached attributes inherited via copy()
    for attr in drop_attrs:
        for child in [child1, child2]:
            if hasattr(child, attr):
                delattr(child, attr)

    return child1, child2
# Registry mapping the crossover-name strings accepted by the
# GeneticSelector ``crossover`` parameter to the operator functions above.
CROSSOVER = {
    'one': cxOnePoint,
    'two': cxTwoPoint,
    'uni': cxUniform,
}
def mutSubset(ind, indpb, random_state=None, drop_attrs=('score',)):
    """Bit-flip mutation: each mask position of <ind> is inverted
    independently with probability <indpb>.

    NOTE: unlike the crossover operators, mutation is performed IN PLACE
    (``set_mask`` mutates <ind> and returns it) — callers may ignore the
    return value.

    Parameters
    ----------
    ind : individual
        Individual to mutate; must expose <mask> and <set_mask()>.
    indpb : float
        Per-position flip probability.
    random_state : int, RandomState instance or None
        Controls the flips.
    drop_attrs : iterable of str
        Cached attributes to invalidate on the mutant. BUG FIX: the
        default was a mutable list (shared-mutable-default pitfall);
        changed to a tuple.
    """
    rstate = check_random_state(random_state)

    mask = []
    for x in ind.mask:
        y = (rstate.rand() < indpb)
        mask.append(x ^ y)  # XOR flips the bit when y is True

    mutant = ind.set_mask(mask)

    # Invalidate cached attributes (e.g. the memoized score)
    for attr in drop_attrs:
        if hasattr(mutant, attr):
            delattr(mutant, attr)

    return mutant
class GeneticSelector(_WrappedSelector):
    '''Feature Selector based on a Genetic Algorithm

    Parameters
    ----------
    estimator : object
        The base estimator from which the transformer is built.

    cv : int, cross-validation generator or iterable
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

    scoring : string, callable or None, optional, default: None
        A string or a scorer callable object / function with signature
        ``scorer(estimator, X, y)`` which should return only a single value.
        If None, the estimator's default scorer (if available) is used.

    n_gen : int or None
        Maximum number of generations. None for no limits. Use Ctrl+C
        (KeyboardInterrupt) to stop optimization in this case.

    crossover : {'one', 'two', 'uni'} (default='one')
        Crossover operator: one-point, two-point or uniform (see the
        CROSSOVER registry above).

    min_features, max_features : int or float
        Minimum & maximum number of features. If float, interpreted as
        percentage of total number of features.

    pop_size : int, default=50
        Population size (number of individuals)

    mutation : float [0..1], default=0.01
        Per-position (mask cell) probability of mutation

    max_time : float or None
        Maximum time (in seconds).
        NOTE(review): accepted and stored, but not enforced in _fit —
        only <n_gen> and Ctrl+C stop the loop; confirm intended behavior.

    random_state : int or None (default=None)
        Random seed for population sampling, crossover and mutation.

    n_jobs : int or None, optional (default=None)
        The number of jobs to run in parallel. None means 1.

    verbose : int, optional (default=1)
        Verbosity level

    n_digits : int (default=4)
        Verbose score(s) precision
    '''
    def __init__(self, estimator, cv=5, scoring=None, n_gen=None, crossover='one',
                 min_features=0.1, max_features=0.9, pop_size=50, mutation=0.01,
                 max_time=None, random_state=None, n_jobs=None, verbose=1,
                 n_digits=4, cv_kwargs={}):

        self.estimator = estimator
        self.scoring = scoring
        self.cv = cv
        self.cv_kwargs = cv_kwargs

        self.min_features = min_features
        self.max_features = max_features

        self.crossover = crossover
        self.mutation = mutation
        self.max_time = max_time
        self.pop_size = pop_size
        self.n_gen = n_gen

        self.random_state = random_state
        self.verbose = verbose
        self.n_digits = n_digits
        self.n_jobs = n_jobs

    def fit(self, X, y, groups=None):
        # Fresh run: re-initialize trials, toolbox and population
        self._fit_start(X)
        self._fit(X, y, groups)
        return self

    def partial_fit(self, X, y, groups=None):
        # Continue evolving the existing population (if any)
        self._fit_start(X, partial=True)
        self._fit(X, y, groups)
        return self

    def _fit_start(self, X, partial=False):

        self._set_features(X)

        # (Re-)initialize everything unless continuing a previous run
        if not partial or not hasattr(self, 'trials_'):

            self._reset_trials()
            self.k_gen_ = 0

            # Init toolbox
            self.toolbox = base.Toolbox()
            self.rstate = check_random_state(self.random_state)

            # Define individual: a random feature subset whose size is
            # drawn uniformly from [min_features_, max_features_]
            k_min = self.min_features_
            k_max = self.max_features_

            def get_individual():
                ind_size = self.rstate.choice(range(k_min, k_max+1))
                features = self.features_.sample(ind_size)
                return features

            self.toolbox.register("individual", get_individual)

            # Define population
            self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)
            self.population = self.toolbox.population(n=self.pop_size)

        return self

    def _fit(self, X, y, groups):

        # Define crossover & mutation (operators defined above)
        mate = CROSSOVER[self.crossover]
        self.toolbox.register("mate", mate, random_state=self.rstate)
        self.toolbox.register("mutate", mutSubset, random_state=self.rstate, indpb=self.mutation)

        # Define evaluation & selection (tournament on memoized 'score')
        self.toolbox.register("eval", self.eval_subset, X=X, y=y, groups=groups)
        self.toolbox.register("select", tools.selTournament, tournsize=5, fit_attr='score')

        # NOTE: 'while not self.n_gen' means n_gen=None (or 0) runs until
        # interrupted by Ctrl+C
        while not self.n_gen or self.k_gen_ < self.n_gen:

            if self.verbose:
                logmsg(f'GENERATION {self.k_gen_+1}')

            try:
                offspring = []

                # Apply crossover: parents are drawn with probability
                # proportional to their score rank (uniform on gen 0,
                # where scores do not exist yet)
                if self.k_gen_ > 0:
                    weights = [ind.score for ind in self.population]
                    weights = get_ranks(weights, normalize=True)
                else:
                    weights = None

                for _ in range(self.pop_size):
                    ind1, ind2 = self.rstate.choice(self.population, 2, p=weights)
                    # Only the first child of each mating is kept
                    child, _ = self.toolbox.mate(ind1, ind2)
                    offspring.append(child)

                # Apply mutation (in place — see mutSubset)
                for ind in offspring:
                    self.toolbox.mutate(ind)

                # Evaluate
                for ind in offspring:
                    self.toolbox.eval(ind)

                # Select next generation by tournament
                self.population = self.toolbox.select(offspring, k=self.pop_size)
                self.k_gen_ += 1

            except KeyboardInterrupt:
                break

            # Per-generation summary of the freshly evaluated offspring.
            # NOTE(review): if evaluation of the very first generation is
            # interrupted, 'offspring' may be empty and np.mean([]) below
            # would emit a warning/NaN — confirm whether this path matters.
            if self.verbose:
                print()

                scores = [ind.score for ind in offspring]
                avg = np.mean(scores)
                std = np.std(scores)
                logmsg('SCORE AVG: {:.{n}f} ± {:.{n}f}'.format(avg, std, n=self.n_digits))
                logmsg('SCORE MIN: {:.{n}f}'.format(np.min(scores), n=self.n_digits))
                logmsg('SCORE MAX: {:.{n}f}'.format(np.max(scores), n=self.n_digits))
                print()

                sizes = [ind.n_selected for ind in offspring]
                avg = int(np.mean(sizes))
                std = int(np.std(sizes))
                logmsg('SIZE AVG: {} ± {}'.format(avg, std))
                logmsg('SIZE MIN: {}'.format(np.min(sizes)))
                logmsg('SIZE MAX: {}'.format(np.max(sizes)))
                print()

                times = [ind.eval_time for ind in offspring]
                time_avg = secfmt(np.mean(times))
                time_sum = secfmt(np.sum(times))
                logmsg('TIME SUM: {}'.format(time_sum))
                logmsg('TIME AVG: {}'.format(time_avg))
                print()

        return self

    def get_subset(self):
        # NOTE(review): unlike the sibling selectors, this raises a bare
        # AttributeError (not NotFittedError) when called before fit —
        # confirm whether that is intended.
        return self.best_subset_

    def plot_progress(self, **kwargs):
        # Score-vs-trial progress plot (see _plot helpers)
        kwargs_ = dict(marker='.', linestyle='--', alpha=0.3, c='g')
        kwargs_.update(kwargs)
        return _plot_progress(self, **kwargs_)

    def plot_subset(self, **kwargs):
        # Subset-size-vs-trial plot (see _plot helpers)
        kwargs_ = dict(marker='.', linestyle='--', alpha=0.3, c='g')
        kwargs_.update(kwargs)
        return _plot_subset(self, **kwargs_)
class GroupGeneticSelector(_WrappedGroupSelector, GeneticSelector):
    # Group-wise variant of GeneticSelector. All behavior comes from the
    # _WrappedGroupSelector mixin; presumably individuals encode feature
    # GROUPS rather than single columns (cf. FeatureSubset's group mode)
    # — confirm against the base module.
    pass
from sklearn.utils.random import check_random_state
from copy import copy
import pandas as pd
import numpy as np
class FeatureSubset:
    """A (possibly partial) selection of columns out of a fixed feature set.

    Internally stores the full feature list plus a boolean <mask>; the
    selected <subset> is always re-derived from the mask, so it keeps the
    original feature order no matter how the selection was specified.
    """

    def __init__(self, features, subset=None, mask=None, group=False):

        # Feature names must not repeat
        msg = '<features> must be unique'
        assert len(set(features)) == len(features), msg

        self.group = bool(group)
        if self.group:
            # Grouped mode: <features> is a MultiIndex and the selectable
            # units are the unique labels of its first level
            self.features = features.get_level_values(0).unique()
        else:
            self.features = np.array(features)

        # At most one of <subset>/<mask> may be supplied
        if subset is not None and mask is not None:
            raise ValueError('<subset> & <mask> could not be set at once')

        if subset is not None:
            self.set_subset(subset)
        elif mask is not None:
            self.set_mask(mask)
        else:
            # Default: everything selected
            self.set_mask([True] * self.n_features)

    def __iter__(self):
        return iter(self.subset)

    def __len__(self):
        return self.n_selected

    def __array__(self, *args, **kwargs):
        return np.array(self.subset, *args, **kwargs)

    def __str__(self):
        return self.subset.__str__()

    def __repr__(self):
        cls_name = self.__class__.__name__
        # Indent continuation lines to align under the class name
        body = str(self).replace('\n', '\n ' + ' ' * len(cls_name))
        return '{}({})'.format(cls_name, body)

    def __eq__(self, other):
        # Two subsets are equal iff they select exactly the same positions
        return np.all(self.mask == other.mask)

    def set_subset(self, subset):
        """Select exactly the given feature names (order-insensitive)."""
        msg = 'Not all <subset> values are in <features>'
        assert np.isin(subset, self.features).all(), msg

        msg = 'All <subset> values must be unique'
        assert len(set(subset)) == len(subset), msg

        self.set_mask(np.isin(self.features, subset))
        return self

    def set_mask(self, mask):
        """Select features by boolean mask (in place); returns self."""
        msg = '<mask> length must be the same as <features>'
        assert len(mask) == self.n_features, msg

        self.mask = np.array(mask, dtype=bool)
        self.subset = self.features[self.mask]
        return self

    def sample(self, size=None, random_state=None):
        """Return a new random subset: of fixed <size> if given, otherwise
        with each feature included independently with probability 1/2."""
        rstate = check_random_state(random_state)

        if size:
            chosen = rstate.choice(self.features, size=size, replace=False)
            return self.copy().set_subset(chosen)

        mask = rstate.randint(0, 2, size=self.n_features, dtype=bool)
        return self.copy().set_mask(mask)

    def remove(self, *features, copy=True):
        """Deselect the given (currently selected) features; returns a
        copy unless copy=False."""
        target = self.copy() if copy else self

        msg = 'All elements must be unique'
        assert len(set(features)) == len(features), msg

        msg = 'All elements must be in <subset>'
        assert np.isin(features, target.subset).all(), msg

        # XOR clears exactly the bits of the removed features
        drop_mask = np.isin(target.features, features)
        target.set_mask(target.mask ^ drop_mask)
        return target

    def append(self, *features, copy=True):
        """Select the given (currently unselected) features; returns a
        copy unless copy=False."""
        target = self.copy() if copy else self

        msg = 'All elements must be unique'
        assert len(set(features)) == len(features), msg

        msg = 'All elements must be in <features>'
        assert np.isin(features, target.features).all(), msg

        msg = 'Some elements already in <subset>'
        assert not np.isin(features, target.subset).any(), msg

        target.set_subset(np.append(target.subset, features))
        return target

    def copy(self):
        # Shallow copy is safe: the setters replace mask/subset wholesale
        # instead of mutating the shared arrays
        return copy(self)

    @property
    def n_features(self):
        # Total number of (selectable) features
        return len(self.features)

    @property
    def n_selected(self):
        # Number of currently selected features
        return len(self.subset)

    @property
    def shape(self):
        return (self.n_selected, )
import pandas as pd
import numpy as np
from sklearn.exceptions import NotFittedError
from robusta.utils import all_subsets
from .base import _WrappedSelector, _WrappedGroupSelector
class ExhaustiveSelector(_WrappedSelector):
    '''Exhaustive feature selector for sampling and evaluating all possible
    feature subsets of specified size.

    Parameters
    ----------
    estimator : object
        The base estimator from which the transformer is built.

    cv : int, cross-validation generator or iterable
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

    scoring : string, callable or None, optional, default: None
        A string or a scorer callable object / function with signature
        ``scorer(estimator, X, y)`` which should return only a single value.
        If None, the estimator's default scorer (if available) is used.

    min_features, max_features : int or float
        Minimum & maximum number of features. If float, interpreted as
        percentage of total number of features. <max_features> must be greater
        or equal to the <min_features>.

    n_jobs : int or None, optional (default=-1)
        The number of jobs to run in parallel. None means 1.

    verbose : int, optional (default=1)
        Verbosity level

    Attributes
    ----------
    features_ : list of string
        Feature names

    n_features_ : int
        Total number of features

    min_features_, max_features_ : int
        Minimum and maximum subsets size

    subsets_ : list
        All candidate subsets, enumerated in size order

    trials_ : DataFrame
        All evaluated subsets:

        - 'subset': subset of feature names
        - 'score': average cross-validation score
        - 'time': fitting time

    best_iter_: int
        Best trial's index

    best_score_: float
        Best trial's score

    best_subset_: list of string
        Best subset of features

    total_time_: float
        Total optimization time (seconds)
    '''
    def __init__(self, estimator, cv=5, scoring=None, min_features=0.5, n_jobs=-1,
                 max_features=0.9, verbose=1, n_digits=4, cv_kwargs={}):

        self.estimator = estimator
        self.min_features = min_features
        self.max_features = max_features

        self.cv = cv
        self.scoring = scoring

        self.n_jobs = n_jobs
        self.verbose = verbose
        self.n_digits = n_digits
        self.cv_kwargs = cv_kwargs

    def fit(self, X, y, groups=None):
        # Fresh run: enumerate all candidate subsets, then evaluate
        self._fit_start(X)
        self._fit(X, y, groups)
        return self

    def partial_fit(self, X, y, groups=None):
        # Resume evaluation from where the previous call stopped
        self._fit_start(X, partial=True)
        self._fit(X, y, groups)
        return self

    def _fit_start(self, X, partial=False):

        self._set_features(X)

        if not partial:
            # Materialize every subset of size min_features_..max_features_.
            # WARNING: combinatorial in n_features — feasible only for
            # small feature counts / narrow size ranges.
            k_range = range(self.min_features_, self.max_features_+1)
            self.subsets_ = all_subsets(self.features_, k_range)
            self.subsets_ = list(self.subsets_)
            self.max_iter = len(self.subsets_)
            self._reset_trials()

        # Cursor into subsets_ (kept across partial_fit calls).
        # NOTE(review): unusually, no trailing underscore despite being a
        # fitted attribute — kept as-is for compatibility.
        if not hasattr(self, 'k_iter') or not partial:
            self.k_iter = 0

        return self

    def _fit(self, X, y, groups):

        # Evaluate remaining candidates in order; Ctrl+C stops gracefully
        # and partial_fit can resume from k_iter
        while self.k_iter < self.max_iter:
            subset = self.subsets_[self.k_iter]

            try:
                self.eval_subset(subset, X, y, groups)
            except KeyboardInterrupt:
                break

            self.k_iter += 1

        return self

    def get_subset(self):
        # Best evaluated subset; raises if fit was never (successfully) run
        if hasattr(self, 'best_subset_'):
            return self.best_subset_
        else:
            model_name = self.__class__.__name__
            raise NotFittedError(f'{model_name} is not fitted')
class GroupExhaustiveSelector(_WrappedGroupSelector, ExhaustiveSelector):
    # Group-wise variant of ExhaustiveSelector. All behavior comes from
    # the _WrappedGroupSelector mixin; presumably subsets are enumerated
    # over feature GROUPS (cf. FeatureSubset's group mode) — confirm
    # against the base module.
    pass
import pandas as pd
import numpy as np
from time import time
from sklearn.utils.random import check_random_state
from sklearn.model_selection import check_cv
from sklearn.exceptions import NotFittedError
from sklearn.base import clone, is_classifier
from .base import _WrappedSelector, _WrappedGroupSelector
from ..importance import *
class RFE(_WrappedSelector):
"""Feature ranking with recursive feature elimination (RFE) and
cross-validated selection of the best number of features.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and the importance of each feature is obtained either through a
<coef_> attribute or through a <feature_importances_> attribute. Then, the
least important features are pruned from current set of features. That
procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Parameters
----------
estimator : object
The base estimator from which the transformer is built.
The estimator must have either a <feature_importances_> or <coef_>
attribute after fitting.
cv : int, cross-validation generator or iterable
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
scoring : string, callable or None, optional, default: None
A string or a scorer callable object / function with signature
``scorer(estimator, X, y)`` which should return only a single value.
If None, the estimator's default scorer (if available) is used.
min_features : int or float, optional (default=0.5)
The number of features to select. Float values means percentage of
features to select. E.g. value 0.5 (by default) means 50% of features.
step : int or float, optional (default=1)
The number of features to remove on each step. Float values means
percentage of left features. E.g. 0.2 means 20% reduction on each step:
500 => 400 => 320 => ...
random_state : int or None (default=0)
Random seed for permutations in PermutationImportance.
Ignored if <importance_type> set to 'inbuilt'.
n_jobs : int or None, optional (default=-1)
The number of jobs to run in parallel. None means 1.
verbose : int, optional (default=1)
Verbosity level
Attributes
----------
use_cols_ : list of string
Feature names to select
n_features_ : int
Number of selected features
min_features_ : int
Minimum number of features
"""
def __init__(self, estimator, cv=5, scoring=None, min_features=0.5, step=1,
             use_best=True, n_jobs=None, verbose=1, n_digits=4, cv_kwargs={}):
    # sklearn convention: store constructor params verbatim; validation
    # happens lazily in _fit_start / _check_step.
    # NOTE(review): <cv_kwargs> uses a mutable default dict; harmless only
    # while it is never mutated in place — confirm before adding updates.
    self.estimator = estimator
    self.scoring = scoring
    self.cv = cv
    self.cv_kwargs = cv_kwargs
    self.min_features = min_features
    self.step = step
    self.use_best = use_best
    self.n_jobs = n_jobs
    self.verbose = verbose
    self.n_digits = n_digits
def fit(self, X, y, groups=None):
    """Start a fresh elimination run and fit on (X, y)."""
    self._fit_start(X)
    return self._fit(X, y, groups)
def partial_fit(self, X, y, groups=None):
    """Continue a previously started elimination run on (X, y)."""
    self._fit_start(X, partial=True)
    return self._fit(X, y, groups)
def _fit_start(self, X, partial=False):
    """Initialize (or resume) the elimination schedule.

    Builds <k_range_>: the sequence of subset sizes to evaluate, shrinking
    from the current feature count down to <min_features_>.
    """
    if not partial:
        # Fresh run: start from the full feature set and clear trial history.
        self._set_features(X)
        self.subset_ = self.features_
        self._reset_trials()
    self.k_range_ = []
    k_features = self.n_features_
    while k_features > self.min_features_:
        step = _check_step(self.step, k_features, self.min_features_)
        k_features = k_features - step
        self.k_range_.append(k_features)
    # +1 accounts for the initial evaluation of the full subset in _fit().
    self.max_iter = len(self.k_range_) + getattr(self, 'n_iters_', 0) + 1
    self.k_range_ = iter(self.k_range_)
    return self
@property
def k_features_(self):
    # Current number of selected features.
    return len(self.subset_)
@property
def forward(self):
    # RFE eliminates features, i.e. it is a backward (not forward) selector.
    return False
def _fit(self, X, y, groups):
    """Backward elimination loop: evaluate, prune to the k best, repeat."""
    self.eval_subset(self.subset_, X, y, groups)
    for k in self.k_range_:
        try:
            # Importance scores come from the last evaluation of the subset.
            scores = self.subset_.importance
            subset = _select_k_best(scores, k)
            parent = self.subset_
            self.subset_ = self.subset_.copy().set_subset(subset)
            self.subset_.parents = [parent]
            self.eval_subset(self.subset_, X, y, groups)
            if self.k_features_ <= self.min_features_:
                break
        except KeyboardInterrupt:
            # Allow the user to stop early and keep the best subset so far.
            break
    return self
def get_subset(self):
    """Return the selected feature subset.

    Returns the best subset found so far when <use_best> is truthy,
    otherwise the current (last evaluated) subset.

    Raises
    ------
    NotFittedError
        If no subset has been evaluated yet.
    """
    # FIX: replaced un-idiomatic `is True` / `is False` comparisons with
    # plain truthiness tests (PEP 8).
    if self.use_best and self.n_iters_ > 0:
        return self.best_subset_
    if not self.use_best and len(self.subset_) > 0:
        return self.subset_
    model_name = self.__class__.__name__
    raise NotFittedError(f'{model_name} is not fitted')
class GroupRFE(_WrappedGroupSelector, RFE):
    """RFE operating on feature groups instead of individual columns."""
    pass
class PermutationRFE(RFE):
    """RFE that scores feature importance via permutation importance.

    Each candidate subset is evaluated with PermutationImportance: the score
    drop caused by shuffling a feature is used as its importance.

    Parameters mirror RFE, plus:

    n_repeats : int (default=5)
        Number of permutation rounds per feature.
    random_state : int or None (default=0)
        Seed for the permutations.
    tqdm : optional (default=None)
        Progress-bar object forwarded to PermutationImportance.
    y_transform : callable or None (default=None)
        Optional target transform forwarded to PermutationImportance.
    """
    def __init__(self, estimator, cv=5, scoring=None, min_features=0.5, step=1,
                 n_repeats=5, random_state=0, use_best=True, n_jobs=None,
                 verbose=1, n_digits=4, tqdm=None, y_transform=None):
        self.estimator = estimator
        self.scoring = scoring
        self.cv = cv
        self.y_transform = y_transform
        self.min_features = min_features
        self.step = step
        self.n_repeats = n_repeats
        self.random_state = random_state
        self.use_best = use_best
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.n_digits = n_digits
        self.tqdm = tqdm
    def _eval_subset(self, subset, X, y, groups=None):
        """Fit PermutationImportance on the subset and record its stats."""
        perm = PermutationImportance(self.estimator, self.cv, self.scoring,
                                     self.n_repeats, n_jobs=self.n_jobs,
                                     random_state=self.random_state,
                                     tqdm=self.tqdm,
                                     y_transform=self.y_transform)
        perm.fit(X[subset], y, groups)
        # Aggregate CV scores and per-feature importances onto the subset.
        subset.score = np.average(perm.scores_)
        subset.score_std = np.std(perm.scores_)
        subset.importance = perm.feature_importances_
        subset.importance_std = perm.feature_importances_std_
        return subset
class GroupPermutationRFE(_WrappedGroupSelector, PermutationRFE):
    """Group-aware variant of PermutationRFE."""
    def _eval_subset(self, subset, X, y, groups=None):
        """Evaluate the subset with group-wise permutation importance."""
        # NOTE(review): unlike PermutationRFE, y_transform is NOT forwarded
        # here — confirm whether GroupPermutationImportance supports it.
        perm = GroupPermutationImportance(self.estimator, self.cv, self.scoring,
                                          self.n_repeats, n_jobs=self.n_jobs,
                                          random_state=self.random_state,
                                          tqdm=self.tqdm)
        perm.fit(X[subset], y, groups)
        subset.score = np.average(perm.scores_)
        subset.score_std = np.std(perm.scores_)
        subset.importance = perm.feature_importances_
        subset.importance_std = perm.feature_importances_std_
        return subset
class ShuffleRFE(RFE):
    """RFE that scores feature importance via target-shuffle importance.

    Each candidate subset is evaluated with ShuffleTargetImportance: the
    score gain over a shuffled-target baseline is used as importance.

    Parameters mirror RFE, plus:

    n_repeats : int (default=5)
        Number of target-shuffle rounds.
    gain : str (default='dif')
        How the gain over the shuffled baseline is computed.
    random_state : int or None (default=0)
        Seed for the shuffles.
    tqdm : bool (default=False)
        Whether to show a progress bar.
    """
    def __init__(self, estimator, cv=5, scoring=None, min_features=0.5, step=1,
                 n_repeats=5, gain='dif', random_state=0, use_best=True,
                 n_jobs=None, tqdm=False, verbose=0, cv_kwargs={}):
        self.estimator = estimator
        self.scoring = scoring
        self.cv = cv
        self.cv_kwargs = cv_kwargs
        self.min_features = min_features
        self.step = step
        self.n_repeats = n_repeats
        self.gain = gain
        self.random_state = random_state
        self.use_best = use_best
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.tqdm = tqdm
    def _eval_subset(self, subset, X, y, groups=None):
        """Fit ShuffleTargetImportance on the subset and record its stats."""
        shuff = ShuffleTargetImportance(self.estimator, self.cv, self.scoring,
                                        n_repeats=self.n_repeats, gain=self.gain,
                                        n_jobs=self.n_jobs, tqdm=self.tqdm,
                                        random_state=self.random_state,
                                        cv_kwargs=self.cv_kwargs)
        shuff.fit(X[subset], y, groups)
        subset.score = np.average(shuff.scores_)
        subset.score_std = np.std(shuff.scores_)
        subset.importance = shuff.feature_importances_
        subset.importance_std = shuff.feature_importances_std_
        return subset
class GroupShuffleRFE(_WrappedGroupSelector, ShuffleRFE):
    """Group-aware variant of ShuffleRFE."""
    def _eval_subset(self, subset, X, y, groups=None):
        """Evaluate the subset and aggregate importances per feature group."""
        # NOTE(review): uses the plain ShuffleTargetImportance (no group-aware
        # variant); raw per-column importances are regrouped below.
        shuff = ShuffleTargetImportance(self.estimator, self.cv, self.scoring,
                                        n_repeats=self.n_repeats, gain=self.gain,
                                        n_jobs=self.n_jobs, tqdm=self.tqdm,
                                        random_state=self.random_state,
                                        cv_kwargs=self.cv_kwargs)
        shuff.fit(X[subset], y, groups)
        subset.score = np.average(shuff.scores_)
        subset.score_std = np.std(shuff.scores_)
        # Regroup raw per-column importances into group-level importances.
        result = dict(importance=shuff.raw_importances_, features=list(X[subset]))
        subset = self._get_importance(subset, result)
        return subset
def _select_k_best(scores, k_best):
return scores.index[np.argsort(-scores.values)][:k_best]
def _check_step(step, n_features, k_features):
if isinstance(step, int):
if step > 0:
step = step
else:
raise ValueError('Integer <step> must be greater than 0')
elif isinstance(step, float):
if 0 < step < 1:
step = max(step * n_features, 1)
step = int(step)
else:
raise ValueError('Float <step> must be from interval (0, 1)')
else:
raise ValueError(f'Parameter <step> must be int or float, got {step}')
return min(step, int(n_features-k_features)) | /robusta-0.0.1.tar.gz/robusta-0.0.1/selector/rfe.py | 0.755907 | 0.518546 | rfe.py | pypi |
import numpy as np
import pandas as pd
from robusta.selector.base import _WrappedSelector, _WrappedGroupSelector, _check_k_features
from sklearn.utils.random import check_random_state
def perturb_subset(subset, step, random_state=None, drop_attrs=('score',)):
    """Return a copy of <subset> with <step> features randomly toggled.

    <step> features are sampled from the full feature pool; those already in
    the subset are removed from it, the rest are added to it.

    Parameters
    ----------
    subset : FeatureSubset
        Subset to perturb (not modified).
    step : int
        Number of features to toggle.
    random_state : int, RandomState instance or None (default=None)
        Controls the feature sampling.
    drop_attrs : iterable of str, default=('score',)
        Names of cached attributes to delete from the perturbed copy
        (they are stale for the new feature set).

    Returns
    -------
    FeatureSubset
        The perturbed copy, with <parents> set to the original subset.
    """
    # FIX: drop_attrs previously defaulted to the mutable list ['score'];
    # a tuple avoids the shared-mutable-default pitfall.
    rstate = check_random_state(random_state)
    update = rstate.choice(subset.features, step, False)
    del_list = set(subset) & set(update)
    add_list = set(update) - set(subset)
    subset_ = subset.copy()
    subset_ = subset_.remove(*del_list)
    subset_ = subset_.append(*add_list)
    # Drop cached attributes that are no longer valid for the new subset.
    for attr in drop_attrs:
        if hasattr(subset_, attr):
            delattr(subset_, attr)
    subset_.parents = (subset, )
    return subset_
class SAS(_WrappedSelector):
    """Simulated Annealing feature Selection.

    Starts from a random feature subset and repeatedly perturbs it. Better
    subsets are always accepted; worse ones are accepted with a probability
    that shrinks as iterations progress, letting the search escape local
    optima.

    Parameters
    ----------
    estimator : object
        Estimator used to evaluate candidate subsets.
    cv : int, cross-validation generator or iterable (default=5)
        Cross-validation splitting strategy.
    scoring : string, callable or None (default=None)
        Scorer used on each subset evaluation.
    min_step, max_step : int or float (default=0.01, 0.05)
        Bounds of the random perturbation size (floats mean a share of the
        total feature count).
    min_features, max_features : int or float (default=0.1, 0.9)
        Bounds of the initial random subset size.
    max_iter : int (default=50)
        Maximum number of subset evaluations.
    temp : float (default=1.0)
        Initial annealing temperature.
    random_state : int, RandomState instance or None (default=None)
        Controls subset sampling and acceptance draws.
    n_jobs : int or None (default=None)
        Number of parallel jobs for subset evaluation.
    verbose : int (default=1)
        Verbosity level.
    n_digits : int (default=4)
        Rounding precision used in verbose output.
    cv_kwargs : dict
        Extra keyword arguments forwarded to the CV routine.
    """
    def __init__(self, estimator, cv=5, scoring=None, min_step=0.01, max_step=0.05,
                 min_features=0.1, max_features=0.9, max_iter=50, temp=1.0,
                 random_state=None, n_jobs=None, verbose=1, n_digits=4,
                 cv_kwargs={}):
        # sklearn convention: store params verbatim; validate lazily.
        self.estimator = estimator
        self.cv = cv
        self.cv_kwargs = cv_kwargs
        self.scoring = scoring
        self.min_features = min_features
        self.max_features = max_features
        self.min_step = min_step
        self.max_step = max_step
        self.max_iter = max_iter
        self.temp = temp
        self.random_state = random_state
        self.verbose = verbose
        self.n_digits = n_digits
        self.n_jobs = n_jobs
    def fit(self, X, y, groups=None):
        """Run the annealing search from scratch."""
        self._fit_start(X, y, groups)
        self._fit(X, y, groups)
        return self
    def partial_fit(self, X, y, groups=None):
        """Continue a previously started search (no re-initialization)."""
        self._fit(X, y, groups)
        return self
    def _fit_start(self, X, y, groups):
        """Initialize the RNG, the feature pool, and the first random subset."""
        # Basic
        self.rstate_ = check_random_state(self.random_state)
        self._set_features(X)
        self._reset_trials()
        # First trial: random subset with size drawn uniformly from
        # [min_features_, max_features_].
        k_min = self.min_features_
        k_max = self.max_features_
        k = self.rstate_.choice(range(k_min, k_max+1))
        subset = self.features_.sample(size=k, random_state=self.rstate_)
        self.eval_subset(subset, X, y, groups)
        self.subset_ = subset
        return self
    def _fit(self, X, y, groups=None):
        """Main annealing loop; interruptible with Ctrl-C."""
        while self.n_iters_ < self.max_iter:
            try:
                # Perturb the current subset: toggle a random number of
                # features drawn from [min_step_, max_step_].
                k_min = self.min_step_
                k_max = self.max_step_
                k = self.rstate_.choice(range(k_min, k_max+1))
                subset = perturb_subset(self.subset_, k, self.rstate_)
                # Evaluate performance of the candidate.
                self.eval_subset(subset, X, y, groups)
                old_score = self.subset_.score
                new_score = subset.score
                if new_score > old_score:
                    self.subset_ = subset
                else:
                    # Metropolis acceptance: the effective temperature decays
                    # with n_iters_, so worse candidates are accepted less
                    # and less often.
                    # NOTE(review): assumes n_iters_ >= 1 here (set by the
                    # initial evaluation) — ZeroDivisionError otherwise.
                    temp = self.temp * self.max_iter / self.n_iters_
                    diff = (old_score - new_score) / abs(old_score)
                    prob = np.exp(-diff/temp)
                    if self.rstate_.rand() < prob:
                        self.subset_ = subset
            except KeyboardInterrupt:
                break
        return self
    @property
    def min_step_(self):
        # Lower perturbation bound resolved to an absolute feature count.
        min_step = _check_k_features(self.min_step,
                                     self.n_features_,
                                     'min_step')
        return min_step
    @property
    def max_step_(self):
        # Upper perturbation bound resolved to an absolute feature count.
        max_step = _check_k_features(self.max_step,
                                     self.n_features_,
                                     'max_step')
        return max_step
class GroupSAS(_WrappedGroupSelector, SAS):
    """Simulated annealing selector operating on feature groups."""
    pass
import pandas as pd
import numpy as np
import scipy
from itertools import combinations
from sklearn.base import clone, BaseEstimator, TransformerMixin
from sklearn.preprocessing import PowerTransformer, QuantileTransformer
from sklearn.preprocessing import Normalizer, normalize
from dask_ml.preprocessing import PolynomialFeatures
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array
from scipy.special import boxcox
__all__ = [
'DowncastTransformer',
'GaussRankTransformer',
'QuantileTransformer',
'StandardScaler',
'RobustScaler',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'Winsorizer',
'SyntheticFeatures',
'KBinsDiscretizer1D',
'KBinsDiscretizer',
'PowerTransformer',
'Binarizer',
'PolynomialFeatures',
]
NP_INT_DTYPES = ['int64', 'int32', 'int16', 'int8', 'uint32', 'uint16', 'uint8']
PD_INT_DTYPES = ['Int64', 'Int32', 'Int16', 'Int8', 'UInt32', 'UInt16', 'UInt8']
FLOAT_DTYPES = ['float64', 'float32', 'float16']
class DowncastTransformer(BaseEstimator, TransformerMixin):
    """Downcast numeric columns to the smallest numerical dtype possible
    according to the following rules:

    - integer-valued columns: smallest (numpy or pandas nullable) int dtype
      that can hold the column's range
    - other columns: smallest float dtype (min.: np.float16)

    Parameters
    ----------
    numpy_only : bool, default True
        If True, consider only numpy int dtypes; otherwise consider
        pandas nullable Int dtypes.
    errors : {'ignore', 'raise'}, default 'raise'
        If 'raise', then non-numeric columns will raise an exception.
        If 'ignore', then non-numeric columns will be passed with no changes.
    copy : bool, default True
        If False, change original dataframe.
    """
    def __init__(self, numpy_only=True, errors='raise', copy=True):
        self.numpy_only = numpy_only
        self.errors = errors
        self.copy = copy

    def fit(self, X, y=None):
        """Record columns/dtypes and validate the <errors> parameter.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        self
        """
        # Select numeric
        self.cols = list(X.columns)
        self.nums = list(X.select_dtypes(np.number))
        self.dtypes = X.dtypes.copy()
        # Check <errors> value
        errors_vals = ['raise', 'ignore']
        if self.errors not in errors_vals:
            raise ValueError('<errors> must be in {}'.format(errors_vals))
        # FIX: identity comparison (`is 'raise'`) replaced with equality —
        # string interning made it work by accident in CPython only.
        if len(self.nums) < len(self.cols) and self.errors == 'raise':
            cols_diff = list(set(self.cols) - set(self.nums))
            raise ValueError("Found non-numeric columns {}".format(cols_diff))
        return self

    def transform(self, X):
        """Downcast each column to the most efficient dtype.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame, shape [n_samples, n_features]
            Transformed input.
        """
        # Check columns against what fit() saw.
        cols_diff = set(self.cols) ^ set(X.columns)
        nums_diff = set(self.nums) ^ set(X.select_dtypes(np.number))
        if len(cols_diff) > 0:
            raise ValueError("Found new columns {}".format(cols_diff))
        if len(nums_diff) > 0:
            raise ValueError("Found new numeric columns {}".format(nums_diff))
        # Pick the smallest dtype per numeric column, then cast in one go.
        for col, x in X[self.nums].items():
            col_type = self._fit_downcast(x)
            self.dtypes[col] = col_type
        return X.astype(self.dtypes, errors=self.errors, copy=self.copy)

    def _fit_downcast(self, x):
        """Return the smallest dtype that represents column <x> exactly."""
        x_min = x.min()
        x_max = x.max()
        try:
            INT_DTYPES = NP_INT_DTYPES if self.numpy_only else PD_INT_DTYPES
            # Non-integral values (incl. NaN) either fail the cast or differ
            # after it — the raise below routes them to the float branch.
            if (x.astype(INT_DTYPES[0]) != x).any():
                raise ValueError()
            col_type = INT_DTYPES[0]
            col_bits = np.iinfo(col_type).bits
            for int_type in INT_DTYPES:
                int_info = np.iinfo(int_type)
                if (x_min >= int_info.min) \
                        and (x_max <= int_info.max) \
                        and (col_bits >= int_info.bits):
                    col_bits = int_info.bits
                    col_type = int_type
        # FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # catch only the casting/comparison errors this path can produce.
        except (ValueError, TypeError, OverflowError):
            col_type = FLOAT_DTYPES[0]
            col_bits = np.finfo(col_type).bits
            for float_type in FLOAT_DTYPES:
                float_info = np.finfo(float_type)
                if (x_min >= float_info.min) \
                        and (x_max <= float_info.max) \
                        and (col_bits > float_info.bits):
                    col_bits = float_info.bits
                    col_type = float_type
        return col_type
class QuantileTransformer(QuantileTransformer):
    """sklearn QuantileTransformer that preserves DataFrame columns/index."""
    def transform(self, X):
        # Remember frame metadata so the ndarray result can be wrapped back.
        return_df = hasattr(X, 'columns')
        if return_df:
            columns = X.columns
            index = X.index
        # NOTE(review): relies on private sklearn internals (_check_inputs,
        # _check_is_fitted, _transform) whose signatures vary across sklearn
        # versions — confirm against the pinned version.
        X = self._check_inputs(X)
        self._check_is_fitted(X)
        # _transform works in place on the validated array and keeps X bound.
        self._transform(X, inverse=False)
        if return_df:
            return pd.DataFrame(X, columns=columns, index=index)
        else:
            return X
class GaussRankTransformer(BaseEstimator, TransformerMixin):
    """Normalize numerical features by the Gauss Rank scheme.

    http://fastml.com/preparing-continuous-features-for-neural-networks-with-rankgauss/

    Input normalization for gradient-based models such as neural nets is critical.
    For lightgbm/xgb it does not matter. The best what I found during the past and
    works straight of the box is "RankGauss". Its based on rank transformation.

    1. First step is to assign a linspace to the sorted features from -1 to 1,
    2. then apply the inverse of error function ErfInv to shape them like gaussians,
    3. then substract the mean. Binary features are not touched with this trafo.

    This works usually much better than standard mean/std scaler or min/max.

    Parameters
    ----------
    ranker : object
        Rank transformer; cloned (or reused when copy=False) and re-fitted
        on every transform call.
    eps : float, default=1e-9
        Inversed Error Function (ErfInv) is undefined for x=-1 or x=+1, so
        its argument is clipped to range [-1 + eps, +1 - eps]
    copy : boolean, optional, default True
        Set to False to perform inplace row normalization and avoid a copy.
    """
    def __init__(self, ranker=QuantileTransformer(), copy=True, eps=1e-9):
        # NOTE(review): the default ranker is a single shared instance across
        # all GaussRankTransformer objects (mutable default); with copy=False
        # it is also mutated by transform() — confirm this is intended.
        self.ranker = ranker
        self.copy = copy
        self.eps = eps
    def fit(self, X, y=None):
        # NOTE(review): fitting actually happens inside transform() (the
        # ranker is re-fitted on every call), so this transformer does not
        # follow the usual fit/transform split — confirm intended.
        return self
    def transform(self, X):
        if self.copy:
            X, self.ranker_ = X.copy(), clone(self.ranker)
        else:
            # copy=False reuses (and mutates) the shared self.ranker object.
            X, self.ranker_ = X, self.ranker
        # Rank to [0, 1], rescale to (-1, 1) shrunk by eps, then map through
        # erfinv to obtain an approximately Gaussian shape.
        X = self.ranker_.fit_transform(X)
        X -= 0.5
        X *= 2.0 - self.eps
        X = scipy.special.erfinv(X)
        return X
class Winsorizer(BaseEstimator, TransformerMixin):
    """Winsorization: clip extreme values to fitted quantiles.

    Replace extreme values with the values at the defined quantiles
    (0.05 and 0.95 by default).

    Parameters
    ----------
    q_min : float [0..1], default=0.05
        Lower quantile.
    q_max : float [0..1], default=0.95
        Upper quantile.
    apply_test : bool, default=True
        If False, apply winsorization only for <fit_transform>.
        If True, apply to both <fit_transform> and <transform>.

    Attributes
    ----------
    min_ : Series
        Fitted per-column lower clip bounds.
    max_ : Series
        Fitted per-column upper clip bounds.
    """
    def __init__(self, q_min=0.05, q_max=0.95, apply_test=True):
        self.apply_test = apply_test
        self.q_min = q_min
        self.q_max = q_max

    def fit_transform(self, X, y=None):
        """Fit the clip bounds and winsorize the training data.

        FIX: previously delegated to transform(), so with apply_test=False
        the training data was never winsorized — contradicting the
        documented contract. The training pass now always clips.
        """
        self._fit(X)
        return X.clip(self.min_, self.max_, axis=1)

    def fit(self, X, y=None):
        """Fit the per-column clip bounds."""
        return self._fit(X)

    def transform(self, X):
        """Winsorize X, or pass it through when <apply_test> is False."""
        return X.clip(self.min_, self.max_, axis=1) if self.apply_test else X

    def _fit(self, X):
        """Validate parameters and memorize per-column quantile bounds."""
        assert self.apply_test in [True, False], '<apply_test> must be boolean'
        assert isinstance(self.q_min, float), '<q_min> must be float'
        assert isinstance(self.q_max, float), '<q_max> must be float'
        assert self.q_min < self.q_max, '<q_min> must be smaller than <q_max>'
        assert 0 <= self.q_min <= 1, '<q_min> must be in [0..1]'
        assert 0 <= self.q_max <= 1, '<q_max> must be in [0..1]'
        self.min_ = X.quantile(self.q_min)
        self.max_ = X.quantile(self.q_max)
        return self
class SyntheticFeatures(BaseEstimator, TransformerMixin):
    """Generate pairwise arithmetic feature combinations.

    For every pair of columns (a, b) builds the requested synthetic columns
    'a+b', 'a-b', 'a*b', and both 'a/b' and 'b/a' (division is stabilized
    by adding <eps> to the denominator).

    Parameters
    ----------
    pair_sum, pair_dif, pair_mul, pair_div : bool, default=True
        Which pairwise operations to generate.
    join_X : bool, default=True
        If True, return the original columns alongside the synthetic ones.
    eps : float, default=1e-2
        Added to the denominator to avoid division by zero.
    """
    def __init__(self, pair_sum=True, pair_dif=True, pair_mul=True, pair_div=True,
                 join_X=True, eps=1e-2):
        self.pair_sum = pair_sum
        self.pair_dif = pair_dif
        self.pair_mul = pair_mul
        self.pair_div = pair_div
        self.join_X = join_X
        self.eps = eps

    def fit(self, X, y=None):
        """Memorize column names (generated positionally for plain arrays)."""
        if isinstance(X, pd.core.frame.DataFrame):
            self.columns = X.columns
        else:
            self.columns = ['x_{}'.format(i) for i in range(X.shape[1])]
        return self

    def transform(self, X):
        """Build the synthetic feature frame.

        NOTE(review): requires at least two columns — with fewer, the pair
        array below is empty and there are no pairs to combine.
        """
        if isinstance(X, pd.core.frame.DataFrame):
            inds = X.index
        else:
            inds = np.arange(X.shape[0])
        X = pd.DataFrame(X, columns=self.columns, index=inds)
        Xt = pd.DataFrame(index=inds)
        # FIX: removed unused locals cols_A / cols_B.
        cols_pairs = np.array(list(combinations(self.columns, 2)))
        if self.pair_sum:
            cols = ['{}+{}'.format(a, b) for a, b in cols_pairs]
            F = np.vstack([X[a].values + X[b].values for a, b in cols_pairs]).T
            F = pd.DataFrame(F, index=inds, columns=cols)
            Xt = Xt.join(F)
        if self.pair_dif:
            cols = ['{}-{}'.format(a, b) for a, b in cols_pairs]
            F = np.vstack([X[a].values - X[b].values for a, b in cols_pairs]).T
            F = pd.DataFrame(F, index=inds, columns=cols)
            Xt = Xt.join(F)
        if self.pair_mul:
            cols = ['{}*{}'.format(a, b) for a, b in cols_pairs]
            F = np.vstack([X[a].values * X[b].values for a, b in cols_pairs]).T
            F = pd.DataFrame(F, index=inds, columns=cols)
            Xt = Xt.join(F)
        if self.pair_div:
            # a/b for each ordered pair ...
            cols = ['{}/{}'.format(a, b) for a, b in cols_pairs]
            F = np.vstack([X[a].values / (X[b].values + self.eps) for a, b in cols_pairs]).T
            F = pd.DataFrame(F, index=inds, columns=cols)
            Xt = Xt.join(F)
            # ... and b/a for the reversed pairs.
            cols = ['{}/{}'.format(a, b) for b, a in cols_pairs]
            F = np.vstack([X[a].values / (X[b].values + self.eps) for b, a in cols_pairs]).T
            F = pd.DataFrame(F, index=inds, columns=cols)
            Xt = Xt.join(F)
        if self.join_X:
            Xt = X.join(Xt)
        return Xt
class RobustScaler(BaseEstimator, TransformerMixin):
    """Scale features using statistics that are robust to outliers.

    Parameters
    ----------
    centering : bool, default=True
        If True, center the data on the per-column median before scaling.
    scaling : bool, default=True
        If True, scale the data by the inter-quantile range.
    quantiles : tuple (q_min, q_max), 0.0 <= q_min < q_max <= 1.0
        Default: (0.25, 0.75) = (1st quartile, 3rd quartile) = IQR.
        Quantile range used to calculate ``scale_``.
    copy : boolean, optional, default is True
        If False, try to avoid a copy and do inplace scaling instead.
    eps : float, default=1e-3
        Ranges smaller than this are replaced with 1 to avoid division
        by (near) zero on quasi-constant columns.

    Attributes
    ----------
    center_ : Series of floats
        The median value for each feature in the training set
        (only set when <centering> is True).
    scale_ : Series of floats
        The inter-quantile range for each feature in the training set
        (only set when <scaling> is True).
    """
    def __init__(self, centering=True, scaling=True, quantiles=(0.25, 0.75),
                 copy=True, eps=1e-3):
        self.centering = centering
        self.scaling = scaling
        self.quantiles = quantiles
        self.copy = copy
        self.eps = eps
    def fit(self, X, y=None):
        # Validate the quantile range before computing statistics.
        q_min, q_max = self.quantiles
        if not 0 <= q_min <= q_max <= 1:
            raise ValueError(f"Invalid quantiles: {self.quantiles}")
        if self.centering:
            self.center_ = X.median()
        if self.scaling:
            self.scale_ = X.quantile(q_max) - X.quantile(q_min)
            # Guard quasi-constant columns against division by ~zero.
            self.scale_[self.scale_ < self.eps] = 1
        return self
    def transform(self, X):
        X = X.copy() if self.copy else X
        if self.centering:
            X -= self.center_
        if self.scaling:
            X /= self.scale_
        return X
class StandardScaler(BaseEstimator, TransformerMixin):
    """Standardize features by removing the mean and scaling to unit variance.

    Parameters
    ----------
    with_mean : bool, default=True
        Subtract the per-column mean before scaling.
    with_std : bool, default=True
        Divide by the per-column standard deviation.
    copy : bool, default=True
        If False, scale in place instead of on a copy.

    Attributes
    ----------
    mean_ : Series of floats or None
        Per-column means (None when <with_mean> is False).
    std_ : Series of floats or None
        Per-column standard deviations (None when <with_std> is False).
    """
    def __init__(self, with_mean=True, with_std=True, copy=True):
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy

    def fit(self, X, y=None):
        """Memorize the statistics used by transform()."""
        self.mean_ = X.mean() if self.with_mean else None
        self.std_ = X.std() if self.with_std else None
        return self

    def transform(self, X):
        """Center and/or scale X (in place when <copy> is False)."""
        out = X.copy() if self.copy else X
        if self.with_mean:
            out -= self.mean_
        if self.with_std:
            out /= self.std_
        return out
class MinMaxScaler(BaseEstimator, TransformerMixin):
    """Scale features to [0, 1].

    Parameters
    ----------
    copy : bool, default=True
        If False, try to avoid a copy and do inplace scaling instead.

    Attributes
    ----------
    min_ : Series of floats
        Per-column minimum values.
    max_ : Series of floats
        Per-column maximum values.
    std_ : Series of floats
        Scaling range (<max_> - <min_>); zeros are replaced with 1.
    """
    def __init__(self, copy=True):
        self.copy = copy

    def fit(self, X, y=None):
        """Memorize per-column min/max and the scaling range."""
        self.min_ = X.min()
        self.max_ = X.max()
        self.std_ = self.max_ - self.min_
        # FIX: constant columns have zero range, which previously produced a
        # division by zero (inf/NaN) in transform; map them to 0 instead
        # (mirrors the eps guard used by RobustScaler).
        self.std_[self.std_ == 0] = 1
        return self

    def transform(self, X):
        """Rescale X into [0, 1] (in place when <copy> is False)."""
        X = X.copy() if self.copy else X
        X -= self.min_
        X /= self.std_
        return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
    """Scale each feature by its maximum absolute value.

    Parameters
    ----------
    copy : bool, default=True
        If False, try to avoid a copy and do inplace scaling instead.

    Attributes
    ----------
    scale_ : Series of floats
        Per-column maximum absolute value.
    """
    def __init__(self, copy=True):
        self.copy = copy

    def fit(self, X, y=None):
        """Memorize the per-column max-abs scale."""
        lo, hi = X.min(), X.max()
        # The largest magnitude is the max of |column min| and |column max|.
        self.scale_ = pd.concat([lo, hi], axis=1).abs().max(axis=1)
        return self

    def transform(self, X):
        """Divide each column by its fitted scale (in place if copy=False)."""
        out = X.copy() if self.copy else X
        out /= self.scale_
        return out
class Normalizer(Normalizer):
    """sklearn Normalizer that preserves DataFrame columns/index."""
    def transform(self, X):
        # Remember frame metadata so the ndarray result can be wrapped back.
        return_df = hasattr(X, 'columns')
        if return_df:
            columns = X.columns
            index = X.index
        X = check_array(X, accept_sparse='csr')
        # Row-wise normalization with the configured norm ('l1'/'l2'/'max').
        X = normalize(X, axis=1, copy=self.copy, norm=self.norm)
        if return_df:
            return pd.DataFrame(X, columns=columns, index=index)
        else:
            return X
class KBinsDiscretizer1D(BaseEstimator, TransformerMixin):
    """Continuous feature binning for a single 1-D input.

    Parameters
    ----------
    bins : int or array-like
        Number of bins, or quantiles (if strategy='quantile'), or bin edges
        (if strategy='uniform').
    strategy : {'quantile', 'uniform'}
        If <bins> is int, determines binning type:
        - 'quantile': split feature into equal-size bins
        - 'uniform': split feature by equal-distance edges
        If <bins> is array-like, interpreted as the type of passed edges:
        - 'quantile': quantiles (must be monotonic and in range [0..1])
        - 'uniform': exact edge values

    Attributes
    ----------
    bins_ : array of floats
        Computed bin edges.
    """
    def __init__(self, bins=5, strategy='quantile'):
        self.bins = bins
        self.strategy = strategy

    def fit(self, y):
        """Compute bin edges for the 1-D input.

        Raises
        ------
        ValueError
            If <strategy> is not 'quantile' or 'uniform'.
        """
        # FIX: strings were compared with 'is' (identity), which only worked
        # by CPython interning accident — compare with equality instead.
        if self.strategy == 'quantile':
            _, self.bins_ = pd.qcut(y, self.bins, retbins=True, duplicates='drop')
        elif self.strategy == 'uniform':
            _, self.bins_ = pd.cut(y, self.bins, retbins=True, duplicates='drop')
        else:
            raise ValueError(f"Unknown strategy value passed: {self.strategy}")
        return self

    def transform(self, y):
        """Map values onto the fitted bin intervals."""
        return pd.cut(y, self.bins_)
class KBinsDiscretizer(KBinsDiscretizer1D):
    """Continuous feature binning, applied column-wise to a DataFrame.

    Parameters
    ----------
    bins : int or array-like
        Number of bins, or quantiles (if strategy='quantile'), or bin edges
        (if strategy='uniform').
    strategy : {'quantile', 'uniform'}
        If <bins> is int, determines binning type:
        - 'quantile': split feature into equal-size bins
        - 'uniform': split feature by equal-distance edges
        If <bins> is array-like, interpreted as the type of passed edges:
        - 'quantile': quantiles (must be monotonic and in range [0..1])
        - 'uniform': exact edge values

    Attributes
    ----------
    bins_ : dict of array of floats
        Computed bin edges, keyed by column name.
    """
    def fit(self, X, y=None):
        """Fit one KBinsDiscretizer1D per column with the same parameters."""
        self.transformers = {}
        self.bins_ = {}
        for col in X:
            # get_params() is safe here because this subclass adds no
            # constructor parameters of its own.
            params = self.get_params()
            transformer = KBinsDiscretizer1D(**params).fit(X[col])
            self.transformers[col] = transformer
            self.bins_[col] = transformer.bins_
        return self
    def transform(self, X):
        """Bin each fitted column; columns not seen in fit are dropped."""
        Xt = pd.DataFrame(index=X.index)
        for col, transformer in self.transformers.items():
            Xt[col] = transformer.transform(X[col])
        return Xt
class PowerTransformer(PowerTransformer):
    """sklearn PowerTransformer that preserves DataFrame columns/index."""

    def fit_transform(self, X, y=None):
        """Fit and transform, re-wrapping the result as a DataFrame."""
        return_df = hasattr(X, 'columns')
        if return_df:
            columns = X.columns
            index = X.index
        # NOTE(review): relies on the private sklearn _fit API, which may
        # change between sklearn versions — confirm against the pinned one.
        X = self._fit(X, y, force_transform=True)
        if return_df:
            return pd.DataFrame(X, columns=columns, index=index)
        else:
            return X

    def transform(self, X):
        """Apply the fitted power transform, preserving frame metadata."""
        return_df = hasattr(X, 'columns')
        if return_df:
            columns = X.columns
            index = X.index
        check_is_fitted(self, 'lambdas_')
        X = self._check_input(X, check_positive=True, check_shape=True)
        transform_function = {'box-cox': boxcox,
                              'yeo-johnson': self._yeo_johnson_transform
                              }[self.method]
        # Apply the per-column transform with its fitted lambda.
        for i, lmbda in enumerate(self.lambdas_):
            with np.errstate(invalid='ignore'):  # hide NaN warnings
                X[:, i] = transform_function(X[:, i], lmbda)
        if self.standardize:
            X = self._scaler.transform(X)
        # FIX: removed the unreachable trailing `return X` that followed
        # this exhaustive if/else.
        if return_df:
            return pd.DataFrame(X, columns=columns, index=index)
        else:
            return X
class Binarizer(BaseEstimator, TransformerMixin):
    """Binarize data (map feature values to 0 or 1) by a threshold.

    Values strictly greater than the threshold map to 1; values less than
    or equal to the threshold map to 0. With the default threshold of 0,
    only positive values map to 1.

    Parameters
    ----------
    threshold : float or array-like, shape (n_features, )
        Single threshold or one threshold per feature.
    """
    def __init__(self, threshold=0.0):
        self.threshold = threshold

    def fit(self, X, y=None):
        """Stateless transformer: nothing to fit."""
        return self

    def transform(self, X):
        """Return the uint8 0/1 mask of values above the threshold."""
        mask = X > self.threshold
        return mask.astype('uint8')
class PolynomialFeatures(PolynomialFeatures):
    """dask_ml PolynomialFeatures with sklearn-style parameter storage.

    NOTE(review): overrides __init__ without calling super().__init__ —
    assumes the parent class reads exactly these attributes; confirm against
    the installed dask_ml version.
    """
    def __init__(self, degree=2, interaction_only=False, include_bias=True,
                 preserve_dataframe=True):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias
        self.preserve_dataframe = preserve_dataframe
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from typing import Iterable
__all__ = [
'TypeSelector',
'TypeConverter',
'ColumnSelector',
'ColumnFilter',
'ColumnRenamer',
'ColumnGrouper',
'SimpleImputer',
'Identity',
'FunctionTransformer',
]
class ColumnSelector(BaseEstimator, TransformerMixin):
    '''Select specified columns.

    Useful for freezing Feature Selection after subset search is ended.

    Parameters
    ----------
    columns : None, string or list of strings
        Columns to select. A single string selects one column (a Series);
        None selects all columns.
    '''
    def __init__(self, columns=None):
        self.columns = columns

    def fit(self, X, y=None):
        '''Does nothing.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        self : ColumnSelector
            This estimator.
        '''
        return self

    def transform(self, X):
        """Select the specified columns from X.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame or Series
            Transformed input.
        """
        # FIX: the str and Iterable branches were identical (str is itself
        # Iterable), so they are merged; anything non-iterable (e.g. None)
        # selects all columns — same behavior as before.
        if isinstance(self.columns, Iterable):
            columns = self.columns
        else:
            columns = X.columns
        try:
            return X[columns]
        except KeyError:
            # FIX: a single string used to be iterated character-wise here,
            # producing a garbage error message; wrap it in a list first.
            requested = [columns] if isinstance(columns, str) else columns
            cols_error = list(set(requested) - set(X.columns))
            raise KeyError("The DataFrame does not include the "
                           "columns: %s" % cols_error)
class ColumnFilter(BaseEstimator, TransformerMixin):
    '''Select columns whose names satisfy a predicate.

    Parameters
    ----------
    func : callable
        Predicate applied to each column name during fit; columns for which
        it returns a truthy value are kept.
    **kwargs
        Extra keyword arguments.
        NOTE(review): stored but never used by this class — confirm whether
        they were meant to be forwarded to <func>.

    Attributes
    ----------
    features : list of string
        Names of the selected columns (set during fit).
    '''
    def __init__(self, func, **kwargs):
        self.func = func
        self.kwargs = kwargs
    def fit(self, X, y=None):
        '''Record the columns whose names pass the predicate.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        self : ColumnFilter
            This estimator.
        '''
        self.features = list(filter(self.func, X.columns))
        return self
    def transform(self, X):
        """Select the recorded columns from X.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame, shape [n_samples, n_features_selected]
            Transformed input.
        """
        return X[self.features]
class TypeSelector(BaseEstimator, TransformerMixin):
    '''Select columns of the specified dtype(s).

    Parameters
    ----------
    dtype : type, string or iterable of those
        Dtype(s) to select.

    Attributes
    ----------
    columns_ : list of string
        Columns of the requested dtype(s), determined during fit.
    '''
    def __init__(self, dtype):
        self.dtype = dtype

    def fit(self, X, y=None):
        '''Record the names of columns matching the requested dtype(s).

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        self
        '''
        dtype = self.dtype
        # A bare string or scalar dtype is wrapped into a one-element list;
        # any other iterable is used as-is.
        if isinstance(dtype, str) or not hasattr(dtype, '__iter__'):
            self.dtypes = [dtype]
        else:
            self.dtypes = dtype
        self.columns_ = X.select_dtypes(include=self.dtypes).columns
        return self

    def transform(self, X):
        """Return the columns recorded during fit.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame, shape [n_samples, n_features_selected]
            Transformed input.
        """
        return X[self.columns_]
class TypeConverter(BaseEstimator, TransformerMixin):
    '''Convert column type(s), remembering the originals for inversion.

    Parameters
    ----------
    dtypes : str or dict
        Types to convert (passed to DataFrame.astype).

    Attributes
    ----------
    dtypes_old_ : Series of dtypes
        Original per-column dtypes, captured during fit.
    dtypes_new_ : str or dict
        Target dtype specification.
    '''
    def __init__(self, dtypes):
        self.dtypes = dtypes
    def fit(self, X, y=None):
        '''Capture the original dtypes so the conversion can be inverted.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        self
        '''
        self.dtypes_old_ = X.dtypes
        self.dtypes_new_ = self.dtypes
        return self
    def transform(self, X):
        """Convert features to the target type(s).

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame, shape [n_samples, n_features]
            Transformed input.
        """
        return X.astype(self.dtypes_new_)
    def inverse_transform(self, X):
        """Convert features back to the original dtypes captured in fit.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame, shape [n_samples, n_features]
            Inverse transformed input.
        """
        return X.astype(self.dtypes_old_)
class Identity(BaseEstimator, TransformerMixin):
    '''No-op transformer: passes its input through unchanged.

    Useful as a placeholder branch inside pipelines and feature unions.
    '''
    def __init__(self):
        pass

    def fit(self, X, y=None):
        '''Nothing to fit.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        self
        '''
        return self

    def transform(self, X):
        """Return X untouched.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        X : DataFrame, shape [n_samples, n_features]
            Same data.
        """
        return X
class ColumnRenamer(BaseEstimator, TransformerMixin):
    '''Rename columns by callable, positional list, prefix string, or mapping.

    Parameters
    ----------
    column : None, callable, iterable, str or dict
        - None: keep the original names
        - callable: applied to each original name
        - iterable with one entry per column: new names, positionally
        - str: used as a prefix for positional names ('col0', 'col1', ...)
        - dict: partial old-name -> new-name mapping (unmapped names kept)
    prefix, suffix : string
        Strings concatenated at the beginning or/and at the end of every
        resulting name. Both default to '' (do nothing).
    copy : bool, default=True
        If False, rename in place.

    Attributes
    ----------
    mapper_ : dict
        Mapping from old column names to new ones, built during fit.
    '''
    def __init__(self, column=None, prefix='', suffix='', copy=True):
        self.column = column
        self.prefix = prefix
        self.suffix = suffix
        self.copy = copy

    def fit(self, X, y=None):
        '''Build the old-name -> new-name mapping.

        Raises
        ------
        ValueError
            If <column> is of an unsupported type.
        '''
        if self.column is not None:
            if callable(self.column):
                features = [self.column(x) for x in X]
            # FIX: `len(self.column) is X.shape[1]` compared ints by
            # identity; that silently returns False for frames wider than
            # CPython's small-int cache (> 256 columns). Use equality.
            elif hasattr(self.column, '__iter__') and len(self.column) == X.shape[1]:
                features = map(str, self.column)
            elif isinstance(self.column, str):
                features = [self.column + str(x) for x in range(X.shape[1])]
            elif isinstance(self.column, dict):
                # Partial mapping: unmapped columns keep their names.
                features = []
                for feature in X:
                    if feature in self.column:
                        features.append(self.column[feature])
                    else:
                        features.append(feature)
            else:
                raise ValueError('Unknown <column> type passed: {}'.format(self.column))
        else:
            features = X.columns
        features = [self.prefix + x + self.suffix for x in features]
        self.mapper_ = dict(zip(X.columns, features))
        return self

    def transform(self, X):
        """Apply the fitted renaming (on a copy unless <copy> is False).

        Returns
        -------
        Xt : DataFrame, shape [n_samples, n_features]
            Same data with renamed columns.
        """
        X = X.copy() if self.copy else X
        X.columns = X.columns.map(self.mapper_)
        return X
class SimpleImputer(BaseEstimator, TransformerMixin):
    '''Imputation transformer for completing missing values.

    Parameters
    ----------
    strategy : string, optional (default='mean')
        The imputation strategy.
        - 'mean': replace missing with mean along each column (numeric only).
        - 'median': replace missing with median along each column (numeric only).
        - 'mode': replace missing with most frequent value.
        - 'const': replace missing with <fill_value>.
    fill_value : string or numeric (default=None)
        Set value if <strategy> is 'const'. Ignored otherwise.
    copy : bool (default=True)
        Whether to copy data or impute inplace.

    Attributes
    ----------
    values_ : Series or single value
        The imputation fill value for each feature.
    '''
    def __init__(self, strategy='mean', fill_value=None, copy=True):
        self.strategy = strategy
        self.fill_value = fill_value
        self.copy = copy

    def fit(self, X, y=None):
        """Calculate the imputing values.

        Parameters
        ----------
        X : DataFrame of shape [n_samples, n_features]
            The data to fit.
        y : array-like, shape (n_samples, ...), optional
            Targets for supervised learning.

        Returns
        -------
        self : SimpleImputer
            This estimator.

        Raises
        ------
        ValueError
            If strategy is 'mean'/'median' on non-numeric data,
            or the strategy is unknown.
        """
        self.inplace = not self.copy
        if self.strategy in ['mean', 'median']:
            if not X.dtypes.apply(pd.api.types.is_numeric_dtype).all():
                raise ValueError("With strategy '{}' all columns must "
                                 "be numeric.".format(self.strategy))
            self.values_ = X.apply(self.strategy)
        # FIX: strings were compared with 'is' (identity), which only worked
        # by CPython interning accident — compare with equality instead.
        elif self.strategy == 'mode':
            # First row of mode() = the most frequent value per column.
            self.values_ = X.apply('mode').loc[0]
        elif self.strategy == 'const':
            self.values_ = self.fill_value
        else:
            raise ValueError("Unknown strategy '{}'".format(self.strategy))
        return self

    def transform(self, X):
        """Impute missing values.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame, shape [n_samples, n_features]
            Transformed input (X itself when <copy> is False).
        """
        # FIX: fillna(inplace=True) returns None, so the previous
        # `return X.fillna(..., inplace=np.invert(self.copy))` returned None
        # whenever copy=False. Return the frame in both modes.
        if self.copy:
            return X.fillna(self.values_)
        X.fillna(self.values_, inplace=True)
        return X
class ColumnGrouper(BaseEstimator, TransformerMixin):
    '''MultiIndex DataFrame constructor.

    Prepends a first level to the column index so that all features of
    this frame live under a common group name.

    Parameters
    ----------
    group : string, or list of strings
        Name or names for first level in MultiIndex.
    copy : bool (default=True)
        Whether to copy data in ``transform``.
    '''
    def __init__(self, group, copy=True):
        self.group = group
        self.copy = copy

    def fit(self, X, y=None):
        '''Form new MultiIndex column names.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        self
        '''
        features = X.columns
        if isinstance(self.group, str):
            # Single name: every feature goes under the same group.
            groups = [self.group] * len(features)
        elif isinstance(self.group, Iterable):
            groups = list(self.group)
        else:
            # BUG FIX: previously fell through and raised NameError on
            # the undefined <groups>; fail with an explicit message.
            raise ValueError('Unknown <group> type passed: {}'.format(self.group))
        self.features_ = pd.MultiIndex.from_arrays([groups, features])
        return self

    def transform(self, X):
        """Rename columns to the fitted MultiIndex.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame, shape [n_samples, n_features]
            Same data.
        """
        Xt = X.copy() if self.copy else X
        Xt.columns = self.features_
        return Xt
class FunctionTransformer(BaseEstimator, TransformerMixin):
    """Apply ``func`` elementwise to a DataFrame; ``inverse_func`` undoes it."""

    def __init__(self, func=None, inverse_func=None):
        self.func = func
        self.inverse_func = inverse_func

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        """Apply ``func`` to every element of X."""
        return X.applymap(self.func)

    def inverse_transform(self, X):
        """Apply ``inverse_func`` to every element of X."""
        return X.applymap(self.inverse_func)
import pandas as pd
import numpy as np
from itertools import combinations
from numpy.linalg import svd
from dask_ml.preprocessing import OneHotEncoder, DummyEncoder, OrdinalEncoder
from sklearn.base import clone, BaseEstimator, TransformerMixin
from sklearn.utils.multiclass import type_of_target
from robusta.utils import all_subsets
from category_encoders import *
__all__ = [
'LabelBinarizer',
'OrdinalEncoder',
'LabelEncoder1D',
'LabelEncoder',
'Categorizer1D',
'Categorizer',
'OneHotEncoder',
'DummyEncoder',
'FrequencyEncoder',
'FeatureCombiner',
'BackwardDifferenceEncoder',
'BinaryEncoder',
'HashingEncoder',
'HelmertEncoder',
'OrdinalEncoder',
'SumEncoder',
'PolynomialEncoder',
'BaseNEncoder',
'SVDEncoder',
'ThermometerEncoder1D',
'ThermometerEncoder',
'GroupByEncoder',
]
class LabelEncoder1D(BaseEstimator, TransformerMixin):
    """Encode the categories of a single Series as integer codes.

    Missing values map to -1 and back.
    """

    def __init__(self):
        pass

    def fit(self, y):
        """Learn the category -> code mapping from <y>."""
        self.cats_ = y.astype('category').cat.categories
        self.dtype = y.dtype
        codes = range(len(self.cats_))
        self.mapper = dict(zip(self.cats_, codes))
        self.inv_mapper = {code: cat for cat, code in self.mapper.items()}
        # Reserve -1 for missing values in both directions.
        self.mapper[np.nan] = -1
        self.inv_mapper[-1] = np.nan
        return self

    def transform(self, y):
        """Map categories to their integer codes."""
        return y.map(self.mapper)

    def inverse_transform(self, y):
        """Map codes back to the original categories and dtype."""
        return y.map(self.inv_mapper).astype(self.dtype)
class LabelEncoder(LabelEncoder1D):
    """Column-wise LabelEncoder1D applied to a whole DataFrame."""

    def fit(self, X, y=None):
        """Fit one LabelEncoder1D per column."""
        self.transformers = {col: LabelEncoder1D().fit(X[col])
                             for col in X.columns}
        return self

    def transform(self, X):
        """Encode every column with its fitted encoder."""
        Xt = pd.DataFrame(index=X.index, columns=X.columns)
        for col, encoder in self.transformers.items():
            Xt[col] = encoder.transform(X[col])
        return Xt

    def inverse_transform(self, X):
        """Decode every column with its fitted encoder."""
        Xt = pd.DataFrame(index=X.index, columns=X.columns)
        for col, encoder in self.transformers.items():
            Xt[col] = encoder.inverse_transform(X[col])
        return Xt
class Categorizer1D(BaseEstimator, TransformerMixin):
    """Convert a Series to 'category' dtype with categories fixed at fit time."""

    def __init__(self):
        pass

    def fit(self, y):
        """Memorize the categories observed in <y>.

        Parameters
        ----------
        y : Series

        Returns
        -------
        self
        """
        self.cats_ = y.astype('category').values.categories
        return self

    def transform(self, y):
        """Cast <y> onto the fitted categories.

        Parameters
        ----------
        y : Series

        Returns
        -------
        yt : Series
            Transformed input.
        """
        return pd.Categorical(y, categories=self.cats_)
class Categorizer(BaseEstimator, TransformerMixin):
    """Apply Categorizer1D to every column of a DataFrame."""

    def __init__(self):
        pass

    def fit(self, X, y=None):
        """Learn the categories of each column.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to determine the categories of each feature.

        Returns
        -------
        self
        """
        self.transformers = {col: Categorizer1D().fit(X[col])
                             for col in X.columns}
        return self

    def transform(self, X):
        """Cast every column onto its fitted categories.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame, shape [n_samples, n_features]
            Transformed input.
        """
        Xt = pd.DataFrame(index=X.index)
        for col, categorizer in self.transformers.items():
            Xt[col] = categorizer.transform(X[col])
        return Xt
class FrequencyEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical features by their (relative) value frequencies."""

    def __init__(self, normalize=True):
        # normalize=True yields relative frequencies, False raw counts.
        self.normalize = normalize

    def fit(self, X, y=None):
        """Count value frequencies per column.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to determine frequencies.

        Returns
        -------
        self
        """
        self.value_counts_ = {col: x.value_counts(self.normalize)
                              for col, x in X.items()}
        return self

    def transform(self, X):
        """Replace each value by its fitted frequency.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame, shape [n_samples, n_features]
            Transformed input.
        """
        Xt = pd.DataFrame(index=X.index)
        for col, counts in self.value_counts_.items():
            Xt[col] = X[col].map(counts)
        return Xt.astype(float)
class FeatureCombiner(BaseEstimator, TransformerMixin):
    """Concatenate string values of feature subsets into combined features.

    Parameters
    ----------
    orders : list of int (default=[2, 3])
        Subset sizes to generate (pairs, triples, ...).
    sep : str (default=',')
        Separator used both in combined values and combined column names.
    """
    def __init__(self, orders=[2, 3], sep=','):
        self.orders = orders
        self.sep = sep

    def fit(self, X, y=None):
        """Enumerate the column subsets of the requested orders."""
        subsets = all_subsets(X.columns, self.orders)
        self.subsets_ = [list(subset) for subset in subsets]
        self.n_subsets_ = len(self.subsets_)
        return self

    def transform(self, X):
        """Build one combined string column per fitted subset.

        BUG FIX: the original referenced the undefined name <subsets> and
        called the separator string as a function (`sep(subset)`), so it
        always crashed. Use the fitted ``self.subsets_`` and join names
        with ``self.sep``.
        """
        X = X.astype(str)
        combined = [
            X[subset].apply(self.sep.join, axis=1).rename(self.sep.join(subset))
            for subset in self.subsets_
        ]
        return pd.concat(combined, axis=1)
class SVDEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical features via SVD embeddings of pairwise
    co-occurrence (count) matrices.

    Parameters
    ----------
    n_components : int or float (default=0.9)
        If int, the number of SVD components kept per feature pair
        (capped by the number of singular values).
        If float, keep components until the cumulative singular-value
        ratio exceeds this threshold.
    """
    def __init__(self, n_components=0.9):
        self.n_components = n_components

    def fit(self, X, y=None):
        """Build per-category embeddings for every feature pair.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to determine frequencies.

        Returns
        -------
        self
        """
        # Check data
        assert not X.isna().any().any(), 'Missing values are not allowed'
        columns = X.columns
        self.embeddings_ = {col: pd.DataFrame(index=X[col].unique()) for col in columns}
        self.n_components_ = pd.DataFrame(index=columns, columns=columns)
        self.sigmas_ = {}
        for a, b in combinations(columns, 2):
            # Count Matrix: co-occurrences of categories of <a> and <b>
            x = X.groupby([a, b]).size().unstack().fillna(0)
            # SVD
            u, s, v = svd(x, full_matrices=False)
            v = v.T
            # n_components
            if isinstance(self.n_components, int):
                n_components_ = min(self.n_components, len(s))
            elif isinstance(self.n_components, float):
                ratio = s.cumsum()/s.sum()
                n_components_ = (ratio > self.n_components).argmax() + 1
            else:
                raise ValueError('Unknown n_components type:', self.n_components)
            # BUG FIX: `self.n_components_[a, b] = ...` creates a new column
            # keyed by the tuple (a, b); use .loc to set the matrix cell.
            self.n_components_.loc[a, b] = n_components_
            self.n_components_.loc[b, a] = n_components_
            # Truncate embeddings to n_components_ columns
            u_cols, v_cols = [], []
            for i in range(n_components_):
                u_cols.append('({},{})_svd{}'.format(a, b, i+1))
                v_cols.append('({},{})_svd{}'.format(b, a, i+1))
            u = pd.DataFrame(u[:, :n_components_], columns=u_cols, index=x.index)
            v = pd.DataFrame(v[:, :n_components_], columns=v_cols, index=x.columns)
            # Append to Embeddings
            self.embeddings_[a] = self.embeddings_[a].join(u)
            self.embeddings_[b] = self.embeddings_[b].join(v)
        return self

    def transform(self, X):
        """Look up the fitted embedding rows for every value.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame
            Concatenated embedding columns for all input features.
        """
        return pd.concat([self.embeddings_[col].loc[x].set_index(x.index)
                          for col, x in X.items()], axis=1)
class LabelBinarizer(BaseEstimator, TransformerMixin):
    """Binarize a target Series: 0/1 codes for binary targets,
    one-hot columns for multiclass targets."""

    def __init__(self):
        pass

    def fit(self, y):
        """Learn the target type and its classes.

        Raises ValueError for empty, single-class or multioutput targets.
        """
        if len(y) == 0:
            raise ValueError(f"y has 0 samples: {y}")
        self.y_type_ = type_of_target(y)
        self.y_name_ = y.name
        if 'multioutput' in self.y_type_:
            raise ValueError("Multioutput target data is not supported")
        self.classes_ = y.astype('category').values.categories
        if len(self.classes_) == 1:
            raise ValueError(f"y has single class: {self.classes_}")
        elif len(self.classes_) == 2:
            # Binary: simple value <-> {0, 1} mappings
            self.mapper_ = dict(zip(self.classes_, [0, 1]))
            self.inv_mapper_ = dict(zip([0, 1], self.classes_))
        elif len(self.classes_) >= 3:
            # Multiclass: delegate one-hot encoding
            y = pd.DataFrame(y)
            self.encoder_ = DummyEncoder().fit(y)
        else:
            raise ValueError(f"{self.y_type_} target data is not supported")
        return self

    def transform(self, y):
        """Encode the target (binary codes or one-hot columns)."""
        # BUG FIX: comparisons used `is` on string literals, which relies
        # on interning and is not a guaranteed equality test; use `==`.
        if self.y_type_ == 'binary':
            y = y.map(self.mapper_).astype('uint8')
        elif self.y_type_ == 'multiclass':
            y = pd.DataFrame(y)
            y = self.encoder_.transform(y)
            y.columns = self.classes_
        return y

    def inverse_transform(self, y):
        """Decode back to the original labels."""
        if self.y_type_ == 'binary':
            y = y.map(self.inv_mapper_)
        elif self.y_type_ == 'multiclass':
            # Pick the class whose indicator column is largest per row.
            y = y.apply(lambda row: row.argmax(), axis=1)
            y.name = self.y_name_
        return y
class ThermometerEncoder1D(BaseEstimator, TransformerMixin):
    """Thermometer (cumulative/unary) encoding of a single ordered Series."""

    def __init__(self):
        pass

    def fit(self, y):
        """Memorize categories, dtype and name of <y>."""
        self.cats_ = y.astype('category').cat.categories
        self.type_ = y.dtype
        self.name_ = y.name
        return self

    def transform(self, y):
        """One indicator column per category: 1 where category <= value."""
        indicators = [cat <= y for cat in self.cats_]
        out = pd.concat(indicators, axis=1)
        out.columns = self.cats_
        out.rename(lambda cat: f"{self.name_}:{cat}", axis=1, inplace=True)
        return out.astype('uint8')

    def inverse_transform(self, y):
        """Recover the category from the count of set indicators."""
        codes = y.sum(axis=1) - 1
        return pd.Series(self.cats_[codes], index=y.index,
                         name=self.name_, dtype=self.type_)
class ThermometerEncoder(ThermometerEncoder1D):
    """Column-wise thermometer encoding of a DataFrame."""

    def fit(self, X, y=None):
        """Fit one ThermometerEncoder1D per column."""
        self.transformers = {col: ThermometerEncoder1D().fit(X[col])
                             for col in X.columns}
        return self

    def transform(self, X):
        """Concatenate the indicator columns of every feature."""
        encoded = [encoder.transform(X[col])
                   for col, encoder in self.transformers.items()]
        return pd.concat(encoded, axis=1)

    def inverse_transform(self, X):
        """Decode each feature from its own indicator columns."""
        decoded = []
        for col, encoder in self.transformers.items():
            # Select the indicator columns belonging to <col> by name prefix.
            # NOTE(review): a plain prefix match may also catch columns whose
            # name merely starts with <col> -- confirm names are unambiguous.
            selector = ColumnFilter(lambda s: s.startswith(col))
            part = selector.fit_transform(X)
            decoded.append(encoder.inverse_transform(part))
        return pd.concat(decoded, axis=1)
class GroupByEncoder(BaseEstimator, TransformerMixin):
    """For every (numeric, categorical) column pair, aggregate the numeric
    column per category (optionally as a difference from that aggregate)."""

    def __init__(self, func='mean', diff=False):
        self.func = func  # aggregation passed to groupby().transform
        self.diff = diff  # if True, emit value minus its group aggregate

    def fit(self, X, y=None):
        """Remember which columns are categorical and which are numeric."""
        self.cats_ = list(X.select_dtypes(['category', 'object']))
        self.nums_ = list(X.select_dtypes(np.number))
        return self

    def transform(self, X):
        """Build one column per (numeric, categorical) pair."""
        Xt = pd.DataFrame(index=X.index)
        for cat in self.cats_:
            for num in self.nums_:
                name = num + '__' + cat
                agg = X[num].groupby(X[cat]).transform(self.func)
                Xt[name] = (X[num] - agg) if self.diff else agg
        return Xt
from sklearn.base import BaseEstimator
import numpy as np
__all__ = [
'_StagedClassifier',
'_StagedRegressor',
'_cat_staged_predict',
'_xgb_staged_predict',
'_lgb_staged_predict',
'_get_scores',
]
class _StagedClassifier(BaseEstimator):
    """Identity classifier: its input X is already a probability matrix
    with columns [P(class 0), P(class 1)]."""

    _estimator_type = 'classifier'

    def predict_proba(self, X):
        # X already holds the class probabilities.
        return X

    def predict(self, X):
        # Threshold the positive-class probability at 0.5.
        return (X[:, 1] > 0.5).astype(int)
class _StagedRegressor(BaseEstimator):
    """Identity regressor: its input X is already the prediction vector."""

    _estimator_type = 'regressor'

    def predict(self, X):
        # Pass the precomputed predictions through unchanged.
        return X
def _xgb_staged_predict(estimator, X, max_iter=0, step=1):
    """Stage-wise predictions for XGBoost models.

    NOTE(review): unfinished stub -- it gathers per-tree leaf indices but
    always returns None; callers raise NotImplementedError before reaching
    it. Kept for symmetry with the LGBM/CatBoost generators.
    """
    booster = estimator.get_booster()  # presumably needed by the missing part
    leafs = estimator.apply(X, max_iter)
    M = leafs.shape[0]  # first leaf-matrix dimension
    N = leafs.shape[1]  # second leaf-matrix dimension
    trees = np.mgrid[0:M, 0:N][1]  # column (tree) index for each cell
    return None
def _lgb_staged_predict(estimator, X, max_iter=0, step=1):
    """Yield stage-wise predictions of a fitted LGBM model.

    Every <step> trees (up to <max_iter>, or all trees if 0) yields the
    cumulative prediction so far: a (n_samples, 2) probability matrix for
    classifiers, a raw value vector for regressors.
    """
    booster = estimator.booster_
    max_iter = max_iter if max_iter else booster.num_trees()
    # Leaf index of every sample in every tree.
    leafs = booster.predict(X, pred_leaf=True, num_iteration=max_iter)
    M = leafs.shape[0]
    N = leafs.shape[1]
    # trees[i, j] == j: the tree index matching each leaf entry.
    trees = np.mgrid[0:M, 0:N][1]
    # Precompute the raw output of every (tree, leaf) pair once.
    mapper = {}
    for i in range(estimator.n_estimators):
        for j in range(estimator.num_leaves):
            mapper[i, j] = booster.get_leaf_output(i, j)
    # preds[t] = per-sample contribution of tree t (transposed to trees-first).
    preds = np.vectorize(lambda i, j: mapper[i, j])(trees, leafs).T
    # Cumulative sum over trees, sampled at stages step-1, 2*step-1, ...
    preds = preds.cumsum(axis=0)[np.arange(step, max_iter+step, step)-1]
    if estimator._estimator_type == 'classifier':
        # Raw additive scores -> probabilities via the logistic function.
        preds = sigmoid(preds)
        for pred in preds:
            # Stack into [P(class 0), P(class 1)] columns.
            yield np.vstack([1-pred, pred]).T
    elif estimator._estimator_type == 'regressor':
        for pred in preds:
            yield pred
def _cat_staged_predict(estimator, X, max_iter=0, step=1):
    """Delegate to CatBoost's built-in staged prediction generators.

    Returns a generator over predictions after every <step> trees up to
    <max_iter> (probabilities for classifiers, values for regressors).
    """
    kind = estimator._estimator_type
    if kind == 'classifier':
        return estimator.staged_predict_proba(X, ntree_end=max_iter,
                                              eval_period=step)
    if kind == 'regressor':
        return estimator.staged_predict(X, ntree_end=max_iter,
                                        eval_period=step)
def _get_scores(estimator, generator, predictor, trn, val, X, y,
                scorer, max_iter, step, train_score):
    """Score staged predictions on the train and validation folds.

    <generator> yields one prediction batch per stage; <scorer> evaluates
    <predictor> on each batch. Returns (trn_scores, val_scores); the train
    list stays empty unless <train_score> is set.
    """
    n_stages = len(np.arange(step, max_iter+step, step))

    def _fold_scores(rows):
        # One score per stage for the samples at positions <rows>.
        X_fold, y_fold = X.iloc[rows], y.iloc[rows]
        staged = generator(estimator, X_fold, max_iter, step)
        return [scorer(predictor, next(staged), y_fold) for _ in range(n_stages)]

    trn_scores = _fold_scores(trn) if train_score else []
    val_scores = _fold_scores(val)
    return trn_scores, val_scores
def sigmoid(x):
    """Logistic function 1 / (1 + exp(-x)); works elementwise on arrays."""
    z = np.exp(-x)
    return 1.0 / (1.0 + z)
import numpy as np
import pandas as pd
from sklearn.base import clone, BaseEstimator, ClassifierMixin
from sklearn.model_selection import GroupKFold, KFold
from sklearn.exceptions import NotFittedError
from sklearn.utils import check_random_state
from collections import Counter, defaultdict
from itertools import chain
__all__ = [
'StratifiedGroupKFold',
'RepeatedKFold',
'RepeatedGroupKFold',
'RepeatedStratifiedGroupKFold',
'AdversarialValidation',
'make_adversarial_validation',
]
def shuffle_labels(labels, random_state=None):
    """Randomly permute the distinct label values of a Series.

    Every occurrence of a given label maps to the same new label, so the
    grouping structure is preserved while group identities change.
    Deterministic for a fixed <random_state>.
    """
    rng = np.random.RandomState(random_state)
    originals = np.unique(labels)
    permuted = rng.permutation(originals)
    return labels.map(dict(zip(originals, permuted)))
class RepeatedGroupKFold():
    """Repeated Group KFold.

    Same as RepeatedKFold but each group presents only in one fold on
    each repeat. Repeats differ by reshuffling the group labels.

    Parameters
    ----------
    n_splits : int, default=5
        Number of splits. Must be at least 2.
    n_repeats : int, optional
        Number of times cross-validator needs to be repeated.
    random_state : int, RandomState instance or None, optional, default=0
        Base seed; repeat i uses random_state + i to reshuffle groups.
    """
    def __init__(self, n_splits=5, n_repeats=3, random_state=0):
        self.n_splits = n_splits
        self.n_repeats = n_repeats
        self.random_state = random_state

    def split(self, X, y, groups):
        """Chain the GroupKFold splits of every repeat."""
        all_splits = []
        for repeat in range(self.n_repeats):
            shuffled = shuffle_labels(groups, self.random_state + repeat)
            splitter = GroupKFold(self.n_splits)
            all_splits.append(splitter.split(X, y, shuffled))
        return chain(*all_splits)

    def get_n_splits(self):
        """Total folds yielded across all repeats."""
        return self.n_splits * self.n_repeats
class RepeatedKFold():
    """Repeated KFold (first split DO shuffled).

    Runs a shuffled KFold <n_repeats> times, drawing a fresh seed for
    each repeat from a single random stream.

    Parameters
    ----------
    n_splits : int, default=5
        Number of splits. Must be at least 2.
    n_repeats : int, optional
        Number of times cross-validator needs to be repeated.
    random_state : int, RandomState instance or None, optional, default=None
        Seed for the stream that generates the per-repeat KFold seeds.
    """
    def __init__(self, n_splits=5, n_repeats=3, random_state=None):
        self.n_splits = n_splits
        self.n_repeats = n_repeats
        self.random_state = random_state

    def split(self, X, y, groups=None):
        """Yield (train, test) indices for every fold of every repeat."""
        rng = check_random_state(self.random_state)
        for _ in range(self.n_repeats):
            seed = rng.randint(2**32-1)
            folds = KFold(self.n_splits, shuffle=True, random_state=seed)
            yield from folds.split(X, y)

    def get_n_splits(self):
        """Total folds yielded across all repeats."""
        return self.n_splits * self.n_repeats
class StratifiedGroupKFold():
    """Stratified Group KFold

    Same as StratifiedKFold but each group presents only in one fold.

    Parameters
    ----------
    n_splits : int, default=5
        Number of splits. Must be at least 2.
    n_batches : int, default=1024
        Split groups to min(n_batches, n_groups) parts.
        Must be greater than n_splits.
    shuffle : boolean, optional
        Whether to shuffle each class’s samples before splitting into batches.
    random_state : int, RandomState instance or None, optional, default=0
        If None, the random number generator is the RandomState instance used by np.random.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
    """
    def __init__(self, n_splits=5, n_batches=1024, shuffle=False, random_state=0):
        self.n_splits = n_splits
        self.n_batches = n_batches
        self.shuffle = shuffle
        self.random_state = random_state

    def get_n_splits(self, X=None, y=None, groups=None):
        # Single pass over the data: n_splits folds total.
        return self.n_splits

    def split(self, X, y, groups):
        """Yield (train_idx, test_idx) pairs: groups are assigned whole to
        folds, greedily balancing the per-fold label counts."""
        # Global stats
        groups_unique = set(groups.unique())
        labels = np.sort(y.unique())
        # counts: rows = groups, columns = labels; cell = samples of that
        # label inside that group.
        counts = [groups[y == label].value_counts(sort=False) for label in labels]
        counts = pd.concat(counts, axis=1).fillna(0).astype(int)
        counts.columns = labels
        labels_total = counts.sum()  # NOTE(review): computed but never used
        if self.shuffle:
            counts = counts.sample(frac=1, random_state=self.random_state)
        # Mini-Batches: contiguous chunks of groups assigned as one unit.
        n = len(groups_unique)
        batch_size = max(n // self.n_batches, 1)
        batches = [counts.iloc[k:k+batch_size] for k in range(0, n, batch_size)]
        # Assign the most label-skewed batches first (greedy heuristic).
        batches.sort(key=lambda batch: -batch.sum().std())
        # Local stats (per fold)
        fold_labels = pd.DataFrame(0, columns=labels, index=range(self.n_splits))
        fold_groups = defaultdict(set)
        for batch in batches:
            batch_groups = batch.index
            batch_labels = batch.sum()
            best_idx = None
            best_std = None
            # Trial-place the batch in every fold; keep the fold that
            # minimizes the spread of label counts across labels.
            for i in range(self.n_splits):
                fold_labels.loc[i] += batch_labels
                fold_std = fold_labels.std().mean()
                if best_std is None or fold_std < best_std:
                    best_std = fold_std
                    best_idx = i
                fold_labels.loc[i] -= batch_labels  # undo the trial placement
            fold_labels.loc[best_idx] += batch_labels
            fold_groups[best_idx].update(batch_groups)
        # Yield indices: each fold's groups form the out-of-fold set.
        for oof_groups in fold_groups.values():
            trn_groups = groups_unique - oof_groups
            trn = groups[groups.isin(trn_groups)].index
            oof = groups[groups.isin(oof_groups)].index
            yield trn, oof
class RepeatedStratifiedGroupKFold():
    """Repeated Stratified Group KFold

    Same as RepeatedStratifiedKFold but each group presents only in one fold on each repeat.

    Parameters
    ----------
    n_splits : int, default=5
        Number of splits. Must be at least 2.
    n_repeats : int, optional
        Number of times cross-validator needs to be repeated.
    n_batches : int, default=1024
        Split groups to min(n_batches, n_groups) parts.
        Must be greater than n_splits.
    random_state : int, RandomState instance or None, optional, default=0
        If None, the random number generator is the RandomState instance used by np.random.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
    """
    def __init__(self, n_splits=5, n_repeats=3, n_batches=1024, random_state=None):
        self.n_splits = n_splits
        self.n_repeats = n_repeats
        self.n_batches = n_batches
        self.random_state = random_state

    def get_n_splits(self, X=None, y=None, groups=None):
        # Total folds yielded across all repeats.
        return self.n_splits * self.n_repeats

    def split(self, X, y, groups):
        """Repeat the greedy stratified group-to-fold assignment (same
        scheme as StratifiedGroupKFold.split) once per repeat, reshuffling
        the group order between repeats."""
        # Global stats
        groups_unique = set(groups.unique())
        labels = np.sort(y.unique())
        # counts: rows = groups, columns = labels; cell = samples of that
        # label inside that group.
        counts = [groups[y == label].value_counts(sort=False) for label in labels]
        counts = pd.concat(counts, axis=1).fillna(0).astype(int)
        counts.columns = labels
        labels_total = counts.sum()  # NOTE(review): computed but never used
        for _ in range(self.n_repeats):
            # Reshuffle the group order for this repeat.
            # NOTE(review): the same <random_state> is passed every repeat;
            # orders still differ between repeats because each shuffle
            # starts from the previous repeat's order.
            counts = counts.sample(frac=1, random_state=self.random_state)
            # Mini-Batches: contiguous chunks of groups assigned as one unit.
            n = len(groups_unique)
            batch_size = max(n // self.n_batches, 1)
            batches = [counts.iloc[k:k+batch_size] for k in range(0, n, batch_size)]
            # Assign the most label-skewed batches first (greedy heuristic).
            batches.sort(key=lambda batch: -batch.sum().std())
            # Local stats (per fold)
            fold_labels = pd.DataFrame(0, columns=labels, index=range(self.n_splits))
            fold_groups = defaultdict(set)
            for batch in batches:
                batch_groups = batch.index
                batch_labels = batch.sum()
                best_idx = None
                best_std = None
                # Trial-place the batch in every fold; keep the fold that
                # minimizes the spread of label counts across labels.
                for i in range(self.n_splits):
                    fold_labels.loc[i] += batch_labels
                    fold_std = fold_labels.std().mean()
                    if best_std is None or fold_std < best_std:
                        best_std = fold_std
                        best_idx = i
                    fold_labels.loc[i] -= batch_labels  # undo the trial placement
                fold_labels.loc[best_idx] += batch_labels
                fold_groups[best_idx].update(batch_groups)
            # Yield indices: each fold's groups form the out-of-fold set.
            for oof_groups in fold_groups.values():
                trn_groups = groups_unique - oof_groups
                trn = groups[groups.isin(trn_groups)].index
                oof = groups[groups.isin(oof_groups)].index
                yield trn, oof
class AdversarialValidation():
    """Adversarial Validation

    Holdout split by the train/test similarity. Inner ``classifier`` must be
    already fitted to the concatenated dataset with binary target, where 1 means
    test set and 0 – train set. Provides list with single train/oof indices,
    where oof – subset of size ``test_size`` with maximum class 1 probability.

    Parameters
    ----------
    classifier : estimator object
        Fitted estimator for train/test similarity measurement.
        Class 1 for test set and 0 for train.

    train_size : float, int, or None (default=None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    test_size : float, int, None (default=None)
        If float, should be between 0.0 and 1.0 and represent the proportion
        of the dataset to include in the test split. If int, represents the
        absolute number of test samples. If None, the value is set to the
        complement of the train size. If ``train_size`` is also None, it will
        be set to 0.2.
    """
    def __init__(self, classifier, train_size=None, test_size=None):
        self.classifier = classifier
        self.train_size = train_size
        self.test_size = test_size

    def split(self, X, y, groups=None):
        """Yield a single (train, oof) index pair ordered by test-likeness."""
        if not hasattr(self.classifier, 'classes_'):
            raise NotFittedError('Passed classifier must be pre-fitted')
        n_train = self._get_train_size(X)
        proba = self.classifier.predict_proba(X)
        # Ascending by P(test): the most train-like samples come first.
        order = proba[:, 1].argsort()
        yield order[:n_train], order[n_train:]

    def get_n_splits(self):
        """Adversarial validation is a single holdout split."""
        return 1

    def _get_train_size(self, X):
        """Resolve <train_size>/<test_size> into an absolute train size."""
        n = len(X)
        train_size = self.train_size
        test_size = self.test_size

        if train_size is not None and test_size is not None:
            raise ValueError("train_size and test_size could not be set both")

        if train_size is None and test_size is None:
            # Default: hold out 20% as the adversarial validation set.
            return n - int(n * 0.2)

        if train_size is not None:
            if isinstance(train_size, float):
                if not 0 < train_size < 1:
                    raise ValueError("Float train_size must be in range (0, 1). "
                                     "Passed {}".format(train_size))
                return int(n * train_size)
            if isinstance(train_size, int):
                if not 0 < train_size < n:
                    raise ValueError("Integer train_size must be in range [1, {}]. "
                                     "Passed {}".format(n, train_size))
                return train_size
            raise ValueError("Unknown type of train_size passed {}".format(train_size))

        if isinstance(test_size, float):
            if not 0 < test_size < 1:
                raise ValueError("Float test_size must be in range (0, 1). "
                                 "Passed {}".format(test_size))
            return n - int(n * test_size)
        if isinstance(test_size, int):
            if not 0 < test_size < n:
                raise ValueError("Integer test_size must be in range [1, {}]. "
                                 "Passed {}".format(n, test_size))
            return n - test_size
        raise ValueError("Unknown type of test_size passed {}".format(test_size))
def make_adversarial_validation(classifier, X_train, X_test, train_size=None, test_size=None):
    """Construct AdversarialValidation object from unfitted classifier.

    See AdversarialValidation documentation for details.

    Parameters
    ----------
    classifier : estimator object
        Estimator for train/test similarity measurement.
        Would be fitted on concatenated X_train/X_test dataset.

    train_size : float, int, or None (default=None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    test_size : float, int, None (default=None)
        If float, should be between 0.0 and 1.0 and represent the proportion
        of the dataset to include in the test split. If int, represents the
        absolute number of test samples. If None, the value is set to the
        complement of the train size. If ``train_size`` is also None, it will
        be set to 0.2.
    """
    # Binary target: 0 for train-set rows, 1 for test-set rows.
    data = pd.concat([X_train, X_test])
    target = [0] * len(X_train) + [1] * len(X_test)
    fitted = clone(classifier).fit(data, target)
    return AdversarialValidation(fitted,
                                 train_size=train_size,
                                 test_size=test_size)
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.model_selection import check_cv
from sklearn.metrics import check_scoring
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
from ._curve import *
def plot_learing_curve(result, X, y, groups=None, max_iter=0, step=1,
                       mode='mean', train_score=False, n_jobs=None):
    """Plot learning curve for boosting estimators.

    Currently supported:
        - LGBMClassifier, LGBMRegressor
        - CatBoostClassifier, CatBoostRegressor

    NOTE(review): the function name is missing an 'n' ("learning"); kept
    as-is because renaming would break existing callers.

    Parameters
    ----------
    result : dict
        Cross-validation results, returned by <crossval> function.
        Must contain 'estimator', 'scorer' and 'cv' keys.

    X : DataFrame, shape [n_samples, n_features]
        The data to fit, score and calculate out-of-fold predictions.
        Must be the same as during cross-validation fit.

    y : Series, shape [n_samples]
        The target variable to try to predict.
        Must be the same as during cross-validation fit.

    groups : None
        Group labels for the samples used while splitting the dataset into
        train/test set.
        Must be the same as during cross-validation fit.

    max_iter : int (default=0)
        Maximum number of trees. 0 means all.

    step : int (default=1)
        If greater than 1, plot score only for trees with indices:
        step-1, 2*step-1, 3*step-1 & etc (zero-based indices).
        Larger step speeds up prediction.

    mode : {'mean', 'fold', 'both'} (default='mean')
        - 'mean' : plot average score and std (default)
        - 'fold' : plot score of each fold
        - 'both' : plot both

    train_score : bool (default=False)
        Whether to plot learning curve for training scores.
        If False, speeds up prediction.

    n_jobs : int or None, optional (default=-1)
        The number of jobs to run in parallel. None means 1.

    Returns
    -------
    trn_scores : ndarray, shape (n_folds, n_stages)
        Train scores learning curve for each fold.
        If train_score is False, return None.

    val_scores : ndarray, shape (n_folds, n_stages)
        Validation scores learning curve for each fold.
    """
    estimators = result['estimator']
    scorer = result['scorer']
    cv = result['cv']
    modes = ('mean', 'fold', 'both')
    assert mode in modes, f'<mode> must be from {modes}. Found {mode}'
    # Estimator Name: pick the staged-prediction generator by library.
    estimator = estimators[0]
    name = estimator.__class__.__name__
    if name.startswith('CatBoost'):
        generator = _cat_staged_predict
        if max_iter == 0:
            # The shortest model across folds bounds the common x-axis.
            max_iter = min([e.tree_count_ for e in estimators])
    elif name.startswith('LGB'):
        generator = _lgb_staged_predict
        if max_iter == 0:
            max_iter = min([e.booster_.num_trees() for e in estimators])
    elif name.startswith('XGB'):
        # XGBoost support is stubbed out: raise before the dead code below.
        raise NotImplementedError('XGBoost currently does not supported')
        generator = _xgb_staged_predict
        if max_iter == 0:
            max_iter = min([e.n_estimators for e in estimators])
    else:
        raise NotImplementedError('Only LGBM and CatBoost currently supported')
    # Estimator Type: wrapper that scores precomputed staged predictions.
    if estimator._estimator_type == 'classifier':
        predictor = _StagedClassifier()
    elif estimator._estimator_type == 'regressor':
        predictor = _StagedRegressor()
    # Predict in Parallel: one _get_scores job per (fold, estimator) pair.
    stages = np.arange(step, max_iter+step, step)
    folds = cv.split(X, y, groups)
    scores = Parallel(n_jobs=n_jobs)(
        delayed(_get_scores)(estimator, generator, predictor, trn, val, X, y,
                             scorer, max_iter, step, train_score)
        for (trn, val), estimator in zip(folds, estimators)
    )
    trn_scores = np.array([s[0] for s in scores])
    val_scores = np.array([s[1] for s in scores])
    # Learning Curve(s)
    plt.figure()
    if not train_score:
        trn_scores = None
    else:
        avg = trn_scores.mean(axis=0)
        std = trn_scores.std(axis=0)
        if mode in ['mean', 'both']:
            # Shaded band = mean +/- one std across folds.
            plt.fill_between(stages, avg-std, avg+std, alpha=.1, color='b')
            plt.plot(stages, avg, label='train score', color='b')
        if mode in ['fold', 'both']:
            for scores in trn_scores:
                plt.plot(stages, scores, '--', color='b', lw=0.5, alpha=0.5)
    if True:  # NOTE(review): dead guard -- the validation curve is always drawn
        avg = val_scores.mean(axis=0)
        std = val_scores.std(axis=0)
        if mode in ['mean', 'both']:
            plt.fill_between(stages, avg-std, avg+std, alpha=.1, color='y')
            plt.plot(stages, avg, label='valid score', color='y')
        if mode in ['fold', 'both']:
            for scores in val_scores:
                plt.plot(stages, scores, '--', color='y', lw=0.5, alpha=0.5)
    plt.legend()
    plt.show()
    return trn_scores, val_scores
import pandas as pd
import numpy as np
from sklearn.metrics import roc_curve
from collections import defaultdict
from scipy import interp, stats
import matplotlib.pyplot as plt
import seaborn as sns
from .results import check_cvs
__all__ = [
'compare_roc_auc',
'compare_ttest',
]
def compare_ttest(resultA, resultB, score='val_score', label='label'):
    """Compare two cross-validation results with a paired t-test and
    visualize the per-fold score distributions.

    Parameters
    ----------
    resultA, resultB : dict
        Cross-validation results containing <score> (and optionally <label>).
        Both must have the same number of folds.
    score : str (default='val_score')
        Key of the per-fold scores to compare.
    label : str (default='label')
        Key of the display name of each result.

    Returns
    -------
    t, p : float
        Paired t-statistic and two-sided p-value (scipy.stats.ttest_rel).
    """
    # Check input
    # BUG FIX: messages referenced the undefined name <key>, so a failing
    # check raised NameError instead of AssertionError; use <score>.
    assert score in resultA, f"<resultA> has no '{score}'"
    assert score in resultB, f"<resultB> has no '{score}'"
    a = resultA[score]
    b = resultB[score]
    assert len(a) == len(b), 'Both scores must be of the same size'
    n = len(a)
    # Check labels
    labels = ['0', '1']
    if label in resultA: labels[0] = resultA[label]
    if label in resultB: labels[1] = resultB[label]
    # t-test
    t, p = stats.ttest_rel(a, b)
    # Plot
    _, axes = plt.subplots(2, 2)
    # Plot box
    ax = axes[0, 0]
    sns.boxplot(labels, [a, b], linewidth=2.0, ax=ax)
    ax.grid(alpha=0.2)
    # Plot pairs: one line per fold, white line for the means
    ax = axes[1, 0]
    for x, y in zip(a, b):
        ax.plot(labels, [x, y], 'o-', color='b', alpha=0.8)
    ax.plot(labels, [np.mean(a), np.mean(b)], 'o-', color='w')
    ax.grid(alpha=0.2)
    # Plot dist
    ax = axes[0, 1]
    sns.distplot(a, 10, label=labels[0], ax=ax)
    sns.distplot(b, 10, label=labels[1], ax=ax)
    ax.grid(alpha=0.2)
    ax.legend()
    # Plot proba: Student-t density with the observed t-value marked
    ax = axes[1, 1]
    x_abs = max(5, abs(t))
    x_min, x_max = -x_abs, +x_abs
    # Region above t (gray)
    xx = np.arange(t, x_max, 0.001)
    yy = stats.t.pdf(xx, n-1)
    ax.plot(xx, yy, color='gray')
    ax.fill_between(xx, yy, color='gray', alpha=0.2)
    # Region below t (red)
    xx = np.arange(x_min, t, 0.001)
    yy = stats.t.pdf(xx, n-1)
    ax.plot(xx, yy, color='r')
    ax.fill_between(xx, yy, color='r', alpha=0.2)
    ax.legend(['t-value = {:.4f}'.format(t),
               'p-value = {:.4f}'.format(p)])
    ax.grid(alpha=0.2)
    return t, p
def compare_roc_auc(results, X, y, groups=None, labels=None, colors=None,
                    steps=200):
    """Plot per-fold and fold-averaged ROC curves for several CV results.

    Parameters
    ----------
    results : list of dict
        CV result dicts; each must contain an 'estimator' key with one
        fitted estimator per fold and share the same CV scheme.

    X : DataFrame, shape [n_samples, n_features]
        Training data the results were computed on.

    y : Series, shape [n_samples]
        Binary target.

    groups : array-like or None
        Group labels forwarded to the CV splitter.

    labels : list or None
        Display label per result; defaults to 0..len(results)-1.

    colors : list or None
        Color per result; defaults to the matplotlib color cycle.

    steps : int, default=200
        Number of points on the common FPR grid used for interpolation.
    """
    # Check input: validates that all results share the same CV scheme
    cv = check_cvs(results, X, y, groups)

    msg = "<labels> must be of same len as <results>"
    if labels:
        assert len(labels) == len(results), msg
    else:
        labels = list(range(len(results)))

    msg = "<colors> must be of same len as <results>"
    if colors:
        assert len(colors) == len(results), msg
    else:
        colors = plt.rcParams['axes.prop_cycle'].by_key()['color']

    msg = "Each <result> must have 'estimator' key"
    for result in results:
        assert 'estimator' in result, msg

    # Get curves: interpolate every fold's TPR onto a common FPR grid
    avg_fpr = np.linspace(0, 1, steps)
    curves = defaultdict(list)

    for i, (_, oof) in enumerate(cv.split(X, y, groups)):
        X_oof = X.iloc[oof]
        y_oof = y.iloc[oof]

        for j, result in enumerate(results):
            y_pred = result['estimator'][i].predict_proba(X_oof)[:, 1]

            fpr, tpr, _ = roc_curve(y_oof, y_pred)
            # Fix: scipy.interp was removed from SciPy's top-level namespace;
            # np.interp is the equivalent 1-d linear interpolation
            tpr = np.interp(avg_fpr, fpr, tpr)
            tpr[0] = 0.0

            curves[labels[j]].append(tpr)

    # Plot
    # Fix: honour user-passed <colors> (previously overwritten by rcParams)
    colors = dict(zip(labels, colors))

    plt.figure()
    for label, tprs in curves.items():
        c = colors[label]

        # Faint per-fold curves
        for tpr in tprs:
            plt.plot(avg_fpr, tpr, c=c, alpha=0.2)

        # Bold fold-averaged curve with a +/- 1 std band (clipped to [0, 1])
        avg_tpr = np.mean(tprs, axis=0)
        plt.plot(avg_fpr, avg_tpr, c=c, label=label)

        std_tpr = np.std(tprs, axis=0)
        tpr_upper = np.minimum(avg_tpr + std_tpr, 1)
        tpr_lower = np.maximum(avg_tpr - std_tpr, 0)
        plt.fill_between(avg_fpr, tpr_lower, tpr_upper, color=c, alpha=.1)

    plt.legend(loc='lower right')
    plt.show()
import pandas as pd
import numpy as np
import gc
from time import time
from sklearn.utils.metaestimators import _safe_split
from sklearn.base import is_classifier, is_regressor
from robusta.importance import get_importance
from scipy.stats import mode
__all__ = [
'_fit_predict',
'_predict',
'_check_avg',
'_avg_preds',
]
def _fit_predict(estimator, method, scorer, X, y, X_new=None, new_index=None,
                 trn=None, oof=None, return_estimator=False, return_pred=False,
                 fold=None, logger=None, train_score=False, y_transform=None):
    """Fit estimator and evaluate metric(s), compute OOF predictions & etc.

    Parameters
    ----------
    estimator : estimator object
        The object to use to fit the data.

    method : string, optional, default: 'predict'
        Invokes the passed method name of the passed estimator. For
        method='predict_proba', the columns correspond to the classes
        in sorted order. Ignored if return_pred=False.

    scorer : scorer object
        A scorer callable object with signature ``scorer(estimator, X, y)``
        which should return only a single value.

    X : DataFrame, shape [n_samples, n_features]
        The data to fit, score and calculate out-of-fold predictions

    y : Series, shape [n_samples]
        The target variable to try to predict

    X_new : DataFrame, shape [m_samples, n_features] or None
        The unseed data to predict (test set)

    trn : array or None
        Indices of rows, selected to fit estimator. If None, select all.

    oof : array or None
        Indices of rows, selected to score estimator. If None, select none.

    return_pred : bool (default=False)
        Return out-of-fold prediction (and test prediction, if X_new is defined)

    return_estimator : bool (default=False)
        Return fitted estimator

    fold : int
        Fold index. -1 for full dataset.

    logger : object
        Logger object

    train_score : bool
        Return train score (for overfitting detection)

    y_transform : callable (default=None)
        Transform target before fit

    Returns
    -------
    result : dict of float or Series
        Scores/predictions/time of the estimator for each run of the
        cross validation. The possible keys for this ``dict`` are:

            ``score`` : float
                The OOF score. If multimetric, return dict of float.

            ``oof_pred`` : Series, shape [n_samples]
                OOF predictions.
                Ignored if return_pred=False.

            ``new_pred`` : Series, shape [m_samples]
                Test predictions (unseen data).
                Ignored if return_pred=False.

            ``fit_time`` : float
                The time for fitting the estimator

            ``pred_time`` : float
                OOF and test predictions time.
                Ignored if return_pred=False.

            ``score_time`` : float
                OOF score time.

            ``estimator`` : estimator object
                The fitted estimator object.
                Ignored if return_estimator=False.

            ``importance`` : Series, shape [n_features]
                Extracted feature importances
    """
    result = {}

    # Data
    # Positional index ranges: <new> is empty when no test set was passed,
    # <trn> defaults to all rows, <oof> defaults to no rows.
    new = np.arange(X_new.shape[0]) if X_new is not None else np.arange(0)
    trn = np.arange(X.shape[0]) if trn is None else trn
    oof = np.arange(0) if oof is None else oof

    X_trn, y_trn = _safe_split(estimator, X, y, trn)
    X_oof, y_oof = _safe_split(estimator, X, y, oof)

    # NOTE(review): the getattr default <y_oof.index> is evaluated eagerly,
    # so this assumes y_oof always has an <index> attribute — confirm.
    oof_index = getattr(X_oof, 'index', y_oof.index)
    new_index = getattr(X_new, 'index', new_index)

    # Target transform is applied only for fitting; scoring below uses raw y
    y_trn_ = y_transform(y_trn) if y_transform else y_trn

    # Estimator
    tic = time()
    estimator.fit(X_trn, y_trn_)
    result['fit_time'] = time() - tic

    if return_estimator:
        result['estimator'] = estimator

    # Feature Importances
    # NOTE(review): bare except silently skips importance extraction for
    # estimators that do not expose importances
    try:
        result['importance'] = get_importance(estimator)
    except:
        pass

    # Predict
    if return_pred and (len(oof) or len(new)):

        tic = time()

        if len(oof):
            oof_pred = _predict(estimator, method, X_oof, y, oof_index)
            result['oof_pred'] = oof_pred

        if len(new):
            new_pred = _predict(estimator, method, X_new, y, new_index)
            result['new_pred'] = new_pred

        result['pred_time'] = time() - tic

    # Score
    # Note: scores are computed against the untransformed target
    tic = time()
    if scorer and len(oof):
        result['val_score'] = scorer(estimator, X_oof, y_oof)
    if scorer and train_score:
        result['trn_score'] = scorer(estimator, X_trn, y_trn)
    result['score_time'] = time() - tic

    # Logs
    if logger:
        logger.log(fold, result)

    return result
def _predict(estimator, method, X, y, index=None):
"""Call <method> of fitted <estimator> on data <X>.
Parameters
----------
estimator : estimator object
Fitted estimator
method : iterable of string
Feature names
X : DataFrame or 2d-array
Data to predict
y : string
Target (used for prediction formatting).
Could be already seen.
index : iterable
X indices (used for prediction formatting).
Returns
-------
P : Series or DataFrame
Computed predictions
"""
Y = pd.DataFrame(y)
name = estimator.__class__.__name__
# Call method
action = getattr(estimator, method, None)
if action:
P = action(X)
else:
raise AttributeError("<{}> has no method <{}>".format(name, method))
# Format
index = getattr(X, 'index', index)
if isinstance(P, list):
P = [pd.DataFrame(p, index=index) for p in P]
else:
P = [pd.DataFrame(P, index=index)]
P = pd.concat(P, axis=1)
if method is 'predict':
# [classifier + predict] OR [regressor]
P.columns = [y.name] if hasattr(y, 'name') else Y.columns
elif is_classifier(estimator):
# [classifier + predict_proba]
if name in ['MultiOutputClassifier', 'MultiTargetClassifier']:
# Multiple output classifier
Classes = estimator.classes_
tuples = []
for target, classes in zip(Y.columns, Classes):
for c in classes:
tuples.append((target, c))
P.columns = pd.MultiIndex.from_tuples(tuples, names=('_TARGET', '_CLASS'))
elif hasattr(estimator, 'classes_'):
# Single output classifier
classes = estimator.classes_
P.columns = classes
else:
# Unknown classifier
msg = "Classifier <{}> should has <classes_> attribute!".format(name)
raise AttributeError(msg)
else:
# Ranker & etc
est_type = getattr(estimator, "_estimator_type", None)
raise TypeError('<{}> is an estimator of unknown type: \
<{}>'.format(name, est_type))
return P
def _check_avg(estimator, avg_type, method):
    """Resolve the (averaging function, prediction method) pair for <estimator>.

    Parameters
    ----------
    estimator : estimator object
        Estimator whose type (classifier/regressor, probabilistic or not)
        determines which averaging strategies are compatible.

    avg_type : string, {'mean', 'soft', 'hard', 'rank', 'auto', 'pass'}
        Requested strategy for aggregating fold predictions.

    method : string, {'predict', 'predict_proba', 'decision_function'}
        Requested prediction method.

    Returns
    -------
    avg : callable
        Function that aggregates the concatenated fold predictions.

    method : string
        Possibly adjusted method name (e.g. 'predict' becomes
        'predict_proba' when soft voting is selected).
    """
    name = estimator.__class__.__name__

    # Basic <method> and <avg_type> values check
    # Fix: 'decision_function' was rejected here although it is handled
    # below, which made that branch unreachable.
    methods = ['predict', 'predict_proba', 'decision_function']
    if method not in methods:
        raise ValueError("<method> should be in {}".format(set(methods)) \
            + "\n\t\tPassed '{}'".format(method))

    avg_types = ['mean', 'soft', 'hard', 'rank', 'auto', 'pass']
    if avg_type not in avg_types:
        raise ValueError("<avg_type> should be in {}".format(set(avg_types)) \
            + "\n\t\tPassed '{}'".format(avg_type))

    # Compatibility check
    # Fix: string comparisons now use '==' instead of 'is' (identity of
    # equal strings is implementation-dependent)
    if is_classifier(estimator) and hasattr(estimator, 'predict_proba'):
        # classifier (probabilistic)
        if method == 'predict_proba':

            if avg_type == 'pass':
                avg = _pass_pred

            elif avg_type == 'rank':
                avg = _rank_pred

            elif avg_type in ['auto', 'mean']:
                avg = _mean_pred

            else:
                # Fix: 'rank' added to the advertised options (it is accepted above)
                good_vals = {'mean', 'auto', 'rank', 'pass'}
                bad_vals = {'soft', 'hard'}
                msg = "Selected <method> value is {}.".format(method) \
                    + "\n\t\tAvailable <avg_type> options are: {}.".format(good_vals) \
                    + "\n\t\tBut current <avg_type> value set to '{}'.".format(avg_type) \
                    + "\n\t\t" \
                    + "\n\t\tNote: {} are voting strategies, so".format(bad_vals) \
                    + "\n\t\tthey are available only for method='predict'."
                raise ValueError(msg)

        elif method == 'predict':

            if avg_type in ['soft', 'auto']:
                # Soft voting needs class probabilities
                method = 'predict_proba'
                avg = _soft_vote

            elif avg_type == 'hard':
                avg = _hard_vote

            elif avg_type == 'pass':
                avg = _pass_pred

            else:
                raise ValueError("Passed unavailable avg_type '{}' for method <{}>"
                                 "".format(avg_type, method))

        elif method == 'decision_function':

            if avg_type in ['mean', 'auto']:
                avg = _mean_pred

            elif avg_type == 'rank':
                avg = _rank_pred

            elif avg_type == 'pass':
                avg = _pass_pred

            else:
                raise ValueError("Passed unavailable avg_type '{}' for method <{}>"
                                 "".format(avg_type, method))

    elif is_classifier(estimator) and not hasattr(estimator, 'predict_proba'):
        # classifier (non-probabilistic)
        if method == 'predict_proba':

            msg = "<{}> is not available for <{}>".format(method, name)
            raise AttributeError(msg)

        elif method in ['predict', 'decision_function']:

            if avg_type in ['hard', 'auto']:
                avg = _hard_vote

            elif avg_type == 'pass':
                avg = _pass_pred

            else:
                vals = {'auto', 'hard', 'pass'}
                msg = "<{}> is a {}. ".format(name, 'non-probabilistic classifier') \
                    + "\n\t\tAvailable <avg_type> options are: {}".format(vals) \
                    + "\n\t\tCurrent value set to '{}'".format(avg_type)
                raise ValueError(msg)

    elif is_regressor(estimator):
        # regressor
        if avg_type == 'pass':
            avg = _pass_pred
            method = 'predict'

        elif avg_type in ['mean', 'auto']:
            avg = _mean_pred
            method = 'predict'

        else:
            vals = {'mean', 'auto', 'pass'}
            msg = "<{}> is a {}. ".format(name, 'regressor') \
                + "\n\t\tAvailable <avg_type> options are: {}".format(vals) \
                + "\n\t\tCurrent value set to '{}'".format(avg_type)
            raise ValueError(msg)

    else:
        # Fix: previously <avg> was left unbound here, turning an unknown
        # estimator type into a NameError at the return statement
        est_type = getattr(estimator, "_estimator_type", None)
        raise TypeError('<{}> is an estimator of unknown type: \
                        <{}>'.format(name, est_type))

    return avg, method
def _avg_preds(preds, avg, X, y, index=None):
    """Merge per-fold predictions into a single averaged prediction.

    Folds are concatenated column-wise, re-aligned to the row order of
    <X> (or <index>), aggregated with <avg>, then collapsed to a Series
    where possible.
    """
    index = getattr(X, 'index', index)

    # Side-by-side concat of all folds, restored to the original row order
    pred = pd.concat(preds, axis=1).loc[index]
    del preds
    gc.collect()

    # Aggregate folds with the chosen strategy (mean/rank/vote/pass)
    pred = avg(pred)

    # Single-column frame -> Series (regression case)
    if hasattr(pred, 'columns') and pred.shape[1] == 1:
        pred = pred.iloc[:, 0]

    # Binary classifier: keep only the positive-class column
    pred = _drop_zero_class(pred, y)

    return pred
def _drop_zero_class(pred, y):
# Check if task is not regression
if len(pred.shape) < 2:
return pred
# Check if avg_type is not 'pass'
if (pred.columns.value_counts() > 1).any():
return pred
if hasattr(pred.columns, 'levels'):
targets = pred.columns.get_level_values(0).unique()
preds = [pred.loc[:, target] for target in targets]
is_binary = [list(p.columns) in [['0','1'], [0, 1]] for p in preds]
is_binary = np.array(is_binary).all()
if is_binary:
preds = [p.loc[:, 1] for p in preds]
pred = pd.concat(preds, axis=1)
pred.columns = targets
pred.columns.name = None
elif list(pred.columns) == ['0','1'] \
or list(pred.columns) == [0, 1]:
pred = pred.iloc[:, 1]
pred.name = y.name
return pred
def _pass_pred(pred):
return pred
def _mean_pred(pred):
if hasattr(pred.columns, 'levels'):
return _multioutput_pred(pred, _mean_pred)
else:
return pred.groupby(pred.columns, axis=1).mean()
def _rank_pred(pred):
if hasattr(pred.columns, 'levels'):
return _multioutput_pred(pred, _rank_pred)
else:
return pred.rank(pct=True).groupby(pred.columns, axis=1).mean()
def _soft_vote(pred):
if hasattr(pred.columns, 'levels'):
return _multioutput_vote(pred, _soft_vote)
else:
return pred.groupby(pred.columns, axis=1).mean().idxmax(axis=1)
def _hard_vote(pred):
if hasattr(pred.columns, 'levels'):
return _multioutput_vote(pred, _hard_vote)
else:
return pred.apply(lambda x: mode(x)[0][0], axis=1)
def _multioutput_vote(pred, vote):
targets = pred.columns.get_level_values(0).unique()
preds = [pred.loc[:, target] for target in targets]
preds = [vote(p) for p in preds]
pred = pd.concat(preds, axis=1)
pred.columns = targets
pred.columns.name = None
return pred
def _multioutput_pred(pred, avg):
cols = pred.columns.unique()
targets = pred.columns.get_level_values(0).unique()
preds = [pred.loc[:, target] for target in targets]
preds = [avg(p) for p in preds]
pred = pd.concat(preds, axis=1)
pred.columns = cols
return pred | /robusta-0.0.1.tar.gz/robusta-0.0.1/crossval/_predict.py | 0.724091 | 0.534673 | _predict.py | pypi |
import pandas as pd
import numpy as np
from joblib import Parallel, delayed
from datetime import datetime
from time import time
from sklearn.base import clone, is_classifier
from sklearn.model_selection import check_cv
from sklearn.metrics import check_scoring
from sklearn.utils import indexable
from robusta.utils import logmsg, ld2dl
from ._predict import _fit_predict, _check_avg, _avg_preds
from ._verbose import CVLogger
__all__ = [
'crossval',
'crossval_score',
'crossval_predict',
]
def copy(estimator):
    """Return a fresh copy of <estimator>: its own .copy() when available,
    otherwise an sklearn clone (unfitted copy with the same params)."""
    return estimator.copy() if hasattr(estimator, 'copy') else clone(estimator)
def crossval(estimator, cv, X, y, groups=None, X_new=None, new_index=None,
             scoring=None, test_avg=True, avg_type='auto', method='predict',
             return_pred=True, return_estimator=False, verbose=2, n_digits=4,
             n_jobs=None, compact=False, train_score=False, y_transform=None,
             **kwargs):
    """Evaluate metric(s) by cross-validation and also record fit/score time,
    feature importances and compute out-of-fold and test predictions.

    Parameters
    ----------
    estimator : estimator object
        The object to use to fit the data.

    cv : int, cross-validation generator or an iterable
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.

    X : DataFrame, shape [n_samples, n_features]
        The data to fit, score and calculate out-of-fold predictions

    y : Series, shape [n_samples]
        The target variable to try to predict

    groups : None
        Group labels for the samples used while splitting the dataset into
        train/test set

    X_new : DataFrame, shape [m_samples, n_features] or None
        The unseed data to predict (test set)

    new_index : iterable or None
        Indices for test set if passed X_new is not DataFrames.
        Ignored if X_new is DataFrame or None.

    test_avg : bool
        Stacking strategy (essential parameter)

        - True: bagged predictions for test set (given that we have N folds,
                we fit N models on each fold's train data, then each model
                predicts test set, then we perform bagging: compute mean of
                predicted values (for regression or class probabilities) - or
                majority vote: compute mode (when predictions are class labels)

        - False: predictions for tests set (estimator is fitted once on full
                 train set, then predicts test set)

        Ignored if return_pred=False or X_new is not defined.

    scoring : string, callable or None, optional, default: None
        A string or a scorer callable object / function with signature
        ``scorer(estimator, X, y)`` which should return only a single value.
        If None, the estimator's default scorer (if available) is used.

    avg_type : string, {'mean', 'soft', 'hard', 'auto', 'rank', 'pass'} (default='auto')
        Averaging strategy for aggregating different CV folds predictions

        - 'hard' : use predicted class labels for majority rule voting.

                   Ignored if estimator type is 'regressor'.
                   Ignored if <return_pred> set to False.
                   Ignored if <method> is not 'predict'.

        - 'soft' : predicts the class label based on the argmax of the sums
                   of the predicted probabilities, which is recommended for
                   an ensemble of well-calibrated classifiers.

                   Ignored if estimator type is 'regressor'.
                   Ignored if <return_pred> set to False.
                   Ignored if <method> is not 'predict'.

        - 'auto' : use simple averaging for regressor's predcitions and for
                   classifier's probabilities (if <method> is 'predict_proba');

                   if estimator type is 'classifier' and <method> is 'predict',
                   set <averaging> to 'soft' for classifier with <predict_proba>
                   attribute, set <averaging> to 'hard' for other.

                   Ignored if <return_pred> set to False.

        - 'rank' : ranking probabilities along fold and averaging.

                   Prefered for scoring like 'AUC-ROC'.

        - 'pass' : leave predictions of different folds separated.

                   Column '_FOLD' will be added.

        - 'mean' : simple averaging of classifier's probabilities or
                   regressor's predictions.

        Ignored if <return_pred> set to False, or <method> is not 'predict'.

    method : string, optional, default: 'predict'
        Invokes the passed method name of the passed estimator. For
        method='predict_proba', the columns correspond to the classes
        in sorted order.

        Ignored if return_pred=False.

    return_pred : bool (default=False)
        Return out-of-fold predictions (and test predictions, if X_new is defined)

    return_estimator : bool (default=False)
        Return fitted estimators

    n_jobs : int or None, optional (default=-1)
        The number of jobs to run in parallel. None means 1.

    verbose : int (default=1)
        Verbosity level

    n_digits : int (default=4)
        Verbose score(s) precision

    compact : bool (default=False)
        Print verbose in one line. Useful for evaluating series of estimators.

    train_score : bool (default=False)
        If True, print and return train score for each fold.

    y_transform : callable (default=None)
        Transform target before fit

    Returns
    -------
    result : dict of array, float or Series
        Array of scores/predictions/time of the estimator for each run of the
        cross validation. If test_avg=True, arrays has shape [n_splits],
        otherwise [n_splits+1] except score & score_time.

        The possible keys for this ``dict`` are:

            ``fold`` : list of pair of list
                Two lists with trn/oof indices

            ``scorer`` : scorer object
                Func with signature scorer(estimator, X, y)

            ``val_score`` : array or dict of array, shape [n_splits]
                The score array for test scores on each cv split.
                If multimetric, return dict of array.

            ``trn_score`` : array or dict of array, shape [n_splits]
                The score array for train scores on each cv split.
                If multimetric, return dict of array.

            ``oof_pred`` : Series, shape [n_samples]
                Out-of-fold predictions.
                Ignored if return_pred=False.

            ``new_pred`` : Series, shape [m_samples]
                Test predictions (unseen data).
                Ignored if return_pred=False.

            ``fit_time`` : array of float, shape [n_splits] or [n_splits+1]
                The time for fitting the estimator on the train
                set for each cv split.

            ``pred_time`` : array of float, shape [n_splits] or [n_splits+1]
                Out-of-fold and test predictions time.
                Ignored if return_pred=False.

            ``score_time`` : array of float, shape [n_splits]
                Out-of-fold scores time for each cv split.

            ``concat_time`` : float
                Extra time spent on concatenation of predictions, importances
                or scores dictionaries. Ignored if all of return_pred,
                return_importance, return_score are set to False.

            ``estimator`` : list of estimator object, shape [n_splits] or [n_splits+1]
                The fitted estimator objects for each cv split (and ).
                Ignored if return_estimator=False.

            ``importance`` : list of arrays, shape [n_splits, n_features]
                List of importances. If estimator has <coef_> attribute,
                return np.abs(coef_).

            ``features`` : list, shape [n_features]
                List of features.
    """
    # Check parameters
    # NOTE(review): indexable(X_new, None) — behaviour with None entries
    # varies across sklearn versions; confirm against the pinned version.
    X, y, groups = indexable(X, y, groups)
    X_new, _ = indexable(X_new, None)

    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    avg, method = _check_avg(estimator, avg_type, method)
    scorer = check_scoring(estimator, scoring)

    # Fit & predict
    logger = CVLogger(estimator, cv, verbose, n_digits, compact)
    logger.start()

    # NOTE(review): require='sharedmem' presumably forces a shared-memory
    # (threading) backend so workers can write to the common logger — confirm.
    parallel = Parallel(max_nbytes='256M', pre_dispatch='2*n_jobs',
                        n_jobs=n_jobs, require='sharedmem')

    if test_avg:

        # Stacking Type A (test averaging = True)
        # Each fold's model also predicts X_new; fold predictions are averaged below.
        result = parallel(
            delayed(_fit_predict)(
                copy(estimator), method, scorer, X, y, X_new, new_index,
                trn, oof, return_estimator, return_pred, fold, logger,
                train_score, y_transform)
            for fold, (trn, oof) in enumerate(cv.split(X, y, groups)))

        result = ld2dl(result)

    else:

        # Stacking Type B (test_averaging = False)
        # Folds produce OOF predictions only; X_new is predicted by a single
        # extra fit on the full train set (fold index -1, no scoring).
        result = parallel(
            (delayed(_fit_predict)(
                copy(estimator), method, scorer, X, y, None, None, trn, oof,
                return_estimator, return_pred, fold, logger, train_score,
                y_transform)
            for fold, (trn, oof) in enumerate(cv.split(X, y, groups))))

        if verbose >= 2:
            print()
            logmsg('Fitting full train set...')

        result_new = _fit_predict(copy(estimator), method, None, X, y, X_new,
                                  new_index, None, None, return_estimator,
                                  return_pred, -1, logger, train_score,
                                  y_transform)

        result = ld2dl(result)
        # Append the full-train run as the last entry of each per-fold list
        for key, val in result_new.items():
            if key in result:
                result[key].append(val)
            else:
                result[key] = [val]

    # Concat Predictions (& Feature Importances)
    needs_concat = ['oof_pred', 'new_pred', 'importance', 'val_score', 'trn_score']
    if np.any(np.in1d(needs_concat, list(result))):

        tic = time()

        if 'oof_pred' in result:
            oof_preds = result['oof_pred']
            oof_pred = _avg_preds(oof_preds, avg, X, y, y.index)
            result['oof_pred'] = oof_pred

        if 'new_pred' in result:
            new_preds = result['new_pred']
            new_pred = _avg_preds(new_preds, avg, X_new, y, new_index)
            result['new_pred'] = new_pred

        for key in ['fit_time', 'score_time', 'pred_time']:
            if key in result:
                result[key] = np.array(result[key])

        result['concat_time'] = time() - tic

    if hasattr(X, 'columns'): result['features'] = list(X.columns.values)
    result['datetime'] = datetime.now()
    result['scorer'] = scorer
    result['cv'] = cv

    # Final score
    logger.end(result)

    # Additional kwargs
    # Any extra keyword arguments are stored in the result dict as-is
    result.update(kwargs)

    return result
def crossval_score(estimator, cv, X, y, groups=None, scoring=None, n_jobs=None,
                   verbose=2, n_digits=4, compact=False, train_score=False,
                   target_func=None):
    """Evaluate metric(s) by cross-validation and also record fit/score time,
    feature importances and compute out-of-fold and test predictions.

    Parameters
    ----------
    estimator : estimator object
        The object to use to fit the data.

    cv : int, cross-validation generator or an iterable
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.

    X : DataFrame, shape [n_samples, n_features]
        The data to fit, score and calculate out-of-fold predictions

    y : Series, shape [n_samples]
        The target variable to try to predict

    groups : None
        Group labels for the samples used while splitting the dataset into
        train/test set

    scoring : string, callable or None, optional, default: None
        A string or a scorer callable object / function with signature
        ``scorer(estimator, X, y)`` which should return only a single value.
        If None, the estimator's default scorer (if available) is used.
        Ignored if return_score=False.

    n_jobs : int or None, optional (default=-1)
        The number of jobs to run in parallel. None means 1.

    verbose : int (default=1)
        Verbosity level

    n_digits : int (default=4)
        Verbose score(s) precision

    compact : bool (default=False)
        Print verbose in one line. Useful for evaluating series of estimators.

    train_score : bool (default=False)
        If True, print train score for each fold.

    target_func : callable (default=None)
        Transform applied to the target before fitting
        (forwarded to crossval's <y_transform>).

    Returns
    -------
    scores : list of float
        Rows are splits. If multimetric, return DataFrame, where each column
        represents different metric.
    """
    # Fix: forward <target_func> as crossval's <y_transform>. Previously it
    # was passed as an unknown kwarg, which only stored it in the result dict
    # and never applied the transform.
    result = crossval(estimator, cv, X, y, groups, n_digits=n_digits,
                      scoring=scoring, n_jobs=n_jobs, verbose=verbose,
                      return_pred=False, compact=compact,
                      train_score=train_score, y_transform=target_func)

    scores = result['val_score']
    return scores
def crossval_predict(estimator, cv, X, y, groups=None, X_new=None, new_index=None,
                     test_avg=True, avg_type='auto', method='predict', scoring=None,
                     n_jobs=None, verbose=0, n_digits=4, compact=False,
                     train_score=False, target_func=None):
    """Get Out-of-Fold and Test predictions.

    Parameters
    ----------
    estimator : estimator object
        The object to use to fit the data.

    cv : int, cross-validation generator or an iterable
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    X : DataFrame, shape [n_samples, n_features]
        The data to fit, score and calculate out-of-fold predictions

    y : Series, shape [n_samples]
        The target variable to try to predict

    groups : None
        Group labels for the samples used while splitting the dataset into
        train/test set

    X_new : DataFrame, shape [m_samples, n_features] or None
        The unseed data to predict (test set)

    new_index : iterable or None
        Indices for test set if passed X_new is not DataFrames.
        Ignored if X_new is DataFrame or None.

    test_avg : bool
        Stacking strategy (essential parameter)

        - True: bagged predictions for test set (given that we have N folds,
                we fit N models on each fold's train data, then each model
                predicts test set, then we perform bagging: compute mean of
                predicted values (for regression or class probabilities) - or
                majority vote: compute mode (when predictions are class labels)

        - False: predictions for tests set (estimator is fitted once on full
                 train set, then predicts test set)

    avg_type : string, {'soft', 'hard', 'rank', 'auto', 'pass'} (default='auto')
        Averaging strategy for aggregating different CV folds predictions

        - 'hard' : use predicted class labels for majority rule voting.

                   Ignored if estimator type is 'regressor'.
                   Ignored if <method> is not 'predict'.

        - 'soft' : predicts the class label based on the argmax of the sums
                   of the predicted probabilities, which is recommended for
                   an ensemble of well-calibrated classifiers.

                   Ignored if estimator type is 'regressor'.
                   Ignored if <method> is not 'predict'.

        - 'rank' : ranking probabilities along fold and averaging.

                   Prefered for scoring like 'AUC-ROC'.

        - 'auto' : use simple averaging for regressor's predcitions and for
                   classifier's probabilities (if <method> is 'predict_proba');

                   if estimator type is 'classifier' and <method> is 'predict',
                   set <averaging> to 'soft' for classifier with <predict_proba>
                   attribute, set <averaging> to 'hard' for other.

        - 'pass' : leave predictions of different folds separated.

                   Column '_FOLD' will be added.

        - 'mean' : simple averaging of classifier's probabilities or
                   regressor's predictions.

        Ignored if <return_pred> set to False, or <method> is not 'predict'.

    method : string, optional, default: 'predict'
        Invokes the passed method name of the passed estimator. For
        method='predict_proba', the columns correspond to the classes
        in sorted order.

    scoring : string, callable or None, optional, default: None
        A string or a scorer callable object / function with signature
        ``scorer(estimator, X, y)`` which should return only a single value.
        If None, the estimator's default scorer (if available) is used.

    n_jobs : int or None, optional (default=-1)
        The number of jobs to run in parallel. None means 1.

    verbose : int (default=1)
        Verbosity level

    n_digits : int (default=4)
        Verbose score(s) precision

    compact : bool (default=False)
        Print verbose in one line. Useful for evaluating series of estimators.

    train_score : bool (default=False)
        If True, print train score for each fold.

    target_func : callable (default=None)
        Transform applied to the target before fitting
        (forwarded to crossval's <y_transform>).

    Returns
    -------
    oof_pred : Series, shape [n_samples]
        Out-of-fold predictions

    new_pred : Series, shape [m_samples] or None
        Test predictions (unseen data)
        None if X_new is not defined
    """
    # Fix: forward <target_func> as crossval's <y_transform>. Previously it
    # was passed as an unknown kwarg, which only stored it in the result dict
    # and never applied the transform.
    result = crossval(estimator, cv, X, y, groups, X_new=X_new, scoring=scoring,
                      avg_type=avg_type, method=method, test_avg=test_avg,
                      n_jobs=n_jobs, verbose=verbose, n_digits=n_digits,
                      compact=compact, train_score=train_score,
                      y_transform=target_func)

    oof_pred = result['oof_pred'] if 'oof_pred' in result else None
    new_pred = result['new_pred'] if 'new_pred' in result else None

    return oof_pred, new_pred
from sklearn.utils.random import check_random_state
from sklearn.model_selection import check_cv
from sklearn.metrics import check_scoring
from sklearn.base import (
BaseEstimator,
MetaEstimatorMixin,
clone,
is_classifier,
)
from tqdm import tqdm_notebook as tqdm
from .importance import get_importance
from ..crossval import crossval
import numpy as np
def _shuffle_data(*data, seed):
x_index = data[0].index
data_ = []
for x in data:
if x is not None:
x = x.sample(frac=1, random_state=seed)
x.index = x_index
data_.append(x)
return data_
class ShuffleTargetImportance(BaseEstimator, MetaEstimatorMixin):
    """Shuffle Target importance for feature evaluation.

    Parameters
    ----------
    estimator : object
        The base estimator. This can be both a fitted
        (if ``prefit`` is set to True) or a non-fitted estimator.

    cv : int, cross-validation generator or an iterable
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

            - None, to use the default 3-fold cross validation,
            - integer, to specify the number of folds in a `(Stratified)KFold`,
            - :term:`CV splitter`,
            - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.

    scoring : string, callable or None, default=None
        Scoring function to use for computing feature importances.
        A string with scoring name (see scikit-learn docs) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
        If ``None``, the ``score`` method of the estimator is used.

    n_repeats : int, default 5
        The number of random shuffle iterations. Decrease to improve speed,
        increase to get more precise estimates.

    mode : {'dif', 'div'}, default='dif'
        How to calculate mode between importances after shuffling and benchmark.

        - 'dif': for difference between importances (absolute mode).
        - 'div': for division between importances (relative mode).

    tqdm : bool, default=False
        Whether to display <tqdm_notebook> progress bar while iterating
        through out dataset columns.

    verbose : int, default=0
        Verbosity level

    n_jobs : int, default -1
        The number of jobs to run in parallel. None means 1.

    random_state : integer or numpy.random.RandomState, optional
        Pseudo-random number generator to control the permutations of each feature.

    cv_kwargs : dict
        Key arguments for inner crossval function

    Attributes
    ----------
    feature_importances_ : Series, shape (n_groups, )
        Feature importances, computed as mean decrease of the importance when
        a target is shuffled (i.e. becomes noise).

    feature_importances_std_ : Series, shape (n_groups, )
        Standard deviations of feature importances.

    raw_importances_ : list of Dataframes, shape (n_folds, n_groups, n_repeats)

    scores_ : list of floats, shape (n_folds, )
    """
    def __init__(self, estimator, cv, scoring=None, n_repeats=5, mode='dif',
                 tqdm=False, verbose=0, n_jobs=None, random_state=None,
                 cv_kwargs={}):

        # NOTE(review): mutable default <cv_kwargs={}> is shared across
        # instances — safe only while it is never mutated; confirm.
        self.estimator = estimator
        self.scoring = scoring
        self.cv = cv
        self.cv_kwargs = cv_kwargs
        self.n_repeats = n_repeats
        self.mode = mode
        self.tqdm = tqdm
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state

    def fit(self, X, y, groups=None, **fit_params):

        rstate = check_random_state(self.random_state)

        msg = "<n_repeats> must be positive integer"
        assert isinstance(self.n_repeats, int) and self.n_repeats > 0, msg

        msg = "<mode> must be in {'dif', 'div'}"
        assert self.mode in ['dif', 'div'], msg

        self.bench_importances_ = []
        self.shuff_importances_ = []
        self.raw_importances_ = []

        self.bench_scores_ = []
        self.shuff_scores_ = []
        self.scores_ = []

        iters = range(self.n_repeats)
        # <self.tqdm> is the boolean flag; the call refers to the imported
        # progress-bar function of the same name
        iters = tqdm(iters) if self.tqdm else iters

        for _ in iters:

            # randint's upper bound is exclusive: seeds lie in [0, 2**32 - 2]
            seed = rstate.randint(2**32-1)
            # NOTE(review): expects one return value per argument (including
            # a None <groups>) from _shuffle_data — confirm.
            X_, y_, groups_ = _shuffle_data(X, y, groups, seed=seed)

            # Benchmark: X and y permuted with the same seed, so the
            # feature-target relationship stays intact
            result = crossval(self.estimator, self.cv, X_, y_, groups_,
                              scoring=self.scoring, verbose=self.verbose,
                              return_estimator=True, return_pred=False,
                              n_jobs=self.n_jobs, **self.cv_kwargs)

            for e in result['estimator']:
                self.bench_importances_.append(get_importance(e) + 1) # +1 to avoid 0/0

            self.scores_.append(np.mean(result['val_score']))
            self.bench_scores_.append(result['val_score'])

            # Shuffle: original X against permuted y, so the target is
            # effectively noise relative to the features
            result = crossval(self.estimator, self.cv, X, y_, groups,
                              scoring=self.scoring, verbose=self.verbose,
                              return_estimator=True, return_pred=False,
                              n_jobs=self.n_jobs, **self.cv_kwargs)

            for e in result['estimator']:
                self.shuff_importances_.append(get_importance(e) + 1) # +1 to avoid 0/0

            self.shuff_scores_.append(result['val_score'])

        # Relative/Absolute Mode
        for b, s in zip(self.bench_importances_, self.shuff_importances_):
            if self.mode == 'dif': self.raw_importances_.append(b - s)
            if self.mode == 'div': self.raw_importances_.append(b / s)

        imps = self.raw_importances_
        self.feature_importances_ = np.average(imps, axis=0)
        self.feature_importances_std_ = np.std(imps, axis=0)

        return self
import pandas as pd
import numpy as np
from pympler.asizeof import asizeof
from itertools import combinations, chain
def all_subsets(cols, k_range):
    """Yield every combination of ``cols`` whose length is drawn from ``k_range``."""
    return chain.from_iterable(combinations(cols, size) for size in k_range)
def get_ranks(arr, normalize=False):
    """Return the 0-based rank of each element of ``arr``.

    With ``normalize=True`` the ranks are divided by their total, so for
    arrays of length > 1 they sum to 1.
    """
    ranks = np.array(arr)
    order = np.argsort(ranks)
    ranks[order] = np.arange(len(ranks))
    if normalize:
        return ranks / sum(ranks)
    return ranks
def bytefmt(n, rnd=True):
    '''Convert number of bytes to string.

    Parameters
    ----------
    n : int
        Number of bytes

    rnd : bool, default=True
        If True, round to the largest unit,
        e.g. '9 KB 784 bytes' -> '9.8 KB'.
        Otherwise list every non-zero unit.

    Returns
    -------
    s : string
        Formatted string
    '''
    byte_units = {
        'TB': 2**40,
        'GB': 2**30,
        'MB': 2**20,
        'KB': 2**10,
        'bytes': 2**0,
    }

    if n == 0:
        # Previously 0 produced an empty string; an explicit value is clearer.
        return '0 bytes'

    parts = []

    for unit, base in byte_units.items():
        if n // base > 0:

            k = n // base
            n = n % base

            if rnd:
                # Fold the remainder into one rounded figure and stop
                # accumulating smaller units.
                k += round(n / base, 1)
                n = 0

            if k == int(k):
                k = int(k)

            parts.append('{} {}'.format(k, unit))

    return ' '.join(parts)
def sizeof(obj, fmt=True, rnd=True):
    '''Return full size of (nested) object.

    Parameters
    ----------
    obj : object
        Any object to determine memory size

    fmt : bool, default=True
        If True, return formatted size (string). E.g. 10000 -> '9.8 KB'.
        Otherwise return number of bytes (integer).

    rnd : bool, default=True
        If True, return number of bytes, rounded to largest unit.
        E.g. '9 KB 784 bytes' -> '9.8 KB'.
        Ignored if <fmt> set to False.

    Returns
    -------
    s : int or string
        Formatted string or exact number of bytes
    '''
    n_bytes = asizeof(obj)  # deep (recursive) size via pympler
    if not fmt:
        return n_bytes
    return bytefmt(n_bytes, rnd)
def ld2dl(ld):
    '''Convert list of dict to dict of list

    Parameters
    ----------
    ld : list of dict
        List of homogeneous dictionaries (all sharing the keys of the
        first element)

    Returns
    -------
    dl : dict of list
        Dictionary of lists of equal length; empty dict for empty input
    '''
    # Guard the empty case: ld[0] below would raise IndexError.
    if not ld:
        return {}
    return {key: [d[key] for d in ld] for key in ld[0].keys()}
import os, sys
import tempfile
import weakref
import rpy2.rinterface
# Initialize the embedded R engine as an import-time side effect; everything
# below assumes a running R session.
rpy2.rinterface.initr()
from . import conversion
class RSlots(object):
    """ Attributes of an R object as a Python mapping.

    The parent proxy to the underlying R object is held as a
    weak reference. The attributes are therefore not protected
    from garbage collection unless bound to a Python symbol or
    in an other container.
    """

    __slots__ = ['_robj', ]

    def __init__(self, robj):
        # Weak proxy: RSlots must not keep the R object alive by itself.
        self._robj = weakref.proxy(robj)

    def __getitem__(self, key):
        return conversion.ri2ro(self._robj.do_slot(key))

    def __setitem__(self, key, value):
        self._robj.do_slot_assign(key, conversion.py2ri(value))

    def __len__(self):
        return len(self._robj.list_attrs())

    def keys(self):
        yield from self._robj.list_attrs()

    __iter__ = keys

    def items(self):
        for name in self._robj.list_attrs():
            yield (name, self[name])

    def values(self):
        for name in self._robj.list_attrs():
            yield self[name]
class RObjectMixin(object):
    """ Class to provide methods common to all RObject instances. """
    __rname__ = None

    # R functions resolved once from the base environment at class-creation
    # time; used by __str__, r_repr and the rclass property below.
    __tempfile = rpy2.rinterface.baseenv.get("tempfile")
    __file = rpy2.rinterface.baseenv.get("file")
    __fifo = rpy2.rinterface.baseenv.get("fifo")
    __sink = rpy2.rinterface.baseenv.get("sink")
    __close = rpy2.rinterface.baseenv.get("close")
    __readlines = rpy2.rinterface.baseenv.get("readLines")
    __unlink = rpy2.rinterface.baseenv.get("unlink")
    __rclass = rpy2.rinterface.baseenv.get("class")
    __rclass_set = rpy2.rinterface.baseenv.get("class<-")
    __show = rpy2.rinterface.baseenv.get("show")

    # Lazily-created RSlots mapping (see the `slots` property).
    __slots = None

    @property
    def slots(self):
        """ Attributes of the underlying R object as a Python mapping.

        The attributes can accessed and assigned by name (as if they
        were in a Python `dict`)."""
        if self.__slots is None:
            self.__slots = RSlots(self)
        return self.__slots

    def __repr__(self):
        try:
            rclasses = ('R object with classes: {} mapped to:'
                        .format(tuple(self.rclass)))
        # Fixed: this was a bare `except:`, which would also swallow
        # KeyboardInterrupt / SystemExit.
        except Exception:
            rclasses = 'Unable to fetch R classes.' + os.linesep
        res = os.linesep.join((rclasses,
                               super(RObjectMixin, self).__repr__()))
        return res

    def __str__(self):
        # On Windows the R console callback is not reliable, so printing is
        # captured by sinking R output to a temporary file; elsewhere a
        # Python callback collects console chunks in a list.
        if sys.platform == 'win32':
            tmpf = tempfile.NamedTemporaryFile(mode="w+", delete=False)
            tfname = tmpf.name
            tmp = self.__file(rpy2.rinterface.StrSexpVector([tfname,]),
                              open=rpy2.rinterface.StrSexpVector(["r+", ]))
            self.__sink(tmp)
        else:
            writeconsole = rpy2.rinterface.get_writeconsole_regular()
            s = []
            def f(x):
                s.append(x)
            rpy2.rinterface.set_writeconsole_regular(f)
        self.__show(self)
        if sys.platform == 'win32':
            self.__sink()
            # NOTE(review): the temp file is read before the R connection is
            # closed; presumably the sink flushed already — confirm on Windows.
            s = tmpf.readlines()
            tmpf.close()
            self.__close(tmp)
            try:
                del tmpf
                os.unlink(tfname)
            except WindowsError:
                if os.path.exists(tfname):
                    print('Unable to unlink tempfile %s' % tfname)
            s = str.join(os.linesep, s)
        else:
            # Restore the original console callback before joining chunks.
            rpy2.rinterface.set_writeconsole_regular(writeconsole)
            s = str.join('', s)
        return s

    def __getstate__(self, ):
        # Pickle both the serialized R payload and the Python-side __dict__.
        return (super().__getstate__(), self.__dict__.copy())

    def __setstate__(self, state):
        rds, __dict__ = state
        super().__setstate__(rds)
        self.__dict__.update(__dict__)

    def r_repr(self):
        """ String representation for an object that can be
        directly evaluated as R code.
        """
        return repr_robject(self, linesep='\n')

    def _rclass_get(self):
        try:
            res = self.__rclass(self)
            #res = conversion.ri2py(res)
            return res
        except rpy2.rinterface.RRuntimeError as rre:
            if self.typeof == rpy2.rinterface.SYMSXP:
                #unevaluated expression: has no class
                return (None, )
            else:
                raise rre

    def _rclass_set(self, value):
        if isinstance(value, str):
            value = (value, )
        new_cls = rpy2.rinterface.StrSexpVector(value)
        res = self.__rclass_set(self, new_cls)
        # `class<-` may return a modified copy; adopt its SEXP in place.
        self.__sexp__ = res.__sexp__

    rclass = property(_rclass_get, _rclass_set, None,
                      """
     R class for the object, stored as an R string vector.

     When setting the rclass, the new value will be:

     - wrapped in a Python tuple if a string (the R class
       is a vector of strings, and this is made for convenience)

     - wrapped in a StrSexpVector

     Note that when setting the class R may make a copy of
     the whole object (R is mostly a functional language).
     If this must be avoided, and if the number of parent
     classes before and after the change are compatible,
     the class name can be changed in-place by replacing
     vector elements.""")
def repr_robject(o, linesep=os.linesep):
    """Deparse an R object into R source text, joining lines with ``linesep``."""
    deparsed = rpy2.rinterface.baseenv.get("deparse")(o)
    return str.join(linesep, deparsed)
class RObject(RObjectMixin, rpy2.rinterface.Sexp):
    """ Base class for all non-vector R objects. """

    def __setattr__(self, name, value):
        # Guard assignments to `_sexp`: it must wrap a low-level R object.
        if name == '_sexp':
            if not isinstance(value, rpy2.rinterface.Sexp):
                # Fixed message: it previously said "_attr" (the guarded
                # attribute is `_sexp`) and lacked a space before "(not".
                raise ValueError("_sexp must contain an object " +\
                                 "that inherits from rpy2.rinterface.Sexp " +\
                                 "(not from %s)" % type(value))
        super(RObject, self).__setattr__(name, value)
import os
import warnings
from types import ModuleType
from collections import defaultdict
from warnings import warn
import rpy2.rinterface as rinterface
import rpy2.robjects.lib
from . import conversion
from rpy2.robjects.functions import (SignatureTranslatedFunction,
docstring_property,
DocumentedSTFunction)
from rpy2.robjects.constants import NULL
from rpy2.robjects import Environment
from rpy2.robjects.packages_utils import (_libpaths,
get_packagepath,
_packages,
default_symbol_r2python,
default_symbol_check_after,
_map_symbols,
_fix_map_symbols)
import rpy2.robjects.help as rhelp
# Frequently-used base-R functions/values, bound once at import time.
_require = rinterface.baseenv['require']
_library = rinterface.baseenv['library']
_as_env = rinterface.baseenv['as.environment']
_package_has_namespace = rinterface.baseenv['packageHasNamespace']
_system_file = rinterface.baseenv['system.file']
_get_namespace = rinterface.baseenv['getNamespace']
_get_namespace_version = rinterface.baseenv['getNamespaceVersion']
_get_namespace_exports = rinterface.baseenv['getNamespaceExports']
_loaded_namespaces = rinterface.baseenv['loadedNamespaces']
_globalenv = rinterface.globalenv
_new_env = rinterface.baseenv["new.env"]

StrSexpVector = rinterface.StrSexpVector

# Fetching symbols in the namespace "utils" assumes that "utils" is loaded
# (currently the case by default in R).
_data = rinterface.baseenv['::'](StrSexpVector(('utils', )),
                                 StrSexpVector(('data', )))

_reval = rinterface.baseenv['eval']
_options = rinterface.baseenv['options']
def no_warnings(func):
    """ Decorator to run R functions without warning.

    The R option `warn` is set to -1 around the call and restored
    afterwards, whether or not the wrapped function raises.
    """
    def run_withoutwarnings(*args, **kwargs):
        warn_i = _options().do_slot('names').index('warn')
        oldwarn = _options()[warn_i][0]
        _options(warn = -1)
        try:
            res = func(*args, **kwargs)
        finally:
            # try/finally replaces the previous duplicated
            # restore-then-re-raise except block; the original exception
            # (and traceback) propagates unchanged.
            _options(warn = oldwarn)
        return res
    return run_withoutwarnings
@no_warnings
def _eval_quiet(expr):
    # Evaluate a parsed R expression with R warnings temporarily disabled.
    return _reval(expr)
# FIXME: should this be part of the API for rinterface ?
# (may be it is already the case and there is code
# duplicaton ?)
def reval(string, envir = _globalenv):
    """ Evaluate a string as R code

    :param string: R code
    :type string: a :class:`str`
    :param envir: an environment in which the environment should take place (default: R's global environment)
    """
    parsed = rinterface.parse(string)
    return _reval(parsed, envir = envir)
def quiet_require(name, lib_loc = None):
    """ Load an R package /quietly/ (suppressing messages to the console).

    :param name: R package name (interpolated into R code unquoted)
    :param lib_loc: optional library path, quoted and escaped for R
    :return: R logical vector from base::require (truthy when loaded)
    """
    # Fixed: identity comparison with None (`is None`), not `== None`.
    if lib_loc is None:
        lib_loc = "NULL"
    else:
        lib_loc = "\"%s\"" % (lib_loc.replace('"', '\\"'))
    expr_txt = "suppressPackageStartupMessages(base::require(%s, lib.loc=%s))" \
               %(name, lib_loc)
    expr = rinterface.parse(expr_txt)
    ok = _eval_quiet(expr)
    return ok
class PackageData(object):
    """ Datasets in an R package.
    In R datasets can be distributed with a package.

    Datasets can be:

    - serialized R objects

    - R code (that produces the dataset)

    For a given R packages, datasets are stored separately from the rest
    of the code and are evaluated/loaded lazily.

    The lazy aspect has been conserved and the dataset are only loaded
    or generated when called through the method 'fetch()'.
    """
    _packagename = None
    _lib_loc = None
    _datasets = None

    def __init__(self, packagename, lib_loc = rinterface.NULL):
        self._packagename = packagename
        # Fixed: the parameter was previously only *evaluated*
        # (`self._lib_loc` on its own line) and never assigned, so the
        # class-level default always remained in effect.
        self._lib_loc = lib_loc

    def _init_setlist(self):
        _datasets = dict()
        # 2D array of information about datatsets
        tmp_m = _data(**{'package':StrSexpVector((self._packagename, )),
                         'lib.loc': self._lib_loc})[2]
        nrows, ncols = tmp_m.do_slot('dim')
        c_i = 2
        for r_i in range(nrows):
            # Column c_i holds the dataset names (column-major indexing).
            _datasets[tmp_m[r_i + c_i * nrows]] = None
            # FIXME: check if instance methods are overriden
        self._datasets = _datasets

    def names(self):
        """ Names of the datasets"""
        if self._datasets is None:
            self._init_setlist()
        return self._datasets.keys()

    def fetch(self, name):
        """ Fetch the dataset (loads it or evaluates the R associated
        with it.

        In R, datasets are loaded into the global environment by default
        but this function returns an environment that contains the dataset(s).
        """
        #tmp_env = rinterface.SexpEnvironment()
        if self._datasets is None:
            self._init_setlist()

        if name not in self._datasets:
            raise ValueError('Data set "%s" cannot be found' % name)
        env = _new_env()
        _data(StrSexpVector((name, )),
              **{'package': StrSexpVector((self._packagename, )),
                 'lib.loc': self._lib_loc,
                 'envir': env})
        return Environment(env)
class Package(ModuleType):
    """ Models an R package
    (and can do so from an arbitrary environment - with the caution
    that locked environments should mostly be considered).
    """
    _env = None
    __rname__ = None
    _translation = None
    _rpy2r = None
    __fill_rpy2r__ = None
    __update_dict__ = None
    _exported_names = None
    _symbol_r2python = None
    __version__ = None
    __rdata__ = None

    def __init__(self, env, name, translation = {},
                 exported_names = None, on_conflict = 'fail',
                 version = None,
                 symbol_r2python = default_symbol_r2python,
                 symbol_check_after = default_symbol_check_after):
        """ Create a Python module-like object from an R environment,
        using the specified translation if defined.

        - env: R environment
        - name: package name
        - translation: `dict` with R names as keys and corresponding Python
          names as values
        - exported_names: `set` of names/symbols to expose to instance users
        - on_conflict: 'fail' or 'warn' (default: 'fail')
        - version: version string for the package
        - symbol_r2python: function to convert R symbols into Python symbols.
          The default translate `.` into `_`.
        - symbol_check_after: function to check the Python symbols obtained
          from `symbol_r2python`.
        """
        super(Package, self).__init__(name)
        self._env = env
        self.__rname__ = name
        # NOTE: `translation` is only read here and in __fill_rpy2r__, so the
        # mutable default dict is shared but never mutated.
        self._translation = translation
        mynames = tuple(self.__dict__)
        self._rpy2r = {}
        if exported_names is None:
            exported_names = set(self._env.keys())
        self._exported_names = exported_names
        self._symbol_r2python = symbol_r2python
        self._symbol_check_after = symbol_check_after
        self.__fill_rpy2r__(on_conflict = on_conflict)
        # Do not advertise attributes that were already on the module object.
        self._exported_names = self._exported_names.difference(mynames)
        self.__version__ = version

    def __update_dict__(self, on_conflict = 'fail'):
        """ Update the __dict__ according to what is in the R environment """
        # Drop previously-mapped R symbols before re-filling.
        for elt in self._rpy2r:
            del(self.__dict__[elt])
        self._rpy2r.clear()
        self.__fill_rpy2r__(on_conflict = on_conflict)

    def __fill_rpy2r__(self, on_conflict = 'fail'):
        """ Fill the attribute _rpy2r.

        - on_conflict: 'fail' or 'warn' (default: 'fail')
        """
        assert(on_conflict in ('fail', 'warn'))

        name = self.__rname__
        (symbol_mapping,
         conflicts,
         resolutions) = _map_symbols(self._env,
                                     translation = self._translation,
                                     symbol_r2python = self._symbol_r2python,
                                     symbol_check_after = self._symbol_check_after)
        msg_prefix = 'Conflict when converting R symbols'+\
                     ' in the package "%s"' % self.__rname__ +\
                     ' to Python symbols: \n-'
        exception = LibraryError
        _fix_map_symbols(symbol_mapping,
                         conflicts,
                         on_conflict,
                         msg_prefix,
                         exception)
        symbol_mapping.update(resolutions)
        reserved_pynames = set(dir(self))
        for rpyname, rnames in symbol_mapping.items():
            # last paranoid check
            if len(rnames) > 1:
                raise ValueError('Only one R name should be associated with %s (and we have %s)' % (rpyname, str(rnames)))
            rname = rnames[0]
            if rpyname in reserved_pynames:
                raise LibraryError('The symbol ' + rname +\
                                   ' in the package "' + name + '"' +\
                                   ' is conflicting with' +\
                                   ' a Python object attribute')
            self._rpy2r[rpyname] = rname
            if (rpyname != rname) and (rname in self._exported_names):
                self._exported_names.remove(rname)
                self._exported_names.add(rpyname)
            try:
                riobj = self._env[rname]
            except rinterface.RRuntimeError as rre:
                warn(str(rre))
                # Fixed: previously execution fell through and used the
                # unbound `riobj`, raising NameError; skip the symbol instead.
                continue
            rpyobj = conversion.ri2ro(riobj)
            if hasattr(rpyobj, '__rname__'):
                rpyobj.__rname__ = rname
            #FIXME: shouldn't the original R name be also in the __dict__ ?
            self.__dict__[rpyname] = rpyobj

    def __repr__(self):
        s = super(Package, self).__repr__()
        return 'rpy2.robjects.packages.Package as a ' + s
# alias (short name for the signature-translating function wrapper)
STF = SignatureTranslatedFunction
class SignatureTranslatedPackage(Package):
    """ R package in which the R functions had their signatures
    'translated' (that is, the named parameters were renamed to
    conform to Python's rules for variable names)."""
    def __fill_rpy2r__(self, on_conflict = 'fail'):
        # First map the symbols as usual, then wrap every R closure in a
        # SignatureTranslatedFunction so its argument names become valid
        # Python identifiers.
        super(SignatureTranslatedPackage, self).__fill_rpy2r__(on_conflict = on_conflict)
        for name, robj in self.__dict__.items():
            if isinstance(robj, rinterface.Sexp) and robj.typeof == rinterface.CLOSXP:
                # Reassigning existing keys does not resize the dict, so
                # updating values while iterating .items() is safe here.
                self.__dict__[name] = STF(self.__dict__[name],
                                          on_conflict = on_conflict,
                                          symbol_r2python = self._symbol_r2python,
                                          symbol_check_after = self._symbol_check_after)
# alias (short name for SignatureTranslatedPackage)
STP = SignatureTranslatedPackage
class SignatureTranslatedAnonymousPackage(SignatureTranslatedPackage):
    """Package built from a string of R source code, evaluated in a fresh
    environment rather than loaded from an installed library."""

    def __init__(self, string, name):
        env = Environment()
        reval(string, env)
        super(SignatureTranslatedAnonymousPackage, self).__init__(env, name)
# alias (short name for SignatureTranslatedAnonymousPackage)
STAP = SignatureTranslatedAnonymousPackage
class InstalledSTPackage(SignatureTranslatedPackage):
    # Signature-translated package whose docstring and function docs are
    # pulled from the installed R help pages.
    @docstring_property(__doc__)
    def __doc__(self):
        doc = list(['Python representation of an R package.'])
        if not self.__rname__:
            doc.append('<No information available>')
        else:
            try:
                doc.append(rhelp.docstring(self.__rname__,
                                           self.__rname__ + '-package',
                                           sections=['description']))
            except rhelp.HelpNotFoundError as hnf:
                doc.append('[R help was not found]')
        return os.linesep.join(doc)

    def __fill_rpy2r__(self, on_conflict = 'fail'):
        # Note: deliberately calls SignatureTranslatedPackage's *parent*
        # so closures are wrapped as DocumentedSTFunction (with R help)
        # instead of plain STF.
        super(SignatureTranslatedPackage, self).__fill_rpy2r__(on_conflict = on_conflict)
        for name, robj in self.__dict__.items():
            if isinstance(robj, rinterface.Sexp) and robj.typeof == rinterface.CLOSXP:
                self.__dict__[name] = DocumentedSTFunction(self.__dict__[name],
                                                           packagename = self.__rname__)
class InstalledPackage(Package):
    # Non-signature-translated package; only the package docstring is
    # fetched from the installed R help.
    @docstring_property(__doc__)
    def __doc__(self):
        doc = list(['Python representation of an R package.',
                    'R arguments:', ''])
        if not self.__rname__:
            doc.append('<No information available>')
        else:
            try:
                doc.append(rhelp.docstring(self.__rname__,
                                           self.__rname__ + '-package',
                                           sections=['description']))
            except rhelp.HelpNotFoundError as hnf:
                doc.append('[R help was not found]')
        return os.linesep.join(doc)
class WeakPackage(Package):
    """
    'Weak' R package: looking up a symbol that is absent produces a
    warning and returns None, instead of the usual `AttributeError`.
    """

    def __getattr__(self, name):
        value = self.__dict__.get(name)
        if value is None:
            warnings.warn("The symbol '%s' is not in this R namespace/package." % name)
        return value
class LibraryError(ImportError):
    """ Error occurring when importing an R library """
    pass
class InstalledPackages(object):
    """ R packages installed. """
    def __init__(self, lib_loc=None):
        # `library()` with no package lists what is installed; the 'results'
        # slot is a character matrix (column-major) described by dim/dimnames.
        libraryiqr = _library(**{'lib.loc': lib_loc})
        lib_results_i = libraryiqr.do_slot('names').index('results')
        self.lib_results = libraryiqr[lib_results_i]
        self.nrows, self.ncols = self.lib_results.do_slot('dim')
        self.colnames = self.lib_results.do_slot('dimnames')[1] # column names
        self.lib_packname_i = self.colnames.index('Package')

    def isinstalled(self, packagename):
        if not isinstance(packagename, rinterface.StrSexpVector):
            rname = rinterface.StrSexpVector((packagename, ))
        else:
            if len(packagename) > 1:
                raise ValueError("Only specify one package name at a time.")
            rname = packagename
        nrows, ncols = self.nrows, self.ncols
        lib_results, lib_packname_i = self.lib_results, self.lib_packname_i
        # Scan the 'Package' column (column-major storage).
        # NOTE(review): the comparison uses `packagename`, not the `rname`
        # built above — for a StrSexpVector argument this looks suspicious;
        # verify whether element-vs-vector equality is intended.
        for i in range(0+lib_packname_i*nrows,
                       nrows*(lib_packname_i+1),
                       1):
            if lib_results[i] == packagename:
                return True
        return False

    def __iter__(self):
        """ Iterate through rows, yield tuples at each iteration """
        lib_results = self.lib_results
        nrows, ncols = self.nrows, self.ncols
        colrg = range(0, ncols)
        for row_i in range(nrows):
            yield tuple(lib_results[x*nrows+row_i] for x in colrg)
def isinstalled(name,
                lib_loc = None):
    """
    Find whether an R package is installed

    :param name: name of an R package
    :param lib_loc: specific location for the R library (default: None)

    :rtype: a :class:`bool`
    """
    return InstalledPackages(lib_loc).isinstalled(name)
def importr(name,
            lib_loc = None,
            robject_translations = {},
            signature_translation = True,
            suppress_messages = True,
            on_conflict = 'fail',
            symbol_r2python = default_symbol_r2python,
            symbol_check_after = default_symbol_check_after,
            data = True):
    """ Import an R package.

    Arguments:

    - name: name of the R package

    - lib_loc: specific location for the R library (default: None)

    - robject_translations: dict (default: {}) mapping R names to the
      Python names to use for them (read-only; the shared default dict
      is never mutated)

    - signature_translation: (True or False)

    - suppress_message: Suppress messages R usually writes on the console
      (defaut: True)

    - on_conflict: 'fail' or 'warn' (default: 'fail')

    - symbol_r2python: function to translate R symbols into Python symbols

    - symbol_check_after: function to check the Python symbol obtained
      from `symbol_r2python`.

    - data: embed a PackageData objects under the attribute
      name __rdata__ (default: True)

    Return:

    - an instance of class SignatureTranslatedPackage, or of class Package
    """
    rname = rinterface.StrSexpVector((name, ))

    if suppress_messages:
        ok = quiet_require(name, lib_loc = lib_loc)
    else:
        # NOTE(review): with lib_loc=None this builds StrSexpVector((None,));
        # presumably callers always pass lib_loc here — confirm.
        ok = _require(rinterface.StrSexpVector(rname),
                      **{'lib.loc': rinterface.StrSexpVector((lib_loc, ))})[0]
    if not ok:
        raise LibraryError("The R package %s could not be imported" %name)
    # Prefer the package namespace (exports + version) when one exists;
    # fall back to the attached "package:<name>" environment otherwise.
    if _package_has_namespace(rname,
                              _system_file(package = rname)):
        env = _get_namespace(rname)
        version = _get_namespace_version(rname)[0]
        exported_names = set(_get_namespace_exports(rname))
    else:
        env = _as_env(rinterface.StrSexpVector(['package:'+name, ]))
        exported_names = None
        version = None

    if signature_translation:
        pack = InstalledSTPackage(env, name,
                                  translation = robject_translations,
                                  exported_names = exported_names,
                                  on_conflict = on_conflict,
                                  version = version,
                                  symbol_r2python = symbol_r2python,
                                  symbol_check_after = symbol_check_after)
    else:
        pack = InstalledPackage(env, name, translation = robject_translations,
                                exported_names = exported_names,
                                on_conflict = on_conflict,
                                version = version,
                                symbol_r2python = symbol_r2python,
                                symbol_check_after = symbol_check_after)
    if data:
        if pack.__rdata__ is not None:
            warn('While importing the R package "%s", the rpy2 Package object is masking a translated R symbol "__rdata__" already present' % name)
        pack.__rdata__ = PackageData(name, lib_loc = lib_loc)

    return pack
def data(package):
    """ Return the PackageData for the given package."""
    return getattr(package, '__rdata__')
def wherefrom(symbol, startenv = rinterface.globalenv):
    """ For a given symbol, return the environment
    this symbol is first found in, starting from 'startenv'.
    """
    env = startenv
    while True:
        try:
            # Lookup succeeding means `env` is the defining environment.
            env[symbol]
            break
        except LookupError:
            env = env.enclos()
            if env.rsame(rinterface.emptyenv):
                # Reached the top without finding the symbol; the (empty)
                # environment is still returned, converted, as before.
                break
    return conversion.ri2ro(env)
from rpy2 import rinterface
from warnings import warn
from collections import defaultdict
# Base-R helpers bound once at import time.
_packages = rinterface.baseenv['.packages']
_libpaths = rinterface.baseenv['.libPaths']
_find_package = rinterface.baseenv['find.package']
def get_packagepath(package):
    """ return the path to an R package installed """
    found = _find_package(rinterface.StrSexpVector((package, )))
    return found[0]
# Functions to translate R symbols to Python symbols.
# The functions are in this module in order to facilitate
# their access from other modules (without circular dependencies).
# It not necessarily the absolute best place to have the functions though.
def default_symbol_r2python(rname):
    """Translate an R symbol to a Python one: every dot becomes an underscore."""
    return '_'.join(rname.split('.'))
def default_symbol_check_after(symbol_mapping):
    """Inspect a Python-symbol -> R-symbols mapping for collisions.

    Returns a pair of dicts ``(conflicts, resolutions)``: entries that
    could not be disambiguated, and entries that were resolved
    automatically (an identical R/Python name keeps the symbol, the other
    candidate gets a trailing underscore, PEP 8 style).
    """
    conflicts = dict()
    resolutions = dict()
    for py_symbol, r_symbols in symbol_mapping.items():
        candidate_count = len(r_symbols)
        if candidate_count == 1:
            continue
        if candidate_count != 2:
            # No automatic resolution for three or more candidates.
            conflicts[py_symbol] = r_symbols
            continue
        # Exactly two R symbols mapped onto this Python symbol.
        try:
            keep = r_symbols.index(py_symbol)
        except ValueError:
            # Neither candidate is literally the Python symbol:
            # record as a conflict.
            conflicts[py_symbol] = r_symbols
            continue
        for position, r_symbol in enumerate(r_symbols):
            if position == keep:
                resolutions[py_symbol] = [r_symbol, ]
            else:
                resolutions[py_symbol + '_'] = [r_symbol, ]
    return conflicts, resolutions
def _map_symbols(rnames,
                 translation = dict(),
                 symbol_r2python = default_symbol_r2python,
                 symbol_check_after = default_symbol_check_after):
    """
    :param rnames: an iterable of R names
    :param translation: explicit R name -> Python name overrides
    :param symbol_r2python: fallback translator for names not in `translation`
    :param symbol_check_after: a function to check a prospective set of
           translation and resolve conflicts if needed
    :return: tuple (symbol_mapping, conflicts, resolutions)
    """
    symbol_mapping = defaultdict(list)
    for rname in rnames:
        # An explicit translation, when present, wins over the translator.
        pyname = translation[rname] if rname in translation else symbol_r2python(rname)
        symbol_mapping[pyname].append(rname)
    conflicts, resolutions = symbol_check_after(symbol_mapping)
    return (symbol_mapping, conflicts, resolutions)
def _fix_map_symbols(symbol_mapping,
                     conflicts,
                     on_conflict,
                     msg_prefix,
                     exception):
    """
    :param symbol_mapping: as returned by `_map_symbols` (pruned in place
           when `on_conflict == 'warn'`)
    :param conflicts: as returned by `_map_symbols`
    :param on_conflict: 'fail' or 'warn' (only checked if conflicts exist)
    :param msg_prefix: prefix for error message
    :param exception: exception class to raise when failing
    """
    if len(conflicts) > 0:
        msg = msg_prefix
        msg += '\n- '.join(('%s -> %s' %(k, ', '.join(v)) for k,v in conflicts.items()))
        if on_conflict == 'fail':
            # Fixed: the message previously contained an invalid '\`'
            # escape, leaving a stray backslash in the rendered text.
            msg += '\nTo turn this exception into a simple' +\
                   ' warning use the parameter' +\
                   ' `on_conflict="warn"`'
            raise exception(msg)
        elif on_conflict == 'warn':
            for k, v in conflicts.items():
                if k in v:
                    # The Python symbol is itself one of the R names: keep it.
                    symbol_mapping[k] = [k,]
                else:
                    del(symbol_mapping[k])
            warn(msg)
        else:
            raise ValueError('Invalid value for parameter "on_conflict"')
import sys
from collections import namedtuple
if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 4):
from singledispatch import singledispatch
else:
from functools import singledispatch
def overlay_converter(src, target):
    """
    :param src: source of additional conversion rules
    :type src: :class:`Converter`
    :param target: target. The conversion rules in the src will
                   be added to this object.
    :type target: :class:`Converter`
    """
    # The four dispatchers are handled identically: copy every registered
    # rule except the root `object` placeholder for that dispatcher.
    dispatchers = (('ri2ro', '_ri2ro', _ri2ro),
                   ('py2ri', '_py2ri', _py2ri),
                   ('py2ro', '_py2ro', _py2ro),
                   ('ri2py', '_ri2py', _ri2py))
    for public_name, private_name, root in dispatchers:
        registry = getattr(src, public_name).registry
        register = getattr(target, private_name).register
        for cls, rule in registry.items():
            # skip the root dispatch
            if cls is object and rule is root:
                continue
            register(cls, rule)
# Root implementations for the four singledispatch conversion functions;
# each simply rejects types for which no rule has been registered.
def _ri2ro(obj):
    """ Dummy function for ri2ro.

    This function will convert rpy2.rinterface (ri) low-level objects
    into rpy2.robjects (ro) higher-level objects.
    """
    raise NotImplementedError("Conversion 'ri2ro' not defined for objects of type '%s'" % str(type(obj)))

def _py2ri(obj):
    """ Dummy function for py2ri.

    This function will convert Python objects into rpy2.rinterface
    (ri) objects.
    """
    raise NotImplementedError("Conversion 'py2ri' not defined for objects of type '%s'" % str(type(obj)))

def _py2ro(obj):
    """ Dummy function for py2ro.

    This function will convert Python objects into rpy2.robjects
    (ro) objects.
    """
    raise NotImplementedError("Conversion 'py2ro' not defined for objects of type '%s'" % str(type(obj)))

def _ri2py(obj):
    """ Dummy function for ri2py.

    This function will convert Python objects into Python (presumably non-rpy2) objects.
    """
    raise NotImplementedError("Conversion 'ri2py' not defined for objects of type '%s'" % str(type(obj)))
class Converter(object):
    """
    Conversion between rpy2's low-level and high-level proxy objects
    for R objects, and Python (no R) objects.

    Converter objects can be added, the result being
    a Converter object combining the translation rules from the
    different converters.
    """
    # Read-only views over the private dispatchers and metadata.
    name = property(lambda self: self._name)
    ri2ro = property(lambda self: self._ri2ro)
    py2ri = property(lambda self: self._py2ri)
    py2ro = property(lambda self: self._py2ro)
    ri2py = property(lambda self: self._ri2py)
    # Tuple of converter names this instance was derived from.
    lineage = property(lambda self: self._lineage)

    def __init__(self, name,
                 template=None):
        (ri2ro, py2ri, py2ro, ri2py) = Converter.make_dispatch_functions()
        self._name = name
        self._ri2ro = ri2ro
        self._py2ri = py2ri
        self._py2ro = py2ro
        self._ri2py = ri2py

        if template is None:
            lineage = tuple()
        else:
            # Inherit the template's rules and extend its lineage.
            lineage = list(template.lineage)
            lineage.append(name)
            lineage = tuple(lineage)
            overlay_converter(template, self)
        self._lineage = lineage

    def __add__(self, converter):
        assert isinstance(converter, Converter)
        new_name = '%s + %s' % (self.name, converter.name)
        # create a copy of `self` as the result converter
        result_converter = Converter(new_name, template=self)
        overlay_converter(converter, result_converter)
        return result_converter

    @staticmethod
    def make_dispatch_functions():
        # Each converter owns fresh singledispatch functions rooted at the
        # module-level dummy implementations.
        ri2ro = singledispatch(_ri2ro)
        py2ri = singledispatch(_py2ri)
        py2ro = singledispatch(_py2ro)
        ri2py = singledispatch(_ri2py)

        return (ri2ro, py2ri, py2ro, ri2py)
class ConversionContext(object):
    """
    Context manager for instances of class Converter.
    """
    def __init__(self, ctx_converter):
        assert isinstance(ctx_converter, Converter)
        # Capture the module-global current converter so __exit__ can
        # restore it.
        self._original_converter = converter
        self.ctx_converter = Converter('Converter-%i-in-context' % id(self),
                                       template=ctx_converter)

    def __enter__(self):
        set_conversion(self.ctx_converter)
        return self.ctx_converter

    def __exit__(self, exc_type, exc_val, exc_tb):
        set_conversion(self._original_converter)
        # Returning False propagates any exception raised inside the block.
        return False
# Alias for use as `with localconverter(cv) as cv: ...`.
localconverter = ConversionContext

# Module-level "current" converter and its dispatch functions; populated
# by set_conversion() below.
converter = None
py2ri = None
py2ro = None
ri2ro = None
ri2py = None
def set_conversion(this_converter):
    """
    Set conversion rules in the conversion module.

    :param this_converter: The conversion rules
    :type this_converter: :class:`Converter`
    """
    global converter, py2ri, py2ro, ri2ro, ri2py
    # Rebind the module-level dispatch functions so plain
    # `conversion.py2ri(...)` calls use the new converter.
    converter = this_converter
    py2ri = converter.py2ri
    py2ro = converter.py2ro
    ri2ro = converter.ri2ro
    ri2py = converter.ri2py
set_conversion(Converter('base converter'))
from .utils import *
from .utils import JDEoptim, _psi_conv_cc, _psi2ipsi, _regularize_Mpsi, _convSS, _Mpsi, _Mwgt
from argparse import Namespace
from six import string_types
class NlrobControl():
    """
    Container for the tuning options of the nlrob* fitting functions.

    Depending on ``method`` ("M", "MM", "tau", "CM" or "mtl") a different
    subset of attributes is populated, mirroring robustbase's
    ``nlrob.control()``.

    :param method: estimator to configure: "M", "MM", "tau", "CM" or "mtl"
    :param psi: name of the psi function (e.g. "bisquare", "huber")
    :param init: initial estimator for method "MM": "S" or "lts"
    :param optimizer: optimizer name used by method "MM"
    :param fnscale: optional scaling of the objective function
    :param tuning_chi_tau: tuning constants for the tau-scale (method "tau")
    :param tuning_chi_scale: tuning constants for the M-scale
    :param tuning_chi: tuning constants for method "CM"
    :param cutoff: trimming cutoff for method "mtl"
    :raises Exception: if ``method`` is not one of the supported names
    """
    def __init__(self,
                 method,
                 psi="bisquare",
                 init="S",
                 optimizer="JDEoptim",
                 fnscale=None,
                 tuning_chi_tau=None,
                 tuning_chi_scale=None,
                 tuning_chi=None,
                 cutoff=2.5,
                 *args, **kwargs
                 ):
        # Default chi tuning constants. Here, psi must be redescending,
        # so 'huber' has no entry.
        _Mchi_tuning_defaults = {
            'bisquare': np.array([1.54764]),
            'welsh': np.array([0.5773502]),
            'ggw': np.array([-0.5, 1.5, np.nan, 0.5]),
            'lqq': np.array([-0.5, 1.5, np.nan, 0.5]),
            'optimal': np.array([0.4047]),
            'hampel': np.array([1.5, 3.5, 8]) * 0.2119163
        }
        # Default psi tuning constants (95% asymptotic efficiency).
        _Mpsi_tuning_defaults = {
            'huber': np.array([1.345]),
            'bisquare': np.array([4.685061]),
            'welsh': np.array([2.11]),
            'ggw': np.array([-0.5, 1.5, .95, np.nan]),
            'lqq': np.array([-0.5, 1.5, .95, np.nan]),
            'optimal': np.array([1.060158]),
            'hampel': np.array([1.5, 3.5, 8]) * 0.9016085
        }
        self.tuning_psi_M = None
        self.psi = psi
        self.method = method
        if method == "M":
            pass
        elif method == "MM":
            self.init = init
            self.tuning_chi_scale = _psi_conv_cc(psi, _Mchi_tuning_defaults[psi])
            self.tuning_psi_M = _psi_conv_cc(psi, _Mpsi_tuning_defaults[psi])
            self.optimizer = optimizer
            self.fnscale = fnscale
        elif method == "tau":
            # Plain assignments: the previous `x if x else None` idiom raised
            # ValueError for multi-element numpy arrays (ambiguous truth value)
            # and was a no-op otherwise.
            self.tuning_chi_tau = tuning_chi_tau
            self.tuning_chi_scale = tuning_chi_scale
            self.fnscale = fnscale
        elif method == "CM":
            self.tuning_chi = tuning_chi
            self.fnscale = fnscale
        elif method == "mtl":
            self.fnscale = fnscale
            # Only None means "unset"; a caller-supplied cutoff of 0 is kept.
            self.cutoff = cutoff if cutoff is not None else 2.5
        else:
            raise Exception("Method %s not correctly supported yet" % method)

    def copy(self):
        """Return a shallow copy of this control object."""
        return copy.copy(self)

    def __str__(self):
        # __str__ must always return a str; the previous version returned
        # None for non-MM methods (TypeError on str()) and crashed with an
        # AttributeError on the never-assigned `optArgs` for MM.
        if self.method == "MM":
            lines = ["self.method = {:}".format(self.method),
                     "self.init = {:}".format(self.init),
                     "self.psi = {:}".format(self.psi),
                     "self.tuning_chi_scale = {:}".format(self.tuning_chi_scale),
                     "self.tuning_psi_M = {:}".format(self.tuning_psi_M),
                     "self.optimizer = {:}".format(self.optimizer),
                     "self.optArgs = {:}".format(getattr(self, "optArgs", None))]
            return "\n".join(lines) + "\n"
        return "self.method = {:}\nself.psi = {:}\n".format(self.method, self.psi)
def _Mwgt_psi1(psi, cc=None):
    """
    Build the psi weight function used by nlrob's IRLS loop.

    Returns a function ``g(x, deriv=0)`` that evaluates psi(x)/x when
    ``deriv == 0`` and the psi derivative (via ``_Mpsi``) otherwise, with
    the tuning constants fixed at construction time.

    :param psi: name of the psi function (e.g. "huber", "bisquare")
    :param cc: tuning constant(s); when None, the standard 95%-efficiency
               defaults for ``psi`` are used
    """
    if cc is None:
        # Default psi tuning constants (95% efficiency), matching
        # NlrobControl. The previous code referenced the undefined name
        # `_Mpsi_tuning_default`, so this path always raised NameError.
        _Mpsi_tuning_defaults = {
            'huber': np.array([1.345]),
            'bisquare': np.array([4.685061]),
            'welsh': np.array([2.11]),
            'ggw': np.array([-0.5, 1.5, .95, np.nan]),
            'lqq': np.array([-0.5, 1.5, .95, np.nan]),
            'optimal': np.array([1.060158]),
            'hampel': np.array([1.5, 3.5, 8]) * 0.9016085
        }
        cc = _Mpsi_tuning_defaults[psi]
    ipsi = _psi2ipsi(psi)
    ccc = _psi_conv_cc(psi, cc)

    def return_func(x, deriv=0):
        if deriv:
            return _Mpsi(x, ccc, ipsi, deriv)
        return _Mwgt(x, ccc, ipsi)
    return return_func
def nlrob(formula, data, start=np.zeros(1),
          lower=np.array([-np.inf]),
          upper=np.array([np.inf]),
          weights=None,
          method="MM",
          psi=None,
          scale=None,
          control=None,
          test_vec="resid",
          maxit=20,
          tol=1e-06,
          algorithm="lm", doCov=False, trace=False):
    """
    Fits a nonlinear regression model by robust methods. Per default, by an M-estimator, using iterated reweighted least squares (called "IRLS" or also "IWLS").
    This function returns a dictionary with the results

    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta) (cf. nls). (For some checks: if f(.) is
        linear, then we need parentheses, e.g., y ~ (a + b * x)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    start: pandas.core.frame.DataFrame
        A named numeric vector of starting parameters estimates, only for
        method = "M".
    lower: pandas.core.frame.DataFrame
        numeric vectors of lower and upper bounds; if needed, will be
        replicated to be as long as the longest of start, lower or upper. For
        (the default) method = "M", if the bounds are unspecified all
        parameters are assumed to be unconstrained; also, for method "M",
        bounds can only be used with the "port" algorithm. They are ignored,
        with a warning, in cases they have no effect.
        For methods "CM" and "mtl", the bounds must additionally have an entry
        named "sigma" as that is determined simultaneously in the same
        optimization, and hence its lower bound must not be negative.
    upper: array_like
        numeric vector of upper bounds (see `lower`).
    weights: array_like
        An optional vector of weights to be used in the fitting process (for
        intrinsic weights, not the weights w used in the iterative (robust)
        fit). I.e., sum(w * e^2) is minimized with e = residuals,
        e[i] = y[i] - f(xreg[i], theta), where f(x, theta) is the nonlinear
        function, and w are the robust weights from resid * weights.
    method: str
        a character string specifying which method to use:
        "M"   Computes an M-estimator, using nls(*, weights=*) iteratively
              (hence, IRLS) with weights equal to psi(r_i) / r_i, where r_i
              is the i-th residual from the previous fit.
        "MM"  Computes an MM-estimator, starting from init, either "S" or "lts".
        "tau" Computes a Tau-estimator.
        "CM"  Computes a "Constrained M" (=: CM) estimator.
        "mtl" Computes a "Maximum Trimmed Likelihood" (=: MTL) estimator.
        Note that all methods but "M" are "random", hence typically to be
        preceded by set.seed() in usage.
    psi: func
        A function of the form g(x, 'tuning constant(s)', deriv) that for
        deriv=0 returns psi(x)/x and for deriv=1 returns psi'(x). Typically
        built via a simple _Mwgt_psi1() call.
    scale: float
        When not None, a positive number specifying a scale kept fixed during
        the iterations (and returned as Scale component).
    test_vec: str
        Character string specifying the convergence criterion. The relative
        change is tested for residuals with a value of "resid" (the default),
        for coefficients with "coef", and for weights with "w".
    maxit: int
        maximum number of iterations in the robust loop.
    tol: float
        non-negative convergence tolerance for the robust fit.
    algorithm: str
        character string specifying the algorithm to use for nls, see there,
        only when method = "M". The default algorithm is a Gauss-Newton
        algorithm.
    doCov: bool
        a logical specifying if nlrob() should compute the asymptotic
        variance-covariance matrix (see vcov) already.
    control: obj
        An optional object of control settings.
    trace: bool
        logical value indicating if a "trace" of the iteration progress
        should be printed. Default is False.

    Returns
    -------
    coefficients: array_like
        Coefficients of the regressor
    residuals: array_like
        Difference between the real values and the fitted_values
    fitted_values: array_like
        Estimated values by the regressor

    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Function=NLROBInput.csv")
    >>> # M Method
    >>> method = "M"
    >>> Rfit = nlrob(formula, data, lower, lower, upper, method=method)

    See Also
    --------
    nlrob_MM:
    nlrob_tau:
    nlrob_CM:
    nlrob_mtl:
    """
    hasWgts = weights is not None
    if method != "M":
        control = NlrobControl(method)
        if hasWgts:
            raise Exception("specifying 'weights' is not yet supported for method %s " % method)
        if psi is not None:
            print("For method = \"%s\", currently 'psi' must be specified via 'control'" % method)

        def fixAns(mod):
            # Attach the psi weight function and robustness weights to the
            # result dictionary of the sub-method fits.
            ctrl = mod.get("ctrl")
            if isinstance(ctrl.psi, string_types) and isinstance(ctrl.tuning_psi_M, (int, float)):
                psi = _Mwgt_psi1(ctrl.psi, ctrl.tuning_psi_M)
                res_sc = mod.get("residuals") / mod.get("Scale")
                mod.update({"psi": psi})
                mod.update({"w": psi(res_sc)})
                mod.update({"rweights": psi(res_sc)})
            return mod
        if method == "MM":
            return fixAns(nlrob_MM(formula, data, lower=lower, upper=upper,
                                   tol=tol, ctrl=control))
        elif method == "CM":
            return fixAns(nlrob_CM(formula, data, lower=lower, upper=upper,
                                   tol=tol, ctrl=control))
        elif method == "tau":
            return fixAns(nlrob_tau(formula, data, lower=lower, upper=upper,
                                    tol=tol, ctrl=control))
        elif method == "mtl":
            # Fixed: this previously dispatched to nlrob_CM by mistake.
            return fixAns(nlrob_mtl(formula, data, lower=lower, upper=upper,
                                    tol=tol, ctrl=control))
    else:
        # Default M-estimator weight function (Huber, 95% efficiency), as in
        # R's nlrob() default argument. A user-supplied psi is honoured
        # (previously it was unconditionally overwritten).
        if psi is None:
            psi = _Mwgt_psi1("huber", cc=1.345)
        updateScale = scale is None
        if not updateScale:
            if isinstance(scale, (float, int)) and scale > 0:
                Scale = scale
            else:
                raise Exception("'scale' must be NULL or a positive number")
        # Fixed: the original applied Python `or` to arrays inside np.any().
        if hasWgts and (np.any(weights < 0) or np.isnan(weights).any()):
            raise Exception("'weights' must be nonnegative and not contain NAs")
        # Export the data columns as globals so eval() of the formula's
        # right-hand side can resolve the variable names.
        data_names = data.keys()
        for var in data_names:
            if re.search(r"\s", var.strip()):
                continue
            globals().update({var: data[var].values})
        pnames = start.keys()
        p0 = start.values[0]
        y = data[formula.split("~")[0].rstrip()].values
        nobs = y.size
        right_hand_term = formula.split("~")[1]
        # Substitute parameter names by positional references into p0.
        for i, pname in enumerate(pnames):
            right_hand_term = right_hand_term.replace(pname, "p0[%d]" % i)
        fit = eval(right_hand_term)
        resid = y - fit
        # Relative change between successive iterates (guards against a
        # zero denominator with the 1e-20 floor).
        iris_delta = lambda old, new: np.sqrt(np.sum((old - new) ** 2) / np.max((1e-20, np.sum(old ** 2))))
        converged = False
        method_exit = False
        status = "converged"
        for iiter in range(maxit):
            if trace:
                print("robust iteration")
            previous = eval(test_vec)
            if updateScale:
                # MAD-based scale estimate (consistent for the normal).
                Scale = np.median(np.abs(resid)) / 0.6745
            if Scale == 0:
                convi = 0
                method_exit = True
                status = "could not compute scale of residuals"
                print(status)
            else:
                w = psi(resid / Scale)
                if hasWgts:
                    w = w * weights
                data.update({"_nlrob_w": w})
                out = nls(formula=formula, data=data, start=start, algorithm=algorithm,
                          lower=lower, upper=upper)
                coef = out.get("coefficients")
                resid = out.get("residuals")
                convi = iris_delta(previous, eval(test_vec))
            converged = convi <= tol
            if converged:
                break
            elif trace:
                print(" --> irls.delta(previous, %s) = %g -- *not* converged\n" % (test_vec, convi))
        if not converged or method_exit:
            st = "failed to converge in %d steps" % maxit
            print(st)
            status = st
        if hasWgts:
            # Report the robustness weights without the intrinsic weights.
            tmp = weights != 0
            w[tmp] = w[tmp] / weights[tmp]
        res_sc = resid / Scale
        rw = psi(res_sc)
        if not converged or not doCov:
            asCov = None
        else:
            AtWAinv = np.linalg.inv(out.get("cov"))
            tau = np.mean(rw ** 2) / np.mean(psi(res_sc)) ** 2
            asCov = AtWAinv * Scale ** 2 * tau
        # NOTE(review): `fit` is the fit at the *starting* values; the
        # converged fitted values are y - resid — confirm intended output.
        dictReturn = {"coefficients": coef,
                      "formula": formula,
                      "nobs": nobs,
                      "residuals": resid,
                      "fitted_values": fit,
                      "Scale": Scale,
                      "w": w,
                      "rweights": rw,
                      "cov": asCov,
                      "test_vec": test_vec,
                      "status": status,
                      "iter": iiter,
                      "psi": psi,
                      "data": data
                      }
        return dictReturn
def nlrob_MM(formula, data, lower, upper, tol=1e-6, psi="bisquare", init="S",
             ctrl=NlrobControl("MM")):
    """
    Compute an MM-estimator for nonlinear robust (constrained) regression.
    Returns a dictionary with all the variables of interest

    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model.
    lower: pandas.core.frame.DataFrame
        A vector of starting estimates / lower bounds.
    upper: array_like
        upper bound, the shape could be 1 or the same as lower.
    psi: str
        Name of the psi function (overridden by ``ctrl`` when given).
    init: str
        Initial estimator, "S" or "lts" (overridden by ``ctrl`` when given).
    ctrl: object
        NlrobControl instance.

    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    hessian: array_like
        hessian matrix
    ctrl: object
        NlrobControl instance.

    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBMM.Data=Input.csv")
    >>> Rfit_MM = nlrob_MM(formula, data, lower, upper)

    See Also
    --------
    nlrob:
    nlrob_tau:
    nlrob_CM:
    nlrob_mtl:
    """
    if ctrl:
        init = ctrl.init
        psi = ctrl.psi
        c1 = ctrl.tuning_chi_scale
        c2 = ctrl.tuning_psi_M
    if psi == "lqq":
        # NOTE(review): c12 mixes c1 and c2 components — confirm against
        # the robustbase R source.
        c12 = c1[0] + c2[1]
        lqqMax = (c1[0] * c1[2] - 2 * c12) / (1 - c1[2]) + c12
    # rho1: chi for the scale step; rho2: rho for the M step.
    rho1 = lambda t: Mchi(t, c1, psi)
    rho2 = lambda t: Mchi(t, c2, psi)
    # rho_inv: inverse of the (standardized) rho, per psi family.
    if psi == "bisquare":
        rho_inv = lambda y: c1 * np.sqrt(1 - (1 - y) ** (0.3333333))
    elif psi == "lqq":
        rho_inv = lambda y: np.array(brentq(lambda x: rho1(x) - y, 0, lqqMax))
    elif psi == "optimal":
        rho_inv = lambda y: np.sqrt(y / 1.38) * c1 * 3
    elif psi == "hampel":
        def rho_inv(y):
            # Piecewise inverse of the Hampel rho.
            C = MrhoInf(c1, psi)
            a = c1[0]
            b = c1[1]
            r = c1[2]
            if a / C > y:
                return np.sqrt(2 * C * y)
            elif (2 * b - a) / C > y:
                return 0.5 * a + C / a * y
            else:
                return r + np.sqrt(r ** 2 - ((r - b) * (2 * C / a * y + (b - a))
                                             - b * r))
    else:
        raise Exception("Psi function '%s' not supported yet" % psi)
    # M-scale equation, solved for sigma by brentq below.
    M_scale = lambda sigma, u: np.sum(rho1(u / sigma)) / nobs - 0.5
    globals().update({"M_scale": M_scale})
    # Export data columns as globals so eval() of the formula can see them.
    data_vars = data.keys()
    for var in data_vars:
        if re.search(r"\s", var.strip()):
            continue
        globals().update({var: data[var].values})
    par = lower.keys()
    y = data[formula.split("~")[0].rstrip()].values
    right_hand_term = formula.split("~")[1]
    # Substitute parameter names with positional references into `vector`.
    for i, pname in enumerate(par):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    globals().update({"right_hand_term": right_hand_term})
    if init == "lts":
        def objective_initial(vector):
            # Least trimmed squares: sum of the h smallest squared residuals.
            global right_hand_term
            y_hat = eval(right_hand_term)
            return np.sum(np.sort(y - y_hat)[:h] ** 2)
    elif init == "S":
        def objective_initial(vector):
            # S-estimator: the M-scale of the residuals.
            global right_hand_term
            y_hat = eval(right_hand_term)
            res = y - y_hat
            med_abs_res = np.median(np.abs(res))
            return np.array(brentq(M_scale, constant[0] * med_abs_res,
                                   constant[1] * med_abs_res, args=(res)))
    else:
        raise Exception("Initialization 'init = \"%s\"' not supported (yet)" % init)

    def objective_M(vector, sigma):
        global right_hand_term
        y_hat = eval(right_hand_term)
        return np.sum(rho2((y - y_hat) / sigma))

    def fminfn(p, sigma):
        # Objective on the parscale-rescaled parameters, scaled by fnscale.
        global fnscale
        global parscale
        return objective_M(p * parscale, sigma) / fnscale

    def fmingr(p, sigma):
        """Central finite-difference gradient of fminfn w.r.t. p."""
        global fnscale
        global parscale
        # Work on a perturbed copy of the rescaled parameter vector. The
        # original code initialised `x` with zeros and evaluated the
        # objective at the *unperturbed* `p` on both sides, which made
        # every gradient component zero.
        x = p * parscale
        df = np.zeros_like(p)
        for i in range(p.size):
            epsused = eps = 1e-3
            tmp = p[i] + eps
            if tmp > upper[i]:
                tmp = upper[i]
                epsused = tmp - p[i]
            x[i] = tmp * parscale[i]
            val1 = objective_M(x, sigma) / fnscale
            tmp = p[i] - eps
            if tmp < lower[i]:
                tmp = lower[i]
                eps = p[i] - tmp
            x[i] = tmp * parscale[i]
            val2 = objective_M(x, sigma) / fnscale
            df[i] = (val1 - val2) / (epsused + eps)
            if df[i] == np.inf or df[i] == -np.inf:
                # Fixed: was `"...[%d]" % i+1`, i.e. (str % i) + 1 -> TypeError.
                raise Exception("non-finite finite-difference value [%d]" % (i + 1))
            x[i] = p[i] * parscale[i]
        return df
    npar = len(par)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.sum((y - np.mean(y)) ** 2)
    globals().update({"fnscale": fnscale})
    # Bracketing constants for the brentq M-scale root search.
    if psi == "bisquare":
        constant = [1 / c1]
    elif psi == "lqq":
        constant = [1 / lqqMax]
    elif psi == "optimal":
        constant = [1 / c1 * 1 / 3]
    elif psi == "hampel":
        constant = [1 / c1[2]]
    constant.append(2 / rho_inv(2 / (nobs + 2)) if nobs % 2 else 1 / rho_inv(1 / (nobs + 1)))
    globals().update({"constant": constant})
    if init == "lts":
        h = (nobs + npar + 1) // 2
    if npar > upper.size:
        upper = np.repeat(upper, npar)
    initial = JDEoptim(lower.values[0], upper, objective_initial, tol=tol,
                       fnscale=fnscale)
    parscale = initial.get("par")
    globals().update({"parscale": parscale})
    for var in par:
        # NOTE(review): exec into locals() inside a function is unreliable
        # in CPython; this relies on the globals() fallback of eval below.
        exec("%s = lower['%s'].values" % (var, var), globals(), locals())
    res = y - eval(formula.split("~")[1])
    med_abs_res = np.median(np.abs(res))
    sigma = np.array(brentq(M_scale, constant[0] * med_abs_res,
                            constant[1] * med_abs_res, args=(res)))
    lower = lower.values.ravel()
    bounds = Bounds(lb=lower, ub=upper)
    globals().update({"sigma": sigma})
    M = minimize(fminfn, initial.get("par"), jac=fmingr, args=sigma, method='L-BFGS-B',
                 bounds=bounds, tol=tol)
    coef = dict(zip(par, M.x))
    if M.status == 0:
        status = "converged"
    elif M.status == 1:
        status = "maximum number of iterations reached without convergence"
    else:
        status = M.message
    for var in par:
        exec("%s = coef['%s']" % (var, var), globals(), locals())
    try:
        hess = np.linalg.inv(M.hess_inv.todense())
    except Exception:
        hess = None
    vector = M.x
    fit = eval(right_hand_term)
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": y - fit,
                  "ctrl": ctrl,
                  "crit": M.fun,
                  "initial": initial,
                  "Scale": sigma,
                  "status": status,
                  "hessian": hess}
    return dictReturn
def nlrob_tau(formula, data, lower, upper, tol=1e-6, psi="bisquare",
              ctrl=NlrobControl("tau"), tuning_chi_scale=None,
              tuning_chi_tau=None):
    """
    Computes a Tau-estimator for nonlinear robust (constrained) regression.
    Returns a dictionary with all the variables of interest

    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    lower: pandas.core.frame.DataFrame
        Dataframe with the initial guesses
    upper: array_like
        upper bound, the shape could be 1 or the the same of lower.
    psi: str
        A function (possibly by name) of the form g(x, 'tuning constant(s)',
        deriv)
        that for deriv=0 returns (x)/x and for deriv=1 returns 0 (x).
    init: str
    ctrl: object
        NlrobControl Class

    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    hessian: array_like
        hessian matrix
    ctrl: object
        NlrobControl Class

    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBTAU.Data=Input.csv")
    >>> Rfit_tau = nlrob_tau(formula, data, lower, lower, upper, method=method)

    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_CM:
    nlrob_mtl:
    """
    if ctrl:
        psi = ctrl.psi
    # Defining local variables of functions
    # Export data columns as globals so eval() of the formula can see them.
    data_names = data.keys()
    for var in data_names:
        if re.search("\s", var.strip()):
            continue
        globals().update({var: data[var].values})
    # Default tuning constants for the scale (chi_s) and tau (chi_t) parts.
    # NOTE(review): _chi_s/_chi_t are only assigned for psi "bisquare" or
    # "optimal" and only when the ctrl attribute is None; any other
    # combination raises NameError at the b1/b2 lookups below -- confirm
    # the intended handling of user-supplied tuning constants.
    if ctrl.tuning_chi_scale is None:
        if psi == "bisquare":
            _chi_s = {"b": 0.2, "cc": 1.55}
        elif psi == "optimal":
            _chi_s = {"b": 0.5, "cc": 0.405}
    if ctrl.tuning_chi_tau is None:
        if psi == "bisquare":
            _chi_t = {"b": 0.46, "cc": 6.04}
        elif psi == "optimal":
            _chi_t = {"b": 0.128, "cc": 1.06}
    b1 = _chi_s.get("b")
    c1 = _chi_s.get("cc")
    b2 = _chi_t.get("b")
    c2 = _chi_t.get("cc")
    if psi == "bisquare":
        # Normalize the b constants by the supremum of rho.
        # NOTE(review): b2 is divided by MrhoInf(c1, ...) although the
        # tau part (rho2) uses c2 -- confirm against robustbase's R source.
        b1 = b1 / MrhoInf(c1, psi)
        b2 = b2 / MrhoInf(c1, psi)
    # rho1: chi for the M-scale; rho2: chi for the tau-scale.
    rho1 = lambda t: Mchi(t, c1, psi)
    rho2 = lambda t: Mchi(t, c2, psi)
    # rho_inv: inverse of the standardized rho, used for brentq bracketing.
    if psi == "bisquare":
        rho_inv = lambda y: c1 * np.sqrt(1 - (1 - y)**(1/3))
    elif psi == "optimal":
        rho_inv = lambda y: np.sqrt(y/1.38) * c1 * 3
    # M-scale estimating equation (root in sigma) and squared tau-scale.
    M_scale = lambda sigma, u: np.sum( rho1(u/sigma) )/nobs - b1
    tau_scale2 = lambda u, sigma: sigma ** 2 * 1 / b2 * np.sum(rho2(u / sigma))/ nobs
    par = lower.keys()
    y = data[formula.split("~")[0].rstrip()].values
    right_hand_term = formula.split("~")[1]
    # Substitute parameter names with positional references into `vector`.
    for i, pname in enumerate(par):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    def objective(vector):
        # Tau-objective: squared tau-scale of the residuals, where sigma
        # solves the M-scale equation (bracketed via `constant`).
        fit = eval(right_hand_term)
        res = y - fit
        med_abs_res = np.median(np.abs(res))
        sigma = np.array(brentq(M_scale, constant[0] * med_abs_res,
                                constant[1] * med_abs_res, args=(res)))
        return tau_scale2(res, sigma)
    npar = len(par)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.mean((y - np.mean(y)) ** 2)
    # Bracketing constants for the brentq root search above.
    if psi == "bisquare":
        constant = [1 / c1]
    elif psi == "optimal":
        constant = [1 / c1 * 1 / 3]
    constant.append(2 / rho_inv(2 / (nobs + 2)) if nobs % 2 else 1 / rho_inv( 1 /(nobs + 1)))
    # Broadcast a scalar upper bound to all parameters.
    if npar > upper.size:
        upper = np.repeat(upper, npar)
    optRes = JDEoptim(lower.values[0], upper, objective, tol=tol,
                      fnscale=fnscale)
    it = optRes.get("iter")
    status = "converged" if optRes.get("convergence") == 0 else "failed to convergence in %d steps" % it
    coef = dict(zip(lower.keys(), optRes.get("par")))
    # `vector` is read by eval(right_hand_term) below.
    vector = optRes.get("par")
    fit = eval(right_hand_term)
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": y - fit,
                  "crit": optRes.get("value"),
                  "initial": optRes,
                  "Scale": np.sqrt(optRes.get("value")),
                  "status": status,
                  "iter": it,
                  "ctrl": ctrl}
    return dictReturn
def nlrob_CM(formula, data, lower, upper, tol=1e-6, psi="bisquare",
             ctrl=NlrobControl("CM")):
    """
    Compute a "Constrained M" (CM) estimator for nonlinear robust
    (constrained) regression.
    Returns a dictionary with all the variables of interest

    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    lower: pandas.core.frame.DataFrame
        Dataframe with the initial guesses
    upper: array_like
        upper bound, the shape could be 1 or the the same of lower.
    psi: str
        A function (possibly by name) of the form g(x, 'tuning constant(s)',
        deriv)
        that for deriv=0 returns (x)/x and for deriv=1 returns 0 (x).
    init: str
    ctrl: object
        NlrobControl Class

    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    hessian: array_like
        hessian matrix
    ctrl: object
        NlrobControl Class

    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBCM.Data=Input.csv")
    >>> Rfit_CM = nlrob_cm(formula, data, lower, lower, upper, method=method)

    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_tau:
    nlrob_mtl:
    """
    if ctrl:
        psi = ctrl.psi
    # Default tuning constants; NOTE(review): only defined for "bisquare",
    # any other psi raises NameError at the t_chi lookups below.
    if psi == "bisquare":
        t_chi = {"b": 0.5, "cc":1, "c":4.835}
    b = t_chi.get("b")
    c = t_chi.get("c")
    cc = t_chi.get("cc")
    rho = lambda t: Mchi(t, cc, psi)
    # M-scale estimating equation (root in sigma).
    M_scale = lambda sigma, u: np.sum(rho(u / sigma)) / nobs - b
    # Defining local variables of functions
    # Export data columns as globals so eval() of the formula can see them.
    data_names = data.keys()
    for var in data_names:
        if re.search("\s", var.strip()):
            continue
        globals().update({var: data[var].values})
    # NOTE(review): the filter condition is tautological (every name is in
    # lower.keys()); this is effectively list(lower.keys()).
    pnames = [name for name in lower.keys() if name in lower.keys() or name == "sigma"]
    y = data[formula.split("~")[0].rstrip()].values
    right_hand_term = formula.split("~")[1]
    # Substitute parameter names with positional references into `vector`.
    for i, pname in enumerate(pnames):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    if "sigma" in pnames:
        if "sigma" in formula.split("~")[1] or "sigma" in data.keys():
            raise Exception("As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'")
        def objective(vector):
            # sigma is optimized jointly as the last entry of `vector`.
            fit = eval(right_hand_term)
            sigma = vector[-1]
            return c * np.sum(rho( (y - fit)/sigma ))/nobs + np.log(sigma)
        def con(vector):
            # Constraint: sigma must satisfy the M-scale equation.
            fit = eval(right_hand_term)
            return M_scale(vector[-1], y - fit)
    else:
        def objective(vector):
            # sigma is profiled out via the MAD of the residuals.
            fit = eval(right_hand_term)
            res = y - fit
            sigma = np.median(np.abs(res - np.median(res)))
            return c * np.sum(rho(res / sigma)) / nobs + np.log(sigma)
        con = None
    npar = len(pnames)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.mean((y - np.mean(y)) ** 2)
    # Broadcast a scalar upper bound.
    # NOTE(review): uses lower.size as repeat count, while the sibling
    # functions use npar -- confirm which is intended.
    if npar > upper.size:
        upper = np.repeat(upper, lower.size)
    optRes = JDEoptim(lower.values[0], upper, objective, tol=tol,
                      fnscale=fnscale, constr=con)
    it = optRes.get("iter")
    status = "converged" if optRes.get("convergence") == 0 else "failed to convergence in %d steps" % it
    coef = dict(zip(lower.keys(), optRes.get("par")))
    # `vector` is read by eval(right_hand_term) below.
    vector = optRes.get("par")
    fit = eval(right_hand_term)
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": y - fit,
                  "crit": optRes.get("value"),
                  "status": status,
                  "iter": it,
                  "ctrl": ctrl}
    return dictReturn
def nlrob_mtl(formula, data, lower, upper, tol=1e-6, psi="bisquare",
              ctrl=NlrobControl("mtl")):
    """
    Compute a Maximum Trimmed Likelihood (MTL) estimator for nonlinear
    robust (constrained) regression.

    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model.
    lower: pandas.core.frame.DataFrame
        Dataframe with the initial guesses / lower bounds (may include
        an entry named "sigma").
    upper: array_like
        upper bound, the shape could be 1 or the same as lower.
    psi: str
        psi function name (unused by this method; kept for API symmetry).
    ctrl: object
        NlrobControl instance; supplies the trimming ``cutoff``.

    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    quan: int
        number of observations retained by the trimming rule.
    ctrl: object
        NlrobControl instance.

    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBMTL.Data=Input.csv")
    >>> Rfit_mtl = nlrob_mtl(formula, data, lower, upper)

    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_tau:
    nlrob_CM:
    """
    # Standard normal CDF (R's pnorm), used by the trimming rule below.
    from scipy.stats import norm
    if ctrl:
        cutoff = ctrl.cutoff

    def trim(t):
        """Return the trimming size h and the sorted scaled residuals."""
        t = np.sort(t)
        i = np.where(t >= cutoff)[0]
        if i.size:
            # 2*Phi(t)-1 is the standard-normal mass within [-t, t]. The
            # original code mistakenly *drew random numbers* here via
            # np.random.normal instead of evaluating the CDF (R's pnorm),
            # and crashed on np.min of an empty array when no residual
            # exceeded the cutoff.
            # NOTE(review): `i - 1` mirrors R's 1-based indices; with
            # 0-based numpy indices this may be off by one -- confirm
            # against robustbase's nlrob.mtl.
            partial_h = np.min((i - 1) / (2 * norm.cdf(t[i]) - 1))
            # Return a plain int so h can be used as a slice bound.
            h = int(max(hlow, np.floor(partial_h)))
        else:
            h = nobs
        return {"h": h, "t": t}
    # (Two unused lambdas `rho`/`M_scale` referencing the undefined names
    #  `cc` and `b` were removed; they were never called.)
    # Export data columns as globals so eval() of the formula can see them.
    data_names = data.keys()
    for var in data_names:
        if re.search(r"\s", var.strip()):
            continue
        globals().update({var: data[var].values})
    # Equivalent to the original tautological comprehension over lower.keys().
    pnames = list(lower.keys())
    y = data[formula.split("~")[0].rstrip()].values
    right_hand_term = formula.split("~")[1]
    # Substitute parameter names with positional references into `vector`.
    for i, pname in enumerate(pnames):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    constant = np.log(2 * np.pi)
    if "sigma" in pnames:
        if "sigma" in formula.split("~")[1] or "sigma" in data.keys():
            raise Exception("As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'")

        def objective(vector):
            # Trimmed negative log-likelihood; sigma is optimized jointly
            # as the last entry of `vector`.
            fit = eval(right_hand_term)
            sigma = vector[-1]
            tp = trim(np.abs((y - fit) / sigma))
            h = tp.get("h")
            return h * (constant + 2 * np.log(sigma)) + np.sum(tp.get("t")[:h] ** 2)
    else:
        def objective(vector):
            # sigma is profiled out via the MAD of the residuals.
            fit = eval(right_hand_term)
            res = y - fit
            sigma = np.median(np.abs(res - np.median(res)))
            tp = trim(np.abs(res / sigma))
            h = tp.get("h")
            return h * (constant + 2 * np.log(sigma)) + np.sum(tp.get("t")[:h] ** 2)
    npar = len(pnames)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.mean((y - np.mean(y)) ** 2)
    if npar > upper.size:
        upper = np.repeat(upper, lower.size)
    # Minimal number of retained observations (breakdown-point bound).
    hlow = (nobs + npar + 1) // 2
    optRes = JDEoptim(lower.values[0], upper, objective, tol=tol,
                      fnscale=fnscale)
    it = optRes.get("iter")
    status = "converged" if optRes.get("convergence") == 0 else "failed to convergence in %d steps" % it
    coef = dict(zip(lower.keys(), optRes.get("par")))
    # `vector` is read by eval(right_hand_term) below.
    vector = optRes.get("par")
    fit = eval(right_hand_term)
    res = y - fit
    quan = trim(res / (coef["sigma"] if ("sigma" in pnames) else np.median(np.abs(res - np.median(res))))).get("h")
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": res,
                  "crit": optRes.get("value"),
                  "quan": quan,
                  "status": status,
                  "iter": it,
                  "ctrl": ctrl}
    return dictReturn
def nls(formula, data, start, algorithm="lm",
        weights=None, lower=np.array([-np.Inf]), upper=np.array([np.Inf])):
    """
    Determine the nonlinear (weighted) least-squares estimates of the
    parameters of a nonlinear model (thin wrapper over
    scipy.optimize.curve_fit).

    Parameters
    ----------
    formula: str
        a nonlinear model formula including variables and parameters.
    data: pandas.core.frame.DataFrame
        Data frame in which to evaluate the variables in formula and weights.
    start: pandas.core.frame.DataFrame
        A vector of starting estimates.
    algorithm: str
        Character string specifying the algorithm to use. The default
        algorithm is "lm". Other possible values are 'trf', 'dogbox'.
    weights: array_like
        NOTE(review): accepted but currently *unused* -- the robust weights
        that nlrob() stores in the "_nlrob_w" data column are never applied
        here (curve_fit's `sigma` argument is not passed). Confirm intended
        behaviour.
    lower: scalar, array_like
        Lower bounds on Parameters. An array with the length equal
        to the number of parameters, or a scalar (in which case the bound is
        taken to be the same for all parameters.)
    upper: scalar, array_like
        Upper bounds on Parameters (see `lower`).

    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    residuals: array_like
        numeric vector of the residuals.
    cov: array_like
        covariance matrix

    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLS.Data=Input.csv")
    >>> Rfit_nls = nls(formula, data, lower)

    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_tau:
    nlrob_cm:
    """
    # Export data columns as globals so eval() of the formula can see them.
    for var in data.keys():
        if re.search("\s", var.strip()):
            continue
        globals().update({var: data[var].values})
    y = data[formula.split("~")[0].rstrip()].values
    right_hand_term = formula.split("~")[1]
    # Pick the first data column that appears in the right-hand side and
    # treat it as the single independent variable of curve_fit.
    # NOTE(review): `i = 0` is a dead assignment; also relies on `pname`
    # leaking out of the loop.
    for pname in data.keys():
        i = 0
        if pname in right_hand_term:
            break
    pnames = start.keys()
    p0 = start.values[0]
    x = pname
    # Broadcast scalar bounds to the number of parameters.
    if lower.any():
        if lower.size != p0.size:
            lower = np.repeat(lower, p0.size)
    if upper.any():
        if upper.size != p0.size:
            upper = np.repeat(upper, p0.size)
    bounds = (lower, upper)
    def get_func():
        # Build the model function f(x, *params) dynamically from the
        # formula text so curve_fit can call it with positional parameters.
        env = {"np": np}
        code = "def func(%s, %s):\n" % (x, ", ".join(pnames))
        code += " return %s\n" % right_hand_term
        exec(code, env)
        return env.get("func")
    other_params = "bounds=bounds, method='%s'" % algorithm
    func = get_func()
    par, cov = eval("curve_fit(func, %s, y, p0, %s)" % (x, other_params),
                    globals(), locals())
    # Evaluate the fitted model at the estimated parameters.
    popt_str = ["par[%d]" % i for i, p in enumerate(par)]
    fit = eval("func(%s, %s)" % (x, ", ".join(popt_str)))
    res = y - fit
    dictReturn = {"coefficients": dict(zip(start.keys(), par)),
                  "residuals": res,
                  "cov":cov}
    return dictReturn
from .utils import *
from .utils import JDEoptim, _psi_conv_cc, _psi2ipsi, _regularize_Mpsi, _convSS, _Mpsi, _Mwgt
from argparse import Namespace
from six import string_types
class NlrobControl():
    """
    Container for the tuning parameters of the ``nlrob*`` fitting functions.

    Parameters
    ----------
    method: str
        One of "M", "MM", "tau", "CM" or "mtl"; selects which group of
        attributes is initialised.
    psi: str
        Name of the psi function, e.g. "bisquare" (default).
    init: str
        Initialisation scheme for method "MM" ("S" or "lts").
    optimizer: str
        Name of the optimizer used by method "MM".
    fnscale: float, optional
        Objective-function scaling constant; kept as None when not given.
    tuning_chi_tau, tuning_chi_scale, tuning_chi: optional
        Method-specific tuning constants; kept as None when not given.
    cutoff: float
        Trimming cutoff, only used by method "mtl" (default 2.5).

    Raises
    ------
    Exception
        If `method` is not one of the supported method names.
    """
    def __init__(self,
                 method,
                 psi ="bisquare",
                 init = "S",
                 optimizer = "JDEoptim",
                 fnscale=None,
                 tuning_chi_tau=None,
                 tuning_chi_scale=None,
                 tuning_chi=None,
                 cutoff=2.5,
                 *args, **kwargs
                 ):
        # Default chi tuning constants, keyed by psi name.
        _Mchi_tuning_defaults = {
            ## Here, psi must be redescending! -> 'huber' not possible
            'bisquare': np.array([1.54764]),
            'welsh': np.array([0.5773502]),
            'ggw': np.array([-0.5, 1.5, np.nan, 0.5]),
            'lqq': np.array([-0.5, 1.5, np.nan, 0.5]),
            'optimal': np.array([0.4047]),
            'hampel': np.array([1.5, 3.5, 8]) * 0.2119163
        }
        # Default psi tuning constants, keyed by psi name.
        _Mpsi_tuning_defaults = {
            'huber':np.array([1.345]),
            'bisquare':np.array([4.685061]),
            'welsh':np.array([2.11]),
            'ggw':np.array([-0.5, 1.5, .95, np.nan]),
            'lqq':np.array([-0.5, 1.5, .95, np.nan]),
            'optimal':np.array([1.060158]),
            'hampel':np.array([1.5, 3.5, 8]) * 0.9016085
        }
        self.tuning_psi_M = None
        self.psi = psi
        if method == "M":
            self.method = method
        elif method == "MM":
            self.method = method
            self.init = init
            self.psi = psi
            # Convert the default tuning constants to internal form.
            self.tuning_chi_scale = _psi_conv_cc(psi, _Mchi_tuning_defaults[psi])
            self.tuning_psi_M = _psi_conv_cc(psi, _Mpsi_tuning_defaults[psi])
            self.optimizer = optimizer
            self.fnscale=fnscale
        elif method == "tau":
            self.method = method
            self.psi = psi
            self.tuning_chi_tau = tuning_chi_tau if tuning_chi_tau else None
            self.tuning_chi_scale = tuning_chi_scale if tuning_chi_scale else None
            self.fnscale = fnscale if fnscale else None
        elif method == "CM":
            self.method = method
            self.psi = psi
            self.tuning_chi = tuning_chi if tuning_chi else None
            self.fnscale = fnscale if fnscale else None
        elif method == "mtl":
            self.method = method
            self.fnscale = fnscale if fnscale else None
            self.cutoff = cutoff if cutoff else 2.5
        else:
            raise Exception("Method %s not correctly supported yet" % method)
    def copy(self):
        # Shallow copy is sufficient: the attributes are scalars/arrays that
        # the fitting code never mutates in place.
        return copy.copy(self)
    def __str__(self):
        # BUG FIX: the original raised UnboundLocalError for any method
        # other than "MM" ('string' was only assigned inside the MM branch)
        # and AttributeError on the never-assigned 'optArgs' attribute.
        if self.method == "MM":
            string = "self.method = {:}\n".format(self.method)
            string += "self.init = {:}\n".format(self.init)
            string += "self.psi = {:}\n".format(self.psi)
            string += "self.tuning_chi_scale = {:}\n".format(self.tuning_chi_scale)
            string += "self.tuning_psi_M = {:}\n".format(self.tuning_psi_M)
            string += "self.optimizer = {:}\n".format(self.optimizer)
            string += "self.optArgs = {:}\n".format(getattr(self, "optArgs", None))
        else:
            string = "self.method = {:}\n".format(self.method)
            string += "self.psi = {:}\n".format(self.psi)
        return string
def _Mwgt_psi1(psi, cc=None):
    """
    Build a psi-based weight function for IRLS fitting.

    Parameters
    ----------
    psi: str
        Name of the psi function ("huber", "bisquare", ...).
    cc: array_like, optional
        Tuning constant(s); when None the default tuning constants for
        ``psi`` are looked up.

    Returns
    -------
    callable
        ``f(x, deriv=0)`` returning ``psi(x)/x`` for ``deriv=0`` and the
        ``deriv``-th derivative of psi otherwise.
    """
    # (The original had a spurious 'global deriv' here; 'deriv' is a
    # parameter of the inner closure, so that statement had no effect.)
    if cc is None:
        # NOTE(review): '_Mpsi_tuning_default' is expected to come from
        # 'from .utils import *'; verify it exists there (NlrobControl only
        # defines a similarly named *local* '_Mpsi_tuning_defaults').
        cc = _Mpsi_tuning_default[psi]
    ipsi = _psi2ipsi(psi)
    ccc = _psi_conv_cc(psi, cc)
    def return_func(x, deriv=0):
        # deriv == 0 -> weights psi(x)/x; deriv > 0 -> derivative of psi.
        if deriv:
            return _Mpsi(x, ccc, ipsi, deriv)
        else:
            return _Mwgt(x, ccc, ipsi)
    return return_func
def nlrob(formula, data, start=np.zeros(1),
          lower=np.array([-np.Inf]),
          upper=np.array([np.Inf]),
          weights = None,
          method = "MM",
          psi=None,
          scale = None,
          control=None,
          test_vec = "resid",
          maxit = 20,
          tol = 1e-06,
          algorithm = "lm", doCov=False, trace=False):
    """
    Fits a nonlinear regression model by robust methods. Per default, by an M-estimator, using iterated reweighted least squares (called “IRLS” or also “IWLS”).
    This function returns a dictionary with the results
    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta) (cf. nls). (For some checks: if f(.) is
        linear, then we need parentheses, e.g., y ~ (a + b * x)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    start: pandas.core.frame.DataFrame
        A named numeric vector of starting parameters estimates, only for
        method = "M".
    lower: pandas.core.frame.DataFrame
        numeric vectors of lower and upper bounds; if needed, will be
        replicated to be as long as the longest of start, lower or upper. For
        (the default) method = "M", if the bounds are unspecified all
        parameters are assumed to be unconstrained; also, for method "M",
        bounds can only be used with the "port" algorithm. They are ignored,
        with a warning, in cases they have no effect.
        For methods "CM" and "mtl", the bounds must additionally have an entry
        named "sigma" as that is determined simultaneously in the same
        optimization, and hence its lower bound must not be negative.
    upper: array_like
        numeric vectors of lower and upper bounds; if needed, will be
        replicated to be as long as the longest of start, lower or upper. For
        (the default) method = "M", if the bounds are unspecified all
        parameters are assumed to be unconstrained; also, for method "M",
        bounds can only be used with the "port" algorithm. They are ignored,
        with a warning, in cases they have no effect.
    weights: arrray_like
        An optional vector of weights to be used in the fitting process (for
        intrinsic weights, not the weights w used in the iterative (robust)
        fit). I.e., sum(w * e^2) is minimized with e = residuals,
        e[i] = y[i] - f(xreg[i], theta), where f(x, theta) is the nonlinear
        function, and w are the robust weights from resid * weights.
    method: str
        a character string specifying which method to use. The default is "M", for historical and back-compatibility reasons. For the other methods, primarily see nlrob.algorithms.
        "M"
        Computes an M-estimator, using nls(*, weights=*) iteratively (hence, IRLS) with weights equal to ψ(r_i) / r_i, where r_i is the i-the residual from the previous fit.
        "MM"
        Computes an MM-estimator, starting from init, either "S" or "lts".
        "tau"
        Computes a Tau-estimator.
        "CM"
        Computes a “Constrained M” (=: CM) estimator.
        "mtl"
        Compute as “Maximum Trimmed Likelihood” (=: MTL) estimator.
        Note that all methods but "M" are “random”, hence typically to be preceded by set.seed() in usage.
    psi: func
        A function of the form g(x, 'tuning constant(s)', deriv) that for deriv=0 returns psi(x)/x and for deriv=1 returns psi'(x). Note that tuning constants can not be passed separately, but directly via the specification of psi, typically via a simple _Mwgt_psi1() call as per default.
    scale: float
        When not None, a positive number specifying a scale kept fixed during
        the iterations (and returned as Scale component).
    test_vec: str
        Character string specifying the convergence criterion. The relative
        change is tested for residuals with a value of "resid" (the default),
        for coefficients with "coef", and for weights with "w".
    maxit: int
        maximum number of iterations in the robust loop.
    tol: float
        non-negative convergence tolerance for the robust fit.
    algorithm: str
        character string specifying the algorithm to use for nls, see there,
        only when method = "M". The default algorithm is a Gauss-Newton
        algorithm.
    doCov: bool
        a logical specifying if nlrob() should compute the asymptotic
        variance-covariance matrix (see vcov) already. This used to be
        hard-wired to TRUE; however, the default has been set to FALSE, as vcov
        (obj) and summary(obj) can easily compute it when needed.
    control: obj
        An optional object of control settings.
    trace: bool
        logical value indicating if a “trace” of the nls iteration progress
        should be printed. Default is False.
        If True, in each robust iteration, the residual sum-of-squares and the
        parameter values are printed at the conclusion of each nls iteration.
    Returns
    -------
    coefficients: array_like
        Coefficients of the regressor
    residuals: array_like
        Difference between the real values and the fitted_values
    fitted_values: array_like
        Estimated values by th regressor
    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Function=NLROBInput.csv")
    >>> # M Method
    >>> method = "M"
    >>> Rfit = nlrob(formula, data, lower, lower, upper, method=method)
    See Also
    --------
    nlrob_MM:
    nlrob_tau:
    nlrob_CM:
    nlrob_mtl:
    """
    hasWgts = not weights is None
    if method != "M":
        # All non-M methods are delegated to the nlrob_* workers with a
        # freshly built control object.
        control = NlrobControl(method)
        if (hasWgts):
            raise Exception("specifying 'weights' is not yet supported for method %s " % method)
        if not psi is None:
            print("For method = \"%s\", currently 'psi' must be specified via 'control'" % method )
        def fixAns(mod):
            # Post-process the worker result: attach the psi function and
            # the robustness weights evaluated at the scaled residuals.
            ctrl = mod.get("ctrl")
            if isinstance(ctrl.psi, string_types) and isinstance(ctrl.tuning_psi_M, (int,float)):
                psi = _Mwgt_psi1(ctrl.psi, ctrl.tuning_psi_M)
                res_sc = mod.get("residuals") / mod.get("Scale")
                mod.update({"psi":psi})
                mod.update({"w": psi(res_sc)})
                mod.update({"rweights": psi(res_sc)})
            return mod
        if method == "MM":
            return fixAns(nlrob_MM(formula, data, lower=lower, upper=upper,
                                   tol=tol, ctrl=control))
        elif method == "CM":
            return fixAns(nlrob_CM(formula, data, lower=lower, upper=upper,
                                   tol=tol, ctrl=control))
        elif method == "tau":
            return fixAns(nlrob_tau(formula, data, lower=lower, upper=upper,
                                    tol=tol, ctrl=control))
        elif method == "mtl":
            # BUG FIX: this branch used to call nlrob_CM, so method="mtl"
            # silently computed a CM estimate instead of an MTL estimate.
            return fixAns(nlrob_mtl(formula, data, lower=lower, upper=upper,
                                    tol=tol, ctrl=control))
    else:
        # Classical M-estimation via IRLS with a Huber psi.
        psi = _Mwgt_psi1("huber", cc=1.345)
        updateScale = scale is None
        if not updateScale:
            if isinstance(scale, (float, int)) and scale > 0:
                Scale = scale
            else:
                raise Exception("'scale' must be NULL or a positive number")
        # BUG FIX: the original 'np.any(weights < 0 or np.isnan(...).any())'
        # raised "truth value of an array is ambiguous" for array weights;
        # combine the two checks with a scalar 'or' instead.
        if hasWgts and (np.any(weights < 0) or np.isnan(weights).any()):
            raise Exception("'weights' must be nonnegative and not contain NAs")
        # Publish every (whitespace-free) data column as a module-level name
        # so the formula's right hand side can be eval'd below.
        data_names = data.keys()
        for var in data_names:
            if re.search(r"\s", var.strip()):
                continue
            globals().update({var:data[var].values})
        # NOTE(review): 'start' must be a DataFrame here; the np.zeros(1)
        # default has no .keys() and would fail -- confirm intended usage.
        pnames = start.keys()
        p0 = start.values[0]
        y = data[formula.split("~")[0].rstrip()].values
        nobs = y.size
        right_hand_term = formula.split("~")[1]
        for i, pname in enumerate(pnames):
            right_hand_term = right_hand_term.replace(pname, "p0[%d]" % i)
        fit = eval(right_hand_term)
        resid = y - fit
        # Relative change between successive iterates, guarded against a
        # zero denominator.
        iris_delta = lambda old, new: np.sqrt(np.sum((old - new)**2)/np.max((1e-20, np.sum(old ** 2))))
        converged = False
        method_exit = False
        status = "converged"
        for iiter in range(maxit):
            if trace:
                print("robust iteration")
            # 'test_vec' names one of the local variables (resid/coef/w).
            previous = eval(test_vec)
            if updateScale:
                # MAD-based scale (0.6745 makes it consistent at the normal).
                Scale = np.median(np.abs(resid)) / 0.6745
            if Scale == 0:
                convi = 0
                method_exit = True
                status = "could not compute scale of residuals"
                print(status)
                # NOTE(review): if this happens on the very first iteration,
                # 'coef'/'w' below are still unbound -- confirm upstream that
                # residuals cannot be all zero at entry.
            else:
                w = psi(resid/Scale)
                if hasWgts:
                    w = w * weights
                data.update({"_nlrob_w":w})
                out = nls(formula=formula, data=data, start=start, algorithm=algorithm,
                          lower=lower, upper=upper)
                coef = out.get("coefficients")
                resid = out.get("residuals")
                convi = iris_delta(previous, eval(test_vec))
            converged = convi <= tol
            if converged:
                break
            elif trace:
                print(" --> irls.delta(previous, %s) = %g -- *not* converged\n" % (test_vec, convi))
        if not converged or method_exit:
            st = "failed to converge in %d steps" % maxit
            print(st)
            status = st
        if hasWgts:
            # Remove the intrinsic weights again to report pure robustness
            # weights.
            tmp = weights != 0
            w[tmp] = w[tmp] / weights[tmp]
        res_sc = resid / Scale
        rw = psi(res_sc)
        if not converged or not doCov:
            asCov = None
        else:
            # Asymptotic covariance: (X'WX)^-1 * sigma^2 * tau correction.
            AtWAinv = np.linalg.inv(out.get("cov"))
            tau = np.mean(rw ** 2) / np.mean(psi(res_sc)) ** 2
            asCov = AtWAinv * Scale ** 2 * tau
        dictReturn = {"coefficients": coef,
                      "formula": formula,
                      "nobs":nobs,
                      "residuals": resid,
                      "fitted_values": fit,
                      "Scale": Scale,
                      "w": w,
                      "rweights": rw,
                      "cov": asCov,
                      "test_vec": test_vec,
                      "status": status,
                      "iter": iiter,
                      "psi": psi,
                      "data": data
                      }
        return dictReturn
def nlrob_MM(formula, data, lower, upper, tol=1e-6, psi="bisquare", init="S",
             ctrl=NlrobControl("MM")):
    """
    Compute an MM-estimator for nonlinear robust (constrained) regression.
    Returns a dictionary with all the variables of interest
    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    lower: pandas.core.frame.DataFrame
        A vector of starting estimates.
    upper: array_like
        upper bound, the shape could be 1 or the the same of lower.
    psi: str
        A function (possibly by name) of the form g(x, 'tuning constant(s)',
        deriv)
        that for deriv=0 returns (x)/x and for deriv=1 returns 0 (x).
    init: str
    ctrl: object
        NlrobControl Class
    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    hessian: array_like
        hessian matrix
    ctrl: object
        NlrobControl Class
    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBMM.Data=Input.csv")
    >>> Rfit_MM = nlrob_MM(formula, data, lower, lower, upper, method=method)
    See Also
    --------
    nlrob:
    nlrob_tau:
    nlrob_CM:
    nlrob_mtl:
    """
    if ctrl:
        # The control object overrides the keyword arguments.
        init = ctrl.init
        psi = ctrl.psi
        c1 = ctrl.tuning_chi_scale
        c2 = ctrl.tuning_psi_M
    if psi == "lqq":
        c12 = c1[0] + c2[1]
        lqqMax = (c1[0] * c1[2] - 2 * c12)/( 1 - c1[2]) + c12
    # rho1: chi function for the S-scale; rho2: chi function for the M-step.
    rho1 = lambda t: Mchi(t, c1, psi)
    rho2 = lambda t: Mchi(t, c2, psi)
    if psi == "bisquare":
        rho_inv = lambda y: c1 * np.sqrt(1 - (1 -y) ** (0.3333333))
    elif psi == "lqq":
        rho_inv = lambda y: np.array(brentq(lambda x: rho1(x) - y, 0, lqqMax))
    elif psi == "optimal":
        rho_inv = lambda y: np.sqrt(y / 1.38) * c1 * 3
    elif psi == "hampel":
        def rho_inv(y):
            # Piecewise inverse of the Hampel rho function.
            C = MrhoInf(c1, psi)
            a = c1[0]
            b = c1[1]
            r = c1[2]
            if a / C > y:
                return np.sqrt(2 * C * y)
            elif (2 * b - a )/ C > y:
                return 0.5 * a + C / a * y
            else:
                return r + np.sqrt(r ** 2 - ((r - b) * (2 *C / a * y + (b - a)) - b*r ))
    else:
        raise Exception("Psi function '%s' not supported yet" % psi)
    # M-scale equation: mean of rho1(u/sigma) equals 0.5.
    M_scale = lambda sigma, u: np.sum(rho1(u / sigma)) / nobs - 0.5
    globals().update({"M_scale": M_scale})
    # Defining local variables of functions
    data_vars = data.keys()
    for var in data_vars:
        if re.search("\s", var.strip()):
            continue
        globals().update({var:data[var].values})
    par = lower.keys()
    y = data[formula.split("~")[0].rstrip()].values
    # Rewrite the formula's RHS so parameters index into 'vector'.
    right_hand_term = formula.split("~")[1]
    for i, pname in enumerate(par):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    globals().update({"right_hand_term": right_hand_term})
    if init == "lts":
        def objective_initial(vector):
            # Least trimmed squares: sum of the h smallest squared residuals.
            global right_hand_term
            y_hat = eval(right_hand_term)
            return np.sum(np.sort(y - y_hat)[:h] ** 2)
    elif init == "S":
        def objective_initial(vector):
            # S-estimator: the M-scale of the residuals.
            global right_hand_term
            y_hat = eval(right_hand_term)
            res = y - y_hat
            med_abs_res = np.median(np.abs(res))
            return np.array(brentq(M_scale, constant[0] * med_abs_res, constant[1] * med_abs_res, args=(res)))
    else:
        raise Exception("Initialization 'init = \"%s\"' not supported (yet)" %init)
    def objective_M(vector, sigma):
        # M-step criterion at fixed scale sigma.
        global right_hand_term
        y_hat = eval(right_hand_term)
        return np.sum(rho2( (y - y_hat) / sigma))
    def fminfn(p, sigma):
        # Scaled objective handed to the optimizer.
        global fnscale
        global parscale
        return objective_M(p * parscale, sigma) / fnscale
    def fmingr(p, sigma):
        """Central finite-difference gradient of fminfn w.r.t. p."""
        global fnscale
        global parscale
        # BUG FIX: the original initialised x to zeros and evaluated the
        # objective at the *unperturbed* p for both val1 and val2, which
        # made the gradient identically zero.  Start from the scaled
        # parameter vector and perturb one coordinate at a time.
        x = p * parscale
        df = np.zeros_like(p)
        for i in range(p.size):
            epsused = eps = 1e-3
            tmp = p[i] + eps
            if tmp > upper[i]:
                tmp = upper[i]
                epsused = tmp - p[i]
            x[i] = tmp * parscale[i]
            s = objective_M(x, sigma)
            val1 = s / fnscale
            tmp = p[i] - eps
            if (tmp < lower[i]):
                tmp = lower[i]
                eps = p[i] - tmp
            x[i] = tmp * parscale[i]
            s = objective_M(x, sigma)
            val2 = s/ fnscale
            df[i] = (val1 - val2)/(epsused + eps)
            if df[i] == np.Inf or df[i] == -np.Inf:
                # BUG FIX: '% i+1' parsed as '(msg % i) + 1' and raised
                # TypeError; parenthesise to format the intended index.
                raise Exception("non-finite finite-difference value [%d]" % (i + 1))
            # Restore the i-th coordinate before moving on.
            x[i] = p[i] * parscale[i]
        return df
    npar = len(par)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.sum((y - np.mean(y)) ** 2)
    globals().update({"fnscale": fnscale})
    # Bracketing constants for the brentq root search of the M-scale.
    if psi == "bisquare":
        constant = [1 / c1]
    elif psi == "lqq":
        constant = [1 / lqqMax]
    elif psi == "optimal":
        constant = [1 / c1 * 1 / 3]
    elif psi == "hampel":
        constant = [1 / c1[2]]
    constant.append(2 / rho_inv(2 / (nobs + 2)) if nobs % 2 else 1 / rho_inv( 1 /(nobs + 1)))
    globals().update({"constant": constant})
    if init == "lts":
        h = (nobs + npar + 1) // 2
    if npar > upper.size:
        upper = np.repeat(upper, npar)
    # Global search for the initial (S or LTS) estimate.
    initial = JDEoptim(lower.values[0], upper, objective_initial, tol=tol,
                       fnscale=fnscale)
    parscale = initial.get("par")
    globals().update({"parscale": parscale})
    # NOTE(review): exec with an explicit locals() dict does not create real
    # local variables in Python 3; the eval below only works if the
    # parameter names end up resolvable via globals() -- confirm.
    for var in par:
        exec("%s = lower['%s'].values" % (var, var), globals(), locals())
    res = y - eval(formula.split("~")[1])
    med_abs_res = np.median(np.abs(res))
    sigma = np.array(brentq(M_scale, constant[0] * med_abs_res, constant[1] * med_abs_res, args=(res)))
    lower = lower.values.ravel()
    bounds = Bounds(lb=lower, ub=upper)
    globals().update({"sigma": sigma})
    # M-step: local refinement at fixed sigma.
    M = minimize(fminfn, initial.get("par"), jac=fmingr, args=sigma, method='L-BFGS-B',
                 bounds=bounds, tol=tol)
    coef = dict(zip(par, M.x))
    if M.status == 0:
        status = "converged"
    elif M.status == 1:
        status = "maximum number of iterations reached without convergence"
    else:
        status = M.message
    for var in par:
        exec("%s = coef['%s']" % (var, var), globals(), locals())
    try:
        hess = np.linalg.inv(M.hess_inv.todense())
    except:
        hess = None
    vector = M.x
    fit = eval(right_hand_term)
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": y - fit,
                  "ctrl": ctrl,
                  "crit": M.fun,
                  "initial": initial,
                  "Scale": sigma,
                  "status": status,
                  "hessian": hess}
    return dictReturn
def nlrob_tau(formula, data, lower, upper, tol=1e-6, psi="bisquare",
              ctrl=NlrobControl("tau"), tuning_chi_scale=None,
              tuning_chi_tau=None):
    """
    Computes a Tau-estimator for nonlinear robust (constrained) regression.
    Returns a dictionary with all the variables of interest
    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    lower: pandas.core.frame.DataFrame
        Dataframe with the initial guesses
    upper: array_like
        upper bound, the shape could be 1 or the the same of lower.
    psi: str
        A function (possibly by name) of the form g(x, 'tuning constant(s)',
        deriv)
        that for deriv=0 returns (x)/x and for deriv=1 returns 0 (x).
    init: str
    ctrl: object
        NlrobControl Class
    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    hessian: array_like
        hessian matrix
    ctrl: object
        NlrobControl Class
    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBTAU.Data=Input.csv")
    >>> Rfit_tau = nlrob_tau(formula, data, lower, lower, upper, method=method)
    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_CM:
    nlrob_mtl:
    """
    if ctrl:
        # The control object overrides the psi keyword argument.
        psi = ctrl.psi
    # Defining local variables of functions
    # Publish every (whitespace-free) data column as a module-level name so
    # the rewritten formula can be eval'd inside 'objective'.
    data_names = data.keys()
    for var in data_names:
        if re.search("\s", var.strip()):
            continue
        globals().update({var:data[var].values})
    # Default tuning constants of the scale chi (_chi_s) and the tau chi
    # (_chi_t).  NOTE(review): if ctrl.tuning_chi_scale / tuning_chi_tau are
    # *not* None, or psi is neither "bisquare" nor "optimal", _chi_s/_chi_t
    # stay unbound and the .get() calls below raise NameError -- confirm.
    if ctrl.tuning_chi_scale is None:
        if psi == "bisquare":
            _chi_s = {"b": 0.2, "cc": 1.55}
        elif psi == "optimal":
            _chi_s = {"b": 0.5, "cc": 0.405}
    if ctrl.tuning_chi_tau is None:
        if psi == "bisquare":
            _chi_t = {"b": 0.46, "cc": 6.04}
        elif psi == "optimal":
            _chi_t = {"b": 0.128, "cc": 1.06}
    b1 = _chi_s.get("b")
    c1 = _chi_s.get("cc")
    b2 = _chi_t.get("b")
    c2 = _chi_t.get("cc")
    if psi == "bisquare":
        # Normalise the b constants by the rho supremum.
        # NOTE(review): b2 is divided by MrhoInf(c1, psi); check whether the
        # second line should use c2 instead of c1.
        b1 = b1 / MrhoInf(c1, psi)
        b2 = b2 / MrhoInf(c1, psi)
    # rho1 drives the M-scale, rho2 the tau-scale.
    rho1 = lambda t: Mchi(t, c1, psi)
    rho2 = lambda t: Mchi(t, c2, psi)
    if psi == "bisquare":
        rho_inv = lambda y: c1 * np.sqrt(1 - (1 - y)**(1/3))
    elif psi == "optimal":
        rho_inv = lambda y: np.sqrt(y/1.38) * c1 * 3
    # M-scale equation (root in sigma) and squared tau-scale.
    M_scale = lambda sigma, u: np.sum( rho1(u/sigma) )/nobs - b1
    tau_scale2 = lambda u, sigma: sigma ** 2 * 1 / b2 * np.sum(rho2(u / sigma))/ nobs
    par = lower.keys()
    y = data[formula.split("~")[0].rstrip()].values
    # Rewrite the formula's RHS so parameters index into 'vector'.
    right_hand_term = formula.split("~")[1]
    for i, pname in enumerate(par):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    def objective(vector):
        # Tau criterion: squared tau-scale of the residuals, with sigma
        # obtained by solving the M-scale equation via brentq.
        fit = eval(right_hand_term)
        res = y - fit
        med_abs_res = np.median(np.abs(res))
        sigma = np.array(brentq(M_scale, constant[0] * med_abs_res,
                                constant[1] * med_abs_res, args=(res)))
        return tau_scale2(res, sigma)
    npar = len(par)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.mean((y - np.mean(y)) ** 2)
    # Bracketing constants for the brentq root search of the M-scale.
    if psi == "bisquare":
        constant = [1 / c1]
    elif psi == "optimal":
        constant = [1 / c1 * 1 / 3]
    constant.append(2 / rho_inv(2 / (nobs + 2)) if nobs % 2 else 1 / rho_inv( 1 /(nobs + 1)))
    # Broadcast a scalar upper bound to the number of parameters.
    if npar > upper.size:
        upper = np.repeat(upper, npar)
    # Global optimisation of the tau criterion.
    optRes = JDEoptim(lower.values[0], upper, objective, tol=tol,
                      fnscale=fnscale)
    it = optRes.get("iter")
    status = "converged" if optRes.get("convergence") == 0 else "failed to convergence in %d steps" % it
    coef = dict(zip(lower.keys(), optRes.get("par")))
    vector = optRes.get("par")
    fit = eval(right_hand_term)
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": y - fit,
                  "crit": optRes.get("value"),
                  "initial": optRes,
                  "Scale": np.sqrt(optRes.get("value")),
                  "status": status,
                  "iter": it,
                  "ctrl": ctrl}
    return dictReturn
def nlrob_CM(formula, data, lower, upper, tol=1e-6, psi="bisquare",
             ctrl=NlrobControl("CM")):
    """
    Compute an MM-estimator for nonlinear robust (constrained) regression.
    Returns a dictionary with all the variables of interest
    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    lower: pandas.core.frame.DataFrame
        Dataframe with the initial guesses
    upper: array_like
        upper bound, the shape could be 1 or the the same of lower.
    psi: str
        A function (possibly by name) of the form g(x, 'tuning constant(s)',
        deriv)
        that for deriv=0 returns (x)/x and for deriv=1 returns 0 (x).
    init: str
    ctrl: object
        NlrobControl Class
    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    hessian: array_like
        hessian matrix
    ctrl: object
        NlrobControl Class
    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBCM.Data=Input.csv")
    >>> Rfit_CM = nlrob_cm(formula, data, lower, lower, upper, method=method)
    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_tau:
    nlrob_mtl:
    """
    if ctrl:
        # The control object overrides the psi keyword argument.
        psi = ctrl.psi
    # Tuning constants of the CM criterion.
    # NOTE(review): for any psi other than "bisquare", 't_chi' is never
    # assigned and the .get() calls below raise NameError -- confirm.
    if psi == "bisquare":
        t_chi = {"b": 0.5, "cc":1, "c":4.835}
    b = t_chi.get("b")
    c = t_chi.get("c")
    cc = t_chi.get("cc")
    rho = lambda t: Mchi(t, cc, psi)
    # M-scale equation: mean of rho(u/sigma) equals b.
    M_scale = lambda sigma, u: np.sum(rho(u / sigma)) / nobs - b
    # Defining local variables of functions
    # Publish every (whitespace-free) data column as a module-level name so
    # the rewritten formula can be eval'd inside 'objective'.
    data_names = data.keys()
    for var in data_names:
        if re.search("\s", var.strip()):
            continue
        globals().update({var:data[var].values})
    # NOTE(review): the filter clause is a tautology -- every name from
    # lower.keys() satisfies "name in lower.keys()" -- so "sigma" is only in
    # pnames when the caller put it into 'lower' explicitly.
    pnames = [name for name in lower.keys() if name in lower.keys() or name == "sigma"]
    y = data[formula.split("~")[0].rstrip()].values
    # Rewrite the formula's RHS so parameters index into 'vector'.
    right_hand_term = formula.split("~")[1]
    for i, pname in enumerate(pnames):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    if "sigma" in pnames:
        if "sigma" in formula.split("~")[1] or "sigma" in data.keys():
            raise Exception("As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'")
        def objective(vector):
            # CM criterion with sigma as the last optimisation variable.
            fit = eval(right_hand_term)
            sigma = vector[-1]
            return c * np.sum(rho( (y - fit)/sigma ))/nobs + np.log(sigma)
        def con(vector):
            # Constraint: sigma must solve the M-scale equation.
            fit = eval(right_hand_term)
            return M_scale(vector[-1], y - fit)
    else:
        def objective(vector):
            # Unconstrained variant: plug in the MAD about the median as
            # the scale.
            fit = eval(right_hand_term)
            res = y - fit
            sigma = np.median(np.abs(res - np.median(res)))
            return c * np.sum(rho(res / sigma)) / nobs + np.log(sigma)
        con = None
    npar = len(pnames)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.mean((y - np.mean(y)) ** 2)
    # Broadcast a scalar upper bound to the number of parameters.
    if npar > upper.size:
        upper = np.repeat(upper, lower.size)
    # Global optimisation of the (possibly constrained) CM criterion.
    optRes = JDEoptim(lower.values[0], upper, objective, tol=tol,
                      fnscale=fnscale, constr=con)
    it = optRes.get("iter")
    status = "converged" if optRes.get("convergence") == 0 else "failed to convergence in %d steps" % it
    coef = dict(zip(lower.keys(), optRes.get("par")))
    vector = optRes.get("par")
    fit = eval(right_hand_term)
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": y - fit,
                  "crit": optRes.get("value"),
                  "status": status,
                  "iter": it,
                  "ctrl": ctrl}
    return dictReturn
def nlrob_mtl(formula, data, lower, upper, tol=1e-6, psi="bisquare",
              ctrl=NlrobControl("mtl")):
    """
    Compute a mtl-estimator for nonlinear robust (constrained) regression
    Parameters
    ----------
    formula: str
        A nonlinear formula including variables and parameters of the model,
        such as y ~ f(x, theta)
    data: pandas.core.frame.DataFrame
        Data frame containing the variables in the model. If not found in
        data, the variables are taken from environment(formula), typically
        the environment from which nlrob is called.
    lower: pandas.core.frame.DataFrame
        Dataframe with the initial guesses
    upper: array_like
        upper bound, the shape could be 1 or the the same of lower.
    psi: str
        A function (possibly by name) of the form g(x, 'tuning constant(s)',
        deriv)
        that for deriv=0 returns (x)/x and for deriv=1 returns 0 (x).
    init: str
    ctrl: object
        NlrobControl Class
    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    fitted_values : array_like
    residuals: array_like
        numeric vector of the residuals.
    hessian: array_like
        hessian matrix
    ctrl: object
        NlrobControl Class
    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLROBMTL.Data=Input.csv")
    >>> Rfit_mtl = nlrob_mtl(formula, data, lower, lower, upper, method=method)
    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_tau:
    nlrob_cm:
    """
    # Normal CDF, needed by the adaptive trimming rule below.
    from scipy.stats import norm
    if ctrl:
        cutoff = ctrl.cutoff
    def trim(t):
        """Adaptive trimming: decide how many of the sorted scaled
        residuals t to keep (at least hlow, at most nobs)."""
        t = np.sort(t)
        i = np.where(t >= cutoff)[0]
        if i.size:
            # MTL rule (cf. R robustbase): floor(min((i-1)/(2*pnorm(t[i])-1))).
            # BUG FIX: the original called np.random.normal(t[i]) instead of
            # the normal CDF, which made the estimator nondeterministic.
            partial_h = np.floor(np.min((i - 1) / (2 * norm.cdf(t[i]) - 1)))
            # BUG FIX: cast to int so h can be used as a slice bound in both
            # objective variants (one of them did not cast).
            h = int(max(hlow, partial_h))
        else:
            # BUG FIX: the original unconditionally took np.min over the
            # possibly empty ratio array, which raised ValueError whenever
            # no residual exceeded the cutoff; keep every observation then.
            h = nobs
        return {"h": h, "t": t}
    # (Two dead lambdas, rho/M_scale, which referenced the undefined names
    # 'cc' and 'b', were removed; nothing in this function used them.)
    # Defining local variables of functions
    data_names = data.keys()
    for var in data_names:
        if re.search("\s", var.strip()):
            continue
        globals().update({var:data[var].values})
    # NOTE(review): the filter clause is a tautology; "sigma" is only in
    # pnames when the caller put it into 'lower' explicitly.
    pnames = [name for name in lower.keys() if name in lower.keys() or name == "sigma"]
    y = data[formula.split("~")[0].rstrip()].values
    # Rewrite the formula's RHS so parameters index into 'vector'.
    right_hand_term = formula.split("~")[1]
    for i, pname in enumerate(pnames):
        right_hand_term = right_hand_term.replace(pname, "vector[%d]" % i)
    # Gaussian log-likelihood constant log(2*pi).
    constant = np.log(2 * np.pi)
    if "sigma" in pnames:
        if "sigma" in formula.split("~")[1] or "sigma" in data.keys():
            raise Exception("As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'")
        def objective(vector):
            # Trimmed negative log-likelihood, sigma being optimised jointly
            # as the last entry of 'vector'.
            fit = eval(right_hand_term)
            sigma = vector[-1]
            tp = trim(np.abs( (y - fit) / sigma))
            h = tp.get("h")
            return h * (constant + 2 * np.log(sigma)) + np.sum(tp.get("t")[:h] ** 2)
    else:
        def objective(vector):
            # Trimmed negative log-likelihood with the MAD about the median
            # plugged in as the scale.
            fit = eval(right_hand_term)
            res = y - fit
            sigma = np.median(np.abs(res - np.median(res)))
            tp = trim(np.abs(res / sigma))
            h = int(tp.get("h"))
            return h * (constant + 2 * np.log(sigma)) + np.sum(tp.get("t")[:h] ** 2)
    npar = len(pnames)
    nobs = y.size
    if npar > nobs:
        raise Exception("npar > nobs")
    if ctrl.fnscale:
        fnscale = ctrl.fnscale
    else:
        fnscale = np.mean((y - np.mean(y)) ** 2)
    # Broadcast a scalar upper bound to the number of parameters.
    if npar > upper.size:
        upper = np.repeat(upper, lower.size)
    # Minimum number of observations that must always be kept.
    hlow = (nobs + npar + 1) // 2
    # Global optimisation of the trimmed likelihood.
    optRes = JDEoptim(lower.values[0], upper, objective, tol=tol,
                      fnscale=fnscale)
    it = optRes.get("iter")
    status = "converged" if optRes.get("convergence") == 0 else "failed to convergence in %d steps" % it
    coef = dict(zip(lower.keys(), optRes.get("par")))
    vector = optRes.get("par")
    fit = eval(right_hand_term)
    res = y - fit
    # Number of observations actually kept at the optimum.
    quan = trim( res/(coef["sigma"] if ("sigma" in pnames) else np.median(np.abs(res - np.median(res))))).get("h")
    dictReturn = {"formula": formula,
                  "nobs": nobs,
                  "coefficients": coef,
                  "fitted_values": fit,
                  "residuals": res,
                  "crit": optRes.get("value"),
                  "quan": quan,
                  "status": status,
                  "iter": it,
                  "ctrl": ctrl}
    return dictReturn
def nls(formula, data, start, algorithm="lm",
        weights=None, lower=np.array([-np.Inf]), upper=np.array([np.Inf])):
    """
    Determine the nonlinear (weighted) least-squares estimates of the parameters of a nonlinear model.
    Usage
    nls(formula, data, start, control, algorithm,
        trace, subset, weights, na.action, model,
        lower, upper, ...)
    Parameters
    ----------
    formula: str
        a nonlinear model formula including variables and parameters. Will be coerced to a formula if necessary.
    data: pandas.core.frame.DataFrame
        Data frame in which to evaluate the variables in formula and weights. Can also be a list or an environment, but not a matrix.
    start: pandas.core.frame.DataFrame
        A vector of starting estimates.
    algorithm: str
        Character string specifying the algorithm to use. The default algorithm is a "lm" algorithm. Other possible values are 'trf', 'dogbox'
    lower: scalar, array_like
        Lower bounds on Parameters. An array with the length equal
        to the number of parameters, or a scalar (in which case the bound is
        taken to be the same for all parameters.)
    upper: scalar, array_like
        Upper bounds on Parameters. An array with the length equal
        to the number of parameters, or a scalar (in which case the bound is
        taken to be the same for all parameters.)
    Returns
    -------
    coefficients: array_like
        Numeric vector of coefficient estimates.
    residuals: array_like
        numeric vector of the residuals.
    cov: array_like
        covariance matrix
    Examples
    --------
    >>> import pandas
    >>> from nlrob import *
    >>>
    >>> formula = "density ~ Asym/(1 + np.exp(( xmid - np.log(conc) )/scal))"
    >>> lower = pandas.DataFrame(data=dict(zip(["Asym", "xmid", "scal"], np.zeros(3))),
    ...                          index=[0])
    >>> upper = np.array([1])
    >>> data = pandas.read_csv("Submodule=NLS.Data=Input.csv")
    >>> Rfit_nls = nls(formula, data, lower)
    See Also
    --------
    nlrob:
    nlrob_MM:
    nlrob_tau:
    nlrob_cm:
    """
    # Publish every (whitespace-free) data column as a module-level name so
    # the eval-based curve_fit call below can resolve the x variable.
    for var in data.keys():
        if re.search(r"\s", var.strip()):
            continue
        globals().update({var: data[var].values})
    y = data[formula.split("~")[0].rstrip()].values
    right_hand_term = formula.split("~")[1]
    # The first data column whose name occurs in the right hand side is
    # taken as the independent variable.  (A dead 'i = 0' assignment in the
    # original loop body was removed.)
    for pname in data.keys():
        if pname in right_hand_term:
            break
    pnames = start.keys()
    p0 = start.values[0]
    x = pname
    # Broadcast scalar bounds to the number of parameters.
    if lower.any():
        if lower.size != p0.size:
            lower = np.repeat(lower, p0.size)
    if upper.any():
        if upper.size != p0.size:
            upper = np.repeat(upper, p0.size)
    bounds = (lower, upper)
    def get_func():
        # Build the model function func(x, *params) from the formula text.
        env = {"np": np}
        code = "def func(%s, %s):\n" % (x, ", ".join(pnames))
        code += "    return %s\n" % right_hand_term
        exec(code, env)
        return env.get("func")
    other_params = "bounds=bounds, method='%s'" % algorithm
    func = get_func()
    # The independent variable is only known by name, hence the eval.
    par, cov = eval("curve_fit(func, %s, y, p0, %s)" % (x, other_params),
                    globals(), locals())
    popt_str = ["par[%d]" % i for i, p in enumerate(par)]
    fit = eval("func(%s, %s)" % (x, ", ".join(popt_str)))
    res = y - fit
    dictReturn = {"coefficients": dict(zip(start.keys(), par)),
                  "residuals": res,
                  "cov": cov}
    return dictReturn
from time import time
import numpy as np
from scipy.stats import multivariate_normal
class RobustGMM:
    """
    A robust EM clustering algorithm for Gaussian Mixture Models.

    The algorithm (Yang, Lai and Lin, "A robust EM clustering algorithm for
    Gaussian mixture models", Pattern Recognition 45, 2012) starts with one
    mixture component per data point and prunes components whose mixing
    proportion falls below 1/n while it iterates, so the number of clusters
    does not have to be chosen in advance.

    Args:
        gamma:
            float. Non-negative regularization added to the diagonal of
            covariance. This variable is equivalent to 'reg_covar' in
            sklearn.mixture.GaussianMixture.
        eps:
            float. The convergence threshold. This variable is equivalent to
            'tol' in sklearn.mixture.GaussianMixture.
    """
    def __init__(self, gamma=1e-4, eps=1e-3):
        self.gamma = gamma
        self.eps = eps
        # Tiny positive constant that keeps likelihoods away from exact zero
        # so the logarithms and divisions below stay well-defined.
        self.__smoothing_parameter = 1e-256
        # One record per iteration, filled by _get_iter_info().
        self.__training_info = []
    def fit(self, X: np.ndarray):
        """
        Function for training model with data X by using robust EM algorithm.
        Refer 'Robust EM clustering algorithm' section in the paper page 4 for
        more details.

        Args:
            X: Input data. Data type should be numpy array.
        """
        # initialize variables: start with one component per sample.
        self.X = X.reshape(-1, 1) if X.ndim == 1 else X
        self.dim = self.X.shape[1]
        self.n = self.X.shape[0]
        self.c = self.n
        self.pi = np.ones(self.c) / self.c
        self.means = self.X.copy()
        self.__cov_idx = int(np.ceil(np.sqrt(self.c)))
        self.beta = 1
        self.beta_update = True
        self.t = 0
        self.entropy = (self.pi*np.log(self.pi)).sum()
        self._initialize_covmat()
        self.z = self.predict_proba(self.X)
        self.before_time = time()
        self._get_iter_info()
        self.t += 1
        self.num_update_c = 0
        # robust EM algorithm
        while True:
            self.means = self._update_means()
            self.new_pi = self._update_pi()
            self._update_beta()
            self.pi = self.new_pi
            self.new_c = self._update_c()
            if self.new_c == self.c:
                self.num_update_c += 1
            # Once the component count has been stable for 60 iterations,
            # freeze beta at 0 so the updates reduce to classic EM.
            if self.t >= 60 and self.num_update_c == 60:
                self.beta = 0
                self.beta_update = False
            self.c = self.new_c
            self._update_cov()
            self.z = self.predict_proba(self.X)
            self.new_means = self._update_means()
            if self._check_convergence() < self.eps:
                break
            self._remove_repeated_components()
            self._get_iter_info()
            self.t += 1
        self._get_iter_info()
    def predict_proba(self, X):
        """
        Calculate posterior probability of each component given the data.

        Args:
            X: numpy array of shape (n_samples, dim).

        Returns:
            numpy array of shape (n_samples, c); each row sums to 1.
        """
        # Size the likelihood matrix by X (not by the training-set size
        # self.n) so prediction also works on new data of a different size.
        likelihood = np.zeros((X.shape[0], self.c))
        for i in range(self.c):
            self.covs[i] = self._check_positive_semidefinite(self.covs[i])
            dist = multivariate_normal(mean=self.means[i], cov=self.covs[i])
            likelihood[:, i] = dist.pdf(X)
        numerator = likelihood * self.pi + self.__smoothing_parameter
        denominator = numerator.sum(axis=1)[:, np.newaxis]
        z = numerator / denominator
        return z
    def predict(self, X):
        """
        Predict the component labels for the data samples in X.

        Args:
            X: numpy array

        Returns:
            numpy integer array of shape (n_samples,).
        """
        return self.predict_proba(X).argmax(axis=1)
    def get_training_info(self):
        """
        Return the list of per-iteration training records collected by
        _get_iter_info() (means, covariances, c, beta, entropy, ...).
        """
        return self.__training_info
    def _initialize_covmat(self):
        """
        Covariance matrix initialize function.

        Builds one initial covariance matrix per sample from the pairwise
        distance matrix, and the regularizer Q = d_min * I where d_min is the
        smallest non-zero pairwise distance.
        """
        # Pairwise Euclidean distance matrix, shape (n, n).
        D_mat = np.sqrt(np.sum((self.X[None, :]-self.X[:, None])**2, -1))
        self.covs = np.apply_along_axis(
            func1d=lambda x: self._initialize_covmat_1d(x),
            axis=1, arr=D_mat)
        D_mat_reshape = D_mat.reshape(-1, 1)
        d_min = D_mat_reshape[D_mat_reshape > 0].min()
        self.Q = d_min*np.identity(self.dim)
    def _initialize_covmat_1d(self, d_k):
        """
        self._initialize_covmat() that uses np.apply_along_axis().
        This function is refered term 27 in the paper.

        Args:
            d_k: numpy 1d array of distances from one sample to all others.
        """
        d_k = d_k.copy()
        d_k.sort()
        d_k = d_k[d_k != 0]
        # Use the distance to the ceil(sqrt(n))-th nearest neighbour.
        return ((d_k[self.__cov_idx] ** 2) * np.identity(self.dim))
    def _update_means(self):
        """
        Mean vectors update step.
        This function is refered term 25 in the paper.
        """
        means_list = []
        for i in range(self.c):
            z = self.z[:, i]
            means_list.append((self.X*z.reshape(-1, 1)).sum(axis=0) / z.sum())
        return np.array(means_list)
    def _update_pi(self):
        """
        Mixing proportions update step.
        This function is refered term 13 in the paper.
        """
        self.pi_EM_ = self.z.sum(axis=0) / self.n
        self.entropy = (self.pi*np.log(self.pi)).sum()
        return self.pi_EM_ + self.beta*self.pi*(np.log(self.pi)-self.entropy)
    def _update_beta(self):
        """
        Beta update step.
        This function is refered term 24 in the paper.
        """
        if self.beta_update:
            self.beta = np.min([self._left_term_of_beta(),
                                self._right_term_of_beta()])
    def _left_term_of_beta(self):
        """
        Left term of beta update step.
        This function is refered term 22 in the paper.
        """
        power = np.trunc(self.dim / 2 - 1)
        eta = np.min([1, 0.5 ** (power)])
        return np.exp(-eta*self.n*np.abs(self.new_pi-self.pi)).sum() / self.c
    def _right_term_of_beta(self):
        """
        Right term of beta update step.
        This function is refered term 23 in the paper.
        """
        pi_EM = np.max(self.pi_EM_)
        pi_old = np.max(self.pi)
        # self.entropy is negative, so the denominator is positive.
        return (1 - pi_EM) / (-pi_old * self.entropy)
    def _update_c(self):
        """
        Update the number of components.
        This function is refered term 14, 15 and 16 in the paper.

        Components whose mixing proportion drops below 1/n are discarded;
        pi and z are renormalized over the surviving components.
        """
        idx_bool = self.pi >= 1 / self.n
        new_c = idx_bool.sum()
        pi = self.pi[idx_bool]
        self.pi = pi / pi.sum()
        z = self.z[:, idx_bool]
        self.z = z / z.sum(axis=1).reshape(-1, 1)
        self.means = self.means[idx_bool, :]
        return new_c
    def _update_cov(self):
        """
        Covariance matrix update step.
        This function is refered term 26 and 28 in the paper.
        """
        cov_list = []
        for i in range(self.new_c):
            new_cov = np.cov((self.X-self.means[i, :]).T,
                             aweights=(self.z[:, i]/self.z[:, i].sum()))
            # Term 28: blend the EM estimate with the regularizer Q. The
            # regularization must be *added* (cf. the class docstring and
            # sklearn's reg_covar); subtracting gamma*Q could make the
            # covariance matrix non positive semi-definite.
            new_cov = (1-self.gamma)*new_cov + self.gamma*self.Q
            cov_list.append(new_cov)
        self.covs = np.array(cov_list)
    def _check_convergence(self):
        """
        Check whether algorithm converge or not.

        Returns:
            The largest Euclidean move of any component mean; also commits
            the new means.
        """
        check = np.max(np.sqrt(np.sum((self.new_means-self.means)**2, axis=1)))
        self.means = self.new_means
        return check
    def _check_positive_semidefinite(self, cov):
        """
        Prevent error that covariance matrix is not positive semi definite.

        If the smallest eigenvalue is negative, shift the diagonal up until
        the matrix becomes (comfortably) positive semi-definite.
        """
        min_eig = np.min(np.linalg.eigvals(cov))
        if min_eig < 0:
            cov -= 10 * min_eig * np.eye(*cov.shape)
        return cov
    def _get_iter_info(self):
        """
        Record useful information in each step
        for visualization and objective function.
        """
        result = {}
        result['means'] = self.means
        result['covs'] = self.covs
        result['iteration'] = self.t
        result['c'] = self.c
        result['time'] = time() - self.before_time
        result['mix_prob'] = self.pi
        result['beta'] = self.beta
        result['entropy'] = self.entropy
        result['objective_function'] = self._objective_function()
        self.before_time = time()
        self.__training_info.append(result)
    def _objective_function(self):
        """
        Calculate objective function(negative log likelihood).
        """
        likelihood = np.zeros((self.n, self.c))
        for i in range(self.c):
            likelihood[:, i] = multivariate_normal(
                self.means[i], self.covs[i]).pdf(self.X)
        likelihood = likelihood * self.pi
        resposibility = self.predict_proba(self.X)
        log_likelihood = \
            np.sum(
                np.log(likelihood+self.__smoothing_parameter)*resposibility) \
            + self.beta * self.entropy * self.n
        return log_likelihood
    def _remove_repeated_components(self):
        """
        To remove repeated components during fitting for preventing the
        cases that contain duplicated data.

        Duplicate components (identical mean, covariance and proportion) are
        collapsed into one, with their proportions and responsibilities
        merged by multiplying with the duplicate count.
        """
        c_params = np.concatenate([self.means,
                                   self.covs.reshape(self.c, -1),
                                   self.pi.reshape(-1, 1)],
                                  axis=1)
        _, idx, counts = np.unique(c_params,
                                   axis=0,
                                   return_index=True,
                                   return_counts=True)
        self.means = self.means[idx]
        self.covs = self.covs[idx]
        self.pi = self.pi[idx]*counts
        self.c = self.pi.shape[0]
        self.z = self.z[:, idx]*counts
import numpy as np
class Generator_Multivariate_Normal():
    """
    Class for generating n-dimensions sample data set with multivariate
    normal distribution.

    Args:
        means: list of component mean vectors
        covs: list of component covariance matrices
        mix_prob: list of mixing proportions (should sum to 1)
    """
    def __init__(self, means, covs, mix_prob):
        self.n_components = len(means)
        self.dim = len(means[0])
        self.means = np.array(means)
        self.covs = np.array(covs)
        self.mix_prob = mix_prob
    def get_sample(self, size):
        """
        Draw ``size`` samples from the mixture.

        Every component but the last contributes round(size * mix_prob)
        samples; the last component absorbs the rounding remainder so the
        result has exactly ``size`` rows.

        Args:
            size: integer, total number of samples to generate.
        """
        chunks = [np.zeros((0, self.dim))]
        drawn_so_far = 0
        last = self.n_components - 1
        for idx in range(self.n_components):
            if idx == last:
                count = size - drawn_so_far
            else:
                count = int(np.around(size * self.mix_prob[idx]))
                drawn_so_far += count
            chunks.append(np.random.multivariate_normal(mean=self.means[idx],
                                                        cov=self.covs[idx],
                                                        size=count))
        return np.vstack(chunks)
class Generator_Univariate_Normal():
    """
    Class for generating one-dimension sample data set with univariate
    normal distribution.

    Args:
        means: list of component means
        stds: list of component standard deviations
        mix_prob: list of mixing proportions (should sum to 1)
    """
    def __init__(self, means, stds, mix_prob):
        self.n_components = len(means)
        self.means = np.array(means)
        self.stds_ = np.array(stds)
        self.mix_prob = mix_prob
    def get_sample(self, size):
        """
        Draw ``size`` samples from the mixture as a flat 1-D array.

        Every component but the last contributes round(size * mix_prob)
        samples; the last component absorbs the rounding remainder so the
        result has exactly ``size`` values.

        Args:
            size: integer, total number of samples to generate.
        """
        chunks = [np.array([])]
        drawn_so_far = 0
        last = self.n_components - 1
        for idx in range(self.n_components):
            if idx == last:
                count = size - drawn_so_far
            else:
                count = int(np.around(size * self.mix_prob[idx]))
                drawn_so_far += count
            chunks.append(np.random.normal(loc=self.means[idx],
                                           scale=self.stds_[idx],
                                           size=count))
        return np.concatenate(chunks)
# Robust enhancer-gene association prediction using single cell transcriptomes and epigenomes
This repository hosts the package `robustlink`, a tool that integrates single-cell transcriptomes (scRNA-seq) and epigenomes (snATAC-seq and snmC-seq) and identifies robust associations between cis-regulatory elements (enhancers) and genes.
Reference:
- [Xie, Armand et al. 2021; Robust enhancer-gene regulation identified by single-cell transcriptomes and epigenomes](https://www.biorxiv.org/content/10.1101/2021.10.25.465795v1)
Correspondence: [Fangming Xie](mailto:f7xie@ucsd.edu) and [Eran A. Mukamel](mailto:emukamel@ucsd.edu)
# Getting started
### System requirements ###
This package is tested on a Ubuntu 18.04.6 LTS (Bionic Beaver) server. However, we expect it can be operated under a wide range of systems.
We recommend users to use a [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html) to install dependencies. This requires users to pre-install [Anaconda](https://www.anaconda.com/products/individual).
### Installation ###
```bash
# clone this repo
git clone https://github.com/FangmingXie/robustlink.git
# setting up the environment and install dependancies using the provided `env.yml` file.
conda env create -f ./robustlink/env.yml
conda activate env_robustlink
# install this package using pip from PyPI
pip install robustlink
```
### Demo ###
The `demo/` directory contains an example usage of this package, by linking enhancer-gene pairs using single-cell transcriptomes (scRNA-seq) and epigenome (snmC-seq, snATAC-seq).
```bash
cd ./robustlink/demo
```
1. The demo data, which includes mC, ATAC, and RNA profiles for >70,000 neurons from mouse primary motor cortex, can be downloaded with [this](https://drive.google.com/file/d/1JzP6cPTWFMj4vj5-Ie8QWBl8rpfnJa37/view?usp=sharing) link. Once downloaded, decompress it with the following command. For detailed description of data files, see `README_demodata.txt`.
```bash
# decompress the data under the demo/ directory
tar -zxvf demodata.tar.gz
```
*After the data is in place, the rest of the demo takes about 5 minutes to run through.*
2. With the `demodata` in place, you can run through the entire enhancer-gene association analysis with the following command:
```bash
# run the two scripts under demo/ that links mCG-RNA and ATAC-RNA respectively.
./link_mc_rna.sh && ./link_atac_rna.sh
```
This will generate a result folder `demoresults` that includes integrated datasets, metacells, and correlations between enhancers and genes for mC-RNA and ATAC-RNA, respectively. For speed, this demo only randomly samples 10% cells from each dataset. However, the users can readily make the change to use more cells by tuning the `-s/--subsample_frac` argument.
3. To visualize the results, run through the `visualize_links.ipynb` notebook, which generates visualizations with a few simple commands backed by our customized `CorrRes` class. See section **Visualization** below for more details.


### Prepare your data ###
You need to prepare your data as in the demo in `.h5ad` ([AnnData](https://anndata.readthedocs.io/en/latest/)) format. Specifically, for each dataset you need:
- `counts_${dataset}.h5ad`: a count matrix (cell-by-gene for RNA; cell-by-enhancer for mC and ATAC) of the single-cell transcriptome/epigenome data.
- `gene_profiles_${dataset}.h5ad`: a gene-level feature matrix (average gene-level DNA methylation or ATAC signals for epigenome data). This information is not directly used for enhancer-gene association, but only to integrate cells from different datasets to identify cross-dataset metacells.
In addition, you need annotation file tables (.tsv):
- a gene list
- an enhancer list
- an enhancer-gene pair list (e.g. all pairs within ~1Mbp)
### CLI ###
On the top level, there are three main functions to choose from:
- `scfusion`: integrate datasets (transcriptomes and epigenomes)
- `metacell`: generate metacells (cell clusters) that are shared across datasets
- `corr_mc` or `corr_atac`: correlate enhancer epigenetic signals (mc or atac) with gene expression
Running ```python -m robustlink --help``` will show the available secondary options to choose from:
```
usage: python -m robustlink [-h] {scfusion,metacell,corr_mc,corr_atac} ...
```
Each option requires specific input arguments that can be revealed by their own `--help` function.
```bash
python -m robustlink scfusion --help
```
```
usage: python -m robustlink scfusion [-h] -i DATA_DIR -o OUTDIR -id INPUT_DATASETS [INPUT_DATASETS ...] -im INPUT_MODALITIES
[INPUT_MODALITIES ...] -fd FEATURE_DATASETS [FEATURE_DATASETS ...] [-tag NAMETAG]
[--ka_smooth KA_SMOOTH] [--knn KNN] [-s SUBSAMPLE_FRACTION] [-sn SUBSAMPLE_TIMES]
[--relaxation RELAXATION] [--drop_npcs DROP_NPCS]
[--smoothing_fractions SMOOTHING_FRACTIONS [SMOOTHING_FRACTIONS ...]] [--num_pcs NUM_PCS]
...
```
```bash
python -m robustlink metacell --help
```
```
usage: python -m robustlink metacell [-h] -i INPUT_DATASET -o OUT_DIR -tag INPUT_NAME_TAG -sn SUBSAMPLE_TIMES -r RESOLUTIONS
[RESOLUTIONS ...]
...
```
```bash
python -m robustlink corr_mc --help
```
```
usage: python -m robustlink corr_mc [-h] --tolink TOLINK --countdata_gene COUNTDATA_GENE --countdata_enh COUNTDATA_ENH -o OUT_DIR
--scfusion_dir SCFUSION_DIR --fusiondata_rna FUSIONDATA_RNA --fusiondata_mc FUSIONDATA_MC -tag
INPUT_NAME_TAG [-isub I_SUB] [-ct {pearsonr,spearmanr}] [-f] [-n NUM_METACELL_LIMIT]
...
```
### Visualization ###
We designed the visualization module to be simple and flexible to use at the same time. Once setting up the `CorrRes` object, it takes one line to generate visualizations. Below are a few examples.
```python
# corr_res_mc is a CorrRes object generated as in visualize_links.ipynb
corr_res_mc.plot_corr_vs_dist()
```

```python
corr_res_atac.plot_corr_vs_dist()
```

```python
corr_res_mc.plot_corr_vs_dist()
```

```python
corr_res_atac.plot_corr_vs_dist()
```

With a few minor changes, we can combine the two plots above into a single one, and control other figure properties as you like using the `axes` handles from [matplotlib](https://matplotlib.org/stable/index.html).
```python
fig, ax = plt.subplots(figsize=(6,4))
corr_res_mc .plot_corr_vs_dist(ax)
corr_res_atac.plot_corr_vs_dist(ax)
plt.show()
```

Below is a more complex example of how we can combine subplots.
```python
cols = [
'frac_tp',
'sig_frac_tp',
'num_pos',
'sig_num_pos',
]
fig, axs = plt.subplots(2, 2, figsize=(8*2,5*2), sharex=False, sharey='row')
for ax, col in zip(axs.flat, cols):
corr_res_mc .plot_dist_dep(col, 'linked', ax=ax)
corr_res_mc .plot_dist_dep(col, 'correlated', ax=ax)
corr_res_atac.plot_dist_dep(col, 'linked', ax=ax)
corr_res_atac.plot_dist_dep(col, 'correlated', ax=ax)
axs[0,1].legend(bbox_to_anchor=(1,1))
fig.tight_layout()
plt.show()
```

| /robustlink-0.1.4.tar.gz/robustlink-0.1.4/README.md | 0.774157 | 0.982406 | README.md | pypi |
robustness package
==================
Install via ``pip``: ``pip install robustness``
Read the docs: https://robustness.readthedocs.io/en/latest/index.html
``robustness`` is a package we (students in the `MadryLab <http://madry-lab.ml>`_) created
to make training, evaluating, and exploring neural networks flexible and easy.
We use it in almost all of our projects (whether they involve
adversarial training or not!) and it will be a dependency in many of our
upcoming code releases. A few projects using the library include:
- `Code for "Learning Perceptually-Aligned Representations via Adversarial Robustness" <https://github.com/MadryLab/robust_representations>`_ (https://arxiv.org/abs/1906.00945)
- `Code for
"Image Synthesis with a Single (Robust) Classifier" <https://github.com/MadryLab/robustness_applications>`_ (https://arxiv.org/abs/1906.09453)
We
demonstrate how to use the library in a set of walkthroughs and our API
reference. Functionality provided by the library includes:
- Training and evaluating standard and robust models for a variety of
datasets/architectures using a `CLI interface
<https://robustness.readthedocs.io/en/latest/example_usage/cli_usage.html>`_. The library also provides support for adding
`custom datasets <https://robustness.readthedocs.io/en/latest/example_usage/training_lib_part_2.html#training-on-custom-datasets>`_ and `model architectures <https://robustness.readthedocs.io/en/latest/example_usage/training_lib_part_2.html#training-with-custom-architectures>`_.
.. code-block:: bash
python -m robustness.main --dataset cifar --data /path/to/cifar \
--adv-train 0 --arch resnet18 --out-dir /logs/checkpoints/dir/
- Performing `input manipulation
<https://robustness.readthedocs.io/en/latest/example_usage/input_space_manipulation.html>`_ using robust (or standard)
models---this includes making adversarial examples, inverting representations,
feature visualization, etc. The library offers a variety of optimization
options (e.g. choice between real/estimated gradients, Fourier/pixel basis,
custom loss functions etc.), and is easily extendable.
.. code-block:: python
import torch as ch
from robustness.datasets import CIFAR
from robustness.model_utils import make_and_restore_model
ds = CIFAR('/path/to/cifar')
model, _ = make_and_restore_model(arch='resnet50', dataset=ds,
resume_path='/path/to/model', state_dict_path='model')
model.eval()
   attack_kwargs = {
       'constraint': 'inf', # L-inf PGD
       'eps': 0.05, # Epsilon constraint (L-inf norm)
       'step_size': 0.01, # Learning rate for PGD
       'iterations': 100, # Number of PGD steps
       'targeted': True, # Targeted attack
       'custom_loss': None # Use default cross-entropy loss
   }
_, test_loader = ds.make_loaders(workers=0, batch_size=10)
im, label = next(iter(test_loader))
target_label = (label + ch.randint_like(label, high=9)) % 10
   adv_out, adv_im = model(im, target_label, make_adv=True, **attack_kwargs)
- Importing ``robustness`` as a package, which allows for easy training of
neural networks with support for custom loss functions, logging, data loading,
and more! A good introduction can be found in our two-part walkthrough
(`Part 1 <https://robustness.readthedocs.io/en/latest/example_usage/training_lib_part_1.html>`_,
`Part 2 <https://robustness.readthedocs.io/en/latest/example_usage/training_lib_part_2.html>`_).
.. code-block:: python
from robustness import model_utils, datasets, train, defaults
from robustness.datasets import CIFAR
# We use cox (http://github.com/MadryLab/cox) to log, store and analyze
   # results. Read more at https://cox.readthedocs.io.
from cox.utils import Parameters
import cox.store
# Hard-coded dataset, architecture, batch size, workers
ds = CIFAR('/path/to/cifar')
m, _ = model_utils.make_and_restore_model(arch='resnet50', dataset=ds)
train_loader, val_loader = ds.make_loaders(batch_size=128, workers=8)
# Create a cox store for logging
out_store = cox.store.Store(OUT_DIR)
# Hard-coded base parameters
train_kwargs = {
'out_dir': "train_out",
'adv_train': 1,
'constraint': '2',
'eps': 0.5,
'attack_lr': 1.5,
'attack_steps': 20
}
train_args = Parameters(train_kwargs)
# Fill whatever parameters are missing from the defaults
train_args = defaults.check_and_fill_args(train_args,
defaults.TRAINING_ARGS, CIFAR)
train_args = defaults.check_and_fill_args(train_args,
defaults.PGD_ARGS, CIFAR)
# Train a model
train.train_model(train_args, m, (train_loader, val_loader), store=out_store)
**Note**: ``robustness`` requires PyTorch to be installed with CUDA support.
Pretrained models
-----------------
Along with the training code, we release a number of pretrained models for
different datasets, norms and ε-train values. This list will be updated as
we release more or improved models. *Please cite this library (see bibtex
entry below) if you use these models in your research.*
For each (model, ε-test) combination we evaluate 20-step and 100-step PGD with a
step size of `2.5 * ε-test / num_steps`. Since these two accuracies are quite
close to each other, we do not consider more steps of PGD.
For each value of ε-test, we highlight the best robust accuracy achieved over
different ε-train in bold.
**Note #1**: We did not perform any hyperparameter tuning and simply used the same
hyperparameters as standard training. It is likely that exploring different
training hyperparameters will increase these robust accuracies by a few percentage
points.
**Note #2**: The pytorch checkpoint (``.pt``) files below were saved with the following versions of PyTorch and Dill:
.. code-block::
torch==1.1.0
dill==0.2.9
CIFAR10 L2-norm (ResNet50):
- `ε = 0.0 <https://www.dropbox.com/s/yhpp4yws7sgi6lj/cifar_nat.pt?dl=0>`_ (standard training)
- `ε = 0.25 <https://www.dropbox.com/s/2qsp7pt6t7uo71w/cifar_l2_0_25.pt?dl=0>`_
- `ε = 0.5 <https://www.dropbox.com/s/1zazwjfzee7c8i4/cifar_l2_0_5.pt?dl=0>`_
- `ε = 1.0 <https://www.dropbox.com/s/s2x7thisiqxz095/cifar_l2_1_0.pt?dl=0>`_
+--------------+----------------+-----------------+---------------------+---------------------+
| CIFAR10 L2-robust accuracy |
+--------------+----------------+-----------------+---------------------+---------------------+
| | ε-train |
+--------------+----------------+-----------------+---------------------+---------------------+
| ε-test | 0.0 | 0.25 | 0.5 | 1.0 |
+==============+================+=================+=====================+=====================+
| 0.0 | **95.25% / -** | 92.77% / - | 90.83% / - | 81.62% / - |
+--------------+----------------+-----------------+---------------------+---------------------+
| 0.25 | 8.66% / 7.34% | 81.21% / 81.19% | **82.34% / 82.31%** | 75.53% / 75.53% |
+--------------+----------------+-----------------+---------------------+---------------------+
| 0.5 | 0.28% / 0.14% | 62.30% / 62.13% | **70.17% / 70.11%** | 68.63% / 68.61% |
+--------------+----------------+-----------------+---------------------+---------------------+
| 1.0 | 0.00% / 0.00% | 21.18% / 20.66% | 40.47% / 40.22% | **52.72% / 52.61%** |
+--------------+----------------+-----------------+---------------------+---------------------+
| 2.0 | 0.00% / 0.00% | 0.58% / 0.46% | 5.23% / 4.97% | **18.59% / 18.05%** |
+--------------+----------------+-----------------+---------------------+---------------------+
CIFAR10 Linf-norm (ResNet50):
- ε = 0.0 (PyTorch pre-trained)
- `ε = 8/255 <https://www.dropbox.com/s/c9qlt1lbdnu9tlo/cifar_linf_8.pt?dl=0>`_
+--------------+-----------------+---------------------+
| CIFAR10 Linf-robust accuracy |
+--------------+-----------------+---------------------+
| | ε-train |
+--------------+-----------------+---------------------+
| ε-test | 0 / 255 | 8 / 255 |
+==============+=================+=====================+
| 0 / 255 | **95.25% / -** | 87.03% / - |
+--------------+-----------------+---------------------+
| 8 / 255 | 0.00% / 0.00% | **53.49% / 53.29%** |
+--------------+-----------------+---------------------+
| 16 / 255 | 0.00% / 0.00% | **18.13% / 17.62%** |
+--------------+-----------------+---------------------+
ImageNet L2-norm (ResNet50):
- ε = 0.0 (PyTorch pre-trained)
- `ε = 3.0 <https://www.dropbox.com/s/knf4uimlqsi1yz8/imagenet_l2_3_0.pt?dl=0>`_
+--------------+-----------------+---------------------+
| ImageNet L2-robust accuracy |
+--------------+-----------------+---------------------+
| | ε-train |
+--------------+-----------------+---------------------+
| ε-test | 0.0 | 3.0 |
+==============+=================+=====================+
| 0.0 | **76.13% / -** | 57.90% / - |
+--------------+-----------------+---------------------+
| 0.5 | 3.35% / 2.98% | **54.42% / 54.42%** |
+--------------+-----------------+---------------------+
| 1.0 | 0.44% / 0.37% | **50.67% / 50.67%** |
+--------------+-----------------+---------------------+
| 2.0 | 0.16% / 0.14% | **43.04% / 43.02%** |
+--------------+-----------------+---------------------+
| 3.0 | 0.13% / 0.12% | **35.16% / 35.09%** |
+--------------+-----------------+---------------------+
ImageNet Linf-norm (ResNet50):
- ε = 0.0 (PyTorch pre-trained)
- `ε = 4 / 255 <https://www.dropbox.com/s/axfuary2w1cnyrg/imagenet_linf_4.pt?dl=0>`_
- `ε = 8 / 255 <https://www.dropbox.com/s/yxn15a9zklz3s8q/imagenet_linf_8.pt?dl=0>`_
+--------------+-----------------+---------------------+---------------------+
| ImageNet Linf-robust accuracy |
+--------------+-----------------+---------------------+---------------------+
| | ε-train |
+--------------+-----------------+---------------------+---------------------+
| ε-test | 0.0 | 4 / 255 | 8 / 255 |
+==============+=================+=====================+=====================+
| 0 / 255 | **76.13% / -** | 62.42% / - | 47.91% / - |
+--------------+-----------------+---------------------+---------------------+
| 4 / 255 | 0.04% / 0.03% | **33.58% / 33.38%** | 33.06% / 33.03% |
+--------------+-----------------+---------------------+---------------------+
| 8 / 255 | 0.01% / 0.01% | 13.13% / 12.73% | **19.63% / 19.52%** |
+--------------+-----------------+---------------------+---------------------+
| 16 / 255 | 0.01% / 0.01% | 1.53% / 1.37% | **5.00% / 4.82%** |
+--------------+-----------------+---------------------+---------------------+
Citation
--------
If you use this library in your research, cite it as
follows:
.. code-block:: bibtex
@misc{robustness,
title={Robustness (Python Library)},
author={Logan Engstrom and Andrew Ilyas and Hadi Salman and Shibani Santurkar and Dimitris Tsipras},
year={2019},
url={https://github.com/MadryLab/robustness}
}
*(Have you used the package and found it useful? Let us know!)*.
Maintainers
-------------
- `Andrew Ilyas <https://twitter.com/andrew_ilyas>`_
- `Logan Engstrom <https://twitter.com/logan_engstrom>`_
- `Shibani Santurkar <https://twitter.com/ShibaniSan>`_
- `Dimitris Tsipras <https://twitter.com/tsiprasd>`_
- `Hadi Salman <https://twitter.com/hadisalmanX>`_
Contributors/Commiters
'''''''''''''''''''''''
- See `here <https://github.com/MadryLab/robustness/pulse>`_
| /robustness-1.2.post1.tar.gz/robustness-1.2.post1/README.rst | 0.96118 | 0.742562 | README.rst | pypi |
<div align="center">
<img src="docs/logo.png" height=100 alt="RG logo"/>
<h1 style="font-family: 'IBM Plex Sans'">Robustness Gym</h1>
</div>


[](https://robustnessgym.readthedocs.io/en/latest/?badge=latest)
[](https://github.com/pre-commit/pre-commit)
[](https://robustnessgym.com)
[comment]: <> ([](https://codecov.io/gh/robustness-gym/robustness-gym))
Robustness Gym is a Python evaluation toolkit for machine learning models.
[**Getting Started**](#getting-started)
| [**What is Robustness Gym?**](#what-is-robustness-gym)
| [**Docs**](https://robustnessgym.readthedocs.io/en/latest/index.html)
| [**Contributing**](CONTRIBUTING.md)
| [**About**](#about)
### Getting started
```
pip install robustnessgym
```
> Note: some parts of Robustness Gym rely on optional dependencies.
> If you know which optional dependencies you'd like to install,
> you can do so using something like `pip install robustnessgym[dev,text]` instead.
> See `setup.py` for a full list of optional dependencies.
### What is Robustness Gym?
Robustness Gym is being developed to address challenges in evaluating machine
learning models today, with tools to evaluate and visualize the quality of machine
learning models.
Along with [Meerkat](https://github.com/robustness-gym/mosaic),
we make it easy for you to load in any kind of data
(text, images, videos, time-series) and quickly evaluate how well your models are
performing.
### Using Robustness Gym
```python
import robustnessgym as rg
# Load any dataset
sst = rg.DataPanel.from_huggingface('sst', split='validation')
# Load any model
sst_model = rg.HuggingfaceModel('distilbert-base-uncased-finetuned-sst-2-english', is_classifier=True)
# Generate predictions for first 2 examples in dataset using "sentence" column as input
predictions = sst_model.predict_batch(sst[:2], ['sentence'])
# Run inference on an entire dataset & store the predictions in the dataset
sst = sst.update(lambda x: sst_model.predict_batch(x, ['sentence']), batch_size=4, is_batched_fn=True, pbar=True)
# Create a DevBench, which will contain slices to evaluate
sst_db = rg.DevBench()
# Add slices of data; to begin with let's add the full dataset
# Slices are just datasets that you can track performance on
sst_db.add_slices([sst])
# Let's add another slice by filtering examples containing negation words
sst_db(rg.HasNegation(), sst, ['sentence'])
# Add any metrics you like
sst_db.add_aggregators({
# Map from model name to dictionary of metrics
'distilbert-base-uncased-finetuned-sst-2-english': {
# This function uses the predictions we stored earlier to calculate accuracy
'accuracy': lambda dp: (dp['label'].round() == dp['pred'].numpy()).mean()
}
})
# Create a report
report = sst_db.create_report()
# Visualize: requires installing plotly support in Jupyter, generally works better in Jupyter notebooks (rather than Jupyter Lab)
report.figure()
# Alternatively, save report to file
report.figure().write_image('sst_db_report.png', engine='kaleido')
```
#### Applying Built-in Subpopulations
```python
# Create a slicebuilder that creates subpopulations based on length, in this case the bottom and top 10 percentile.
length_sb = rg.NumTokensSubpopulation(intervals=[("0%", "10%"), ("90%", "100%")])
slices, membership = length_sb(dp=sst, columns=['sentence'])
# `slices` is a list of 2 DataPanel objects
# `membership` is a matrix of shape (n x 2)
for sl in slices:
print(sl.identifier)
```
#### Creating Custom Subpopulations
```python
def length(batch: rg.DataPanel, columns: list):
return [len(text.split()) for text in batch[columns[0]]]
# Create a subpopulation that buckets examples based on length
length_sp = rg.ScoreSubpopulation(intervals=[(0, 10), (10, 20)], score_fn=length)
slices, membership = length_sp(dp=sst, columns=['sentence'])
for sl in slices:
print(sl.identifier)
```
### About
You can read more about the ideas underlying Robustness Gym in our
paper on [arXiv](https://arxiv.org/pdf/2101.04840.pdf).
The Robustness Gym project began as a collaboration between [Stanford Hazy
Research](https://hazyresearch.stanford.edu), [Salesforce Research](https://einstein.ai
) and [UNC Chapel-Hill](http://murgelab.cs.unc.edu/). We also have a
[website](https://robustnessgym.com).
If you use Robustness Gym in your work, please use the following BibTeX entry,
```
@inproceedings{goel-etal-2021-robustness,
title = "Robustness Gym: Unifying the {NLP} Evaluation Landscape",
author = "Goel, Karan and
Rajani, Nazneen Fatema and
Vig, Jesse and
Taschdjian, Zachary and
Bansal, Mohit and
R{\'e}, Christopher",
booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Demonstrations",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.naacl-demos.6",
pages = "42--55",
}
``` | /robustnessgym-0.1.3.tar.gz/robustnessgym-0.1.3/README.md | 0.902748 | 0.984094 | README.md | pypi |
import logging
import os
import warnings
from datetime import datetime
from typing import Callable, List, Union
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from statsmodels.stats.outliers_influence import variance_inflation_factor
def number_of_days_in_a_year(year):
    """Return the number of calendar days in ``year`` (365, or 366 in leap years)."""
    # The day-of-year of December 31st is exactly the year's length; this
    # avoids summing the lengths of all twelve months.
    return pd.Timestamp(year=year, month=12, day=31).dayofyear
def number_of_weeks_in_a_year(year):
    """Return the number of ISO weeks (52 or 53) in ``year``."""
    # December 28th always falls in the last ISO week of its year, so its
    # ISO week number equals the year's total week count.
    return pd.Timestamp(year=year, month=12, day=28).isocalendar()[1]
def number_of_days_in_a_month(month, year):
    """Return the count of calendar days in the given *month* of *year*."""
    return int(pd.Timestamp(year=year, month=month, day=1).days_in_month)
def number_of_weeks_in_a_month(month, year):
    """Return the number of distinct ISO weeks the given month touches (4-6).

    The count is ``last_week - first_week + 1`` on the ISO calendar. When the
    ISO week numbers wrap around a year boundary the difference goes negative
    and is undone by adding the week count of the year the wrap happened in.

    :param month: month number, 1-12
    :param year: calendar year
    :return: (int) number of ISO weeks overlapping the month
    """
    start_of_month = pd.Timestamp(f"{year}-{month}-1").isocalendar()[1]
    end_of_month = (
        pd.Timestamp(f"{year}-{month}-1") + pd.tseries.offsets.MonthEnd(1)
    ).isocalendar()[1]
    number_of_weeks = end_of_month - start_of_month + 1
    if number_of_weeks <= 0:
        # The ISO week numbers wrapped a year boundary. For January the first
        # days belong to the *previous* ISO year (e.g. Jan 1 2016 is week 53
        # of 2015), so the wrap year is year - 1; for December the wrap week
        # (week 1 of next year) is undone with the current year's week count.
        wrap_year = year - 1 if month == 1 else year
        # Dec 28 always sits in the last ISO week of its year, i.e. its week
        # number is that year's total ISO week count.
        weeks_in_wrap_year = int(
            pd.Timestamp(year=wrap_year, month=12, day=28).isocalendar()[1]
        )
        number_of_weeks = end_of_month + weeks_in_wrap_year - start_of_month + 1
    return number_of_weeks
def color_map(condition):
    """Map a boolean flag to a highlight colour: "red" when True, "grey" otherwise."""
    return "red" if condition else "grey"
class DataReviewer:
    """
    DataReviewer is the class that contains all the functionalities of data review outlined in the Robyn documentation seen here: https://facebookexperimental.github.io/Robyn/docs/analysts-guide-to-MMM/#data-review
    Specifically it covers:
    1. Provide descriptive statistics or a basic overview of the collected data inputs (useful to help determine if there is any missing or incomplete data and can help identify the specific variable (e.g. media channel) that requires further investigation)
    2. Help analyse the correlation between all the different variables (multicollinearity detection, expected impact estimation on dependent variable)
    3. Help check for the accuracy of the collected data (spend share and trend)
    """

    def __init__(
        self,
        dep_var: str,
        paid_media_vars: List[str],
        paid_media_spends: List[str],
        extra_vars: Union[
            str, List[str]
        ] = None,  # combination of context vars and organic vars
        date_var: str = "DATE",
        date_format: Union[str, None] = None,
        file_path: str = None,
        data_frame: pd.DataFrame = None,
        date_frequency: str = "weekly",
        review_output_dir: str = "review_output",
    ) -> None:
        """
        :param dep_var: name of the dependent (KPI) column
        :param paid_media_vars: names of the paid media exposure columns
        :param paid_media_spends: names of the paid media spend columns; must
            have the same length as ``paid_media_vars``
        :param extra_vars: optional extra independent variables (context vars
            and organic vars); accepts a single column name or a list of names
        :param date_var: name of the date column in the input data
        :param date_format: optional format string used to parse ``date_var``;
            ``None`` lets pandas infer the format
        :param file_path: path of a CSV file to load; takes precedence over
            ``data_frame`` when both are provided
        :param data_frame: input data as an already-loaded DataFrame
        :param date_frequency: granularity of the data, "weekly" or "daily"
        :param review_output_dir: root directory for review artifacts; a
            timestamped sub-directory is created per run
        """
        data = self._read_input_data_source(file_path, data_frame)
        self.paid_media_vars = paid_media_vars
        self.dep_var = dep_var
        # Normalize extra_vars so the list concatenation below always works:
        # None means "no extra variables" and a bare string means one column.
        # (Previously the documented default of None crashed with a TypeError.)
        if extra_vars is None:
            extra_vars = []
        elif isinstance(extra_vars, str):
            extra_vars = [extra_vars]
        self.extra_vars = extra_vars
        self.indep_vars = paid_media_vars + extra_vars
        self.date_var = date_var
        assert len(paid_media_spends) == len(
            paid_media_vars
        ), "there should be as many as paid media spend variables as the paid media variables"
        self.paid_media_spends = paid_media_spends
        if not os.path.isdir(review_output_dir):
            os.mkdir(review_output_dir)
        # One timestamped sub-directory per run so consecutive runs never
        # overwrite each other's artifacts.
        output_folder = datetime.now().strftime("%Y%m%d%H%M%S")
        self.output_dir = os.path.join(review_output_dir, output_folder)
        if not os.path.isdir(self.output_dir):
            os.mkdir(self.output_dir)
        # copy raw data over so there is a reference on the data that generated this
        data.to_csv(os.path.join(self.output_dir, "original_data.csv"), index=False)
        # convert the data's date column upfront
        data[date_var] = pd.to_datetime(data[date_var], format=date_format)
        self.data = data[
            list(
                set(
                    [self.date_var]
                    + self.indep_vars
                    + [self.dep_var]
                    + self.paid_media_spends
                )
            )
        ]
        self._check_numerical_columns()
        self.date_frequency = date_frequency
        self._check_date_frequency()
        self.logger = logging.getLogger("robyn_data_review")

    @staticmethod
    def _read_input_data_source(
        file_path: str, data_frame: pd.DataFrame
    ) -> pd.DataFrame:
        """Return the input data, preferring ``file_path`` over ``data_frame``.

        :raises ValueError: when neither a file path nor a non-empty DataFrame
            is supplied
        """
        if file_path is None:
            if data_frame is None or len(data_frame) == 0:
                raise ValueError(
                    "Need to have at least one source of input for the data reviewer to ingest"
                )
            else:
                return data_frame
        else:
            return pd.read_csv(file_path)

    def _check_date_frequency(self, threshold: float = 0.8) -> None:
        """Sanity-check that the declared date frequency matches the data.

        Resamples the data at the declared frequency and warns when fewer than
        ``threshold`` (a fraction) of the periods contain exactly one
        observation — a sign of time gaps or a wrongly chosen frequency.
        """
        assert pd.api.types.is_datetime64_any_dtype(self.data[self.date_var])
        assert self.date_frequency in [
            "weekly",
            "daily",
        ], "only daily or weekly frequency is currently supported"
        temp_df = self.data.set_index(self.date_var)
        if self.date_frequency == "daily":
            counts = temp_df[temp_df.columns[0]].resample("1D").count()
        else:
            counts = temp_df[temp_df.columns[0]].resample("1W").count()
        if (counts == 1).mean() < threshold:
            warnings.warn(
                "Please check if the date frequency is rightly selected or check if there are substantial time gaps "
                "in the dataset"
            )
        return None

    def _check_numerical_columns(self):
        """Assert every modelling column (independent, dependent, spend) is numeric."""
        all_numerical_columns = list(
            set(self.indep_vars + [self.dep_var] + self.paid_media_spends)
        )
        for column in all_numerical_columns:
            assert pd.api.types.is_numeric_dtype(
                self.data[column]
            ), f"the input data column {column} from paid media vars + paid media spends + extra vars + dep vars should be of numerical type"
        return None

    def plot_missing_values(self) -> None:
        """Plot per-column missing-data percentage and save it to the output dir."""
        percent_missing = self.data.isnull().sum() / len(self.data)
        percent_nonmissing = 1 - percent_missing
        data_completeness = pd.DataFrame(
            {
                "Data is complete": percent_nonmissing,
                "Has missing data": percent_missing,
            }
        )
        data_completeness = data_completeness * 100
        ax = data_completeness.plot.barh(
            color={"Data is complete": "grey", "Has missing data": "red"},
            stacked=True,
            figsize=(20, 40),
        )
        ax.legend(loc="center left", bbox_to_anchor=(1.0, 0.5))
        for p in ax.containers[0].patches:
            ax.annotate(
                str(round(p.get_width())) + "%",
                ((p.get_x() + 100) * 1.005, p.get_y() * 1.005),
            )
        plt.xlabel("Examine the variables with missing data (highlighted in red above)")
        plt.ylabel("Variables")
        plt.figtext(0.5, 0.01, f"total number of observations is {len(self.data)}")
        plt.tight_layout()
        plt.savefig(os.path.join(self.output_dir, "missing_values_overall.png"))
        plt.close()
        return None

    def plot_missing_data_in_a_year(
        self, threshold: float = 0.05, color_map_fun: Callable = None
    ) -> None:
        """Plot the yearly observation tally, highlighting years with data gaps.

        :param threshold: fraction of the year's maximum possible observation
            count that may be missing before the year's bar is flagged red
        :param color_map_fun: optional callable mapping a boolean "violates
            threshold" flag to a bar colour; defaults to :func:`color_map`
        """
        if color_map_fun is None:
            # Resolved at call time instead of being bound as a default
            # argument when the class is defined.
            color_map_fun = color_map
        yearly_observation_count = self.data.groupby(self.data[self.date_var].dt.year)[
            self.date_var
        ].count()
        yearly_observation = pd.DataFrame(yearly_observation_count).rename(
            columns={self.date_var: "number_of_observations"}
        )
        if self.date_frequency == "weekly":
            yearly_observation["max_number_of_date_unit"] = [
                number_of_weeks_in_a_year(x) for x in yearly_observation.index
            ]
        else:
            yearly_observation["max_number_of_date_unit"] = [
                number_of_days_in_a_year(x) for x in yearly_observation.index
            ]
        yearly_observation["diff_perc"] = (
            yearly_observation.max_number_of_date_unit
            - yearly_observation.number_of_observations
        ) / yearly_observation.max_number_of_date_unit
        colors = list(
            map(color_map_fun, (yearly_observation["diff_perc"] >= threshold).tolist())
        )
        ax = yearly_observation.plot.bar(
            y="number_of_observations",
            rot=0,
            color=colors,
            legend=False,
            figsize=(10, 6),
        )
        for p in ax.patches:
            ax.annotate(
                str(p.get_height()),
                (p.get_x() + p.get_width() / 2, p.get_height() * 1.01),
            )
        # threshold is a fraction, so format it as a percentage for the legend
        # (a raw f"{threshold}%" would display 0.05 as "0.05%", 100x off).
        red_patch = mpatches.Patch(color="red", label=f">= {threshold:.0%}")
        blue_patch = mpatches.Patch(color="grey", label=f"< {threshold:.0%}")
        plt.legend(
            title="% difference vs. max number of observations per year",
            title_fontsize=13,
            prop={"size": 13},
            bbox_to_anchor=(1.02, 1),
            handles=[red_patch, blue_patch],
        )
        plt.xlabel("Year")
        plt.tight_layout()
        plt.savefig(os.path.join(self.output_dir, "missing_yearly_values.png"))
        plt.close()
        return None

    def plot_monthly_tally_of_observations(self, year_to_investigate: int):
        """Plot observed vs. maximum possible observation counts per month.

        :param year_to_investigate: the calendar year to break down by month
        """
        full_df_year = self.data[
            self.data[self.date_var].dt.year == year_to_investigate
        ]
        monthly_observation_count = full_df_year.groupby(
            full_df_year[self.date_var].dt.month
        )[self.date_var].count()
        # The counted Series carries the date column's name, so the rename key
        # must be self.date_var (a hard-coded "full_date" silently no-ops for
        # any other date column name).
        monthly_observation = pd.DataFrame(monthly_observation_count).rename(
            columns={self.date_var: "number_of_observations"}
        )
        if self.date_frequency == "weekly":
            monthly_observation["max_number_of_date_unit"] = [
                number_of_weeks_in_a_month(x, year_to_investigate)
                for x in monthly_observation.index
            ]
        else:
            monthly_observation["max_number_of_date_unit"] = [
                number_of_days_in_a_month(x, year_to_investigate)
                for x in monthly_observation.index
            ]
        ax = monthly_observation.plot.bar(rot=0, figsize=(10, 6))
        ax.legend(
            labels=[
                "Number of observations in a month",
                "Max number of observations in a month",
            ],
            bbox_to_anchor=(1.02, 1),
        )
        for container in ax.containers:
            for p in container.patches:
                ax.annotate(
                    str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005)
                )
        plt.xlabel("Month")
        plt.title(f"Total count of observation for input data in {year_to_investigate}")
        plt.tight_layout()
        plt.savefig(
            os.path.join(
                self.output_dir,
                f"missing_monthly_values_of_year_{year_to_investigate}.png",
            )
        )
        plt.close()
        return None

    def plot_correlation_heat_map_for_independent_vars(self, fig_size=(16, 12)):
        """Plot a lower-triangle correlation heatmap of the independent variables."""
        data_for_indep_correlation = self.data[self.indep_vars]
        # Calculate pairwise-correlation
        matrix = data_for_indep_correlation.corr()
        # Mask the upper triangle (it mirrors the lower one)
        mask = np.triu(np.ones_like(matrix, dtype=bool))
        # Create a custom diverging palette
        cmap = sns.diverging_palette(
            250, 15, s=75, l=40, n=9, center="light", as_cmap=True
        )
        plt.figure(figsize=fig_size)
        sns.heatmap(
            matrix, mask=mask, center=0, annot=True, fmt=".2f", square=True, cmap=cmap
        )
        plt.tight_layout()
        plt.savefig(
            os.path.join(
                self.output_dir,
                "independent_variable_correlation_heatmap.png",
            )
        )
        plt.close()
        return None

    def compute_vif(self, threshold=10):
        """Compute Variance Inflation Factor (VIF) for the independent variable to detect multi-colinearity.

        :param threshold: VIF value above which a variable is counted as a
            multicollinearity violation (10 is a commonly used rule of thumb)
        :return: DataFrame with one VIF value per independent variable; also
            written to ``vif_independent_vars.csv`` in the output directory
        """
        data_for_vif = self.data[self.indep_vars]
        # VIF dataframe
        vif_data = pd.DataFrame()
        vif_data["feature"] = data_for_vif.columns
        # calculating VIF for each feature
        vif_data["VIF"] = [
            variance_inflation_factor(data_for_vif.values, i)
            for i in range(len(data_for_vif.columns))
        ]
        pct_of_vars_violations = round((vif_data["VIF"] > threshold).mean() * 100)
        self.logger.info(
            f"{pct_of_vars_violations}% of the independent variables violate the vif threshold out of {len(vif_data['VIF'])} variables"
        )
        vif_data.to_csv(
            os.path.join(
                self.output_dir,
                "vif_independent_vars.csv",
            )
        )
        return vif_data

    def plot_correlation_dep_var(self, fig_size=(16, 12)):
        """Plot each independent variable's correlation with the dependent variable."""
        columns_to_include = self.indep_vars + [self.dep_var]
        correlation_to_dep = self.data[columns_to_include].corr()[self.dep_var]
        plt.figure(figsize=fig_size)
        ax = (
            correlation_to_dep[correlation_to_dep.index != self.dep_var]
            .round(2)
            .sort_values(ascending=False)
            .plot.bar()
        )
        for p in ax.patches:
            ax.annotate(str(p.get_height()), (p.get_x() * 1.005, p.get_height()))
        plt.title("Correlation with dependent variable")
        plt.xlabel(
            "Examine the variables with high correlation to see if it is expected or not"
        )
        plt.tight_layout()
        plt.savefig(
            os.path.join(
                self.output_dir,
                "correlation_between_dependent_vars_and_independent_vars.png",
            )
        )
        plt.close()
        return None

    def plot_cost_share_trend(self, fig_size=(16, 12)):
        """Plot each channel's share of total media spend over time as an area chart."""
        costs = self.data[self.paid_media_spends + [self.date_var]].set_index(
            self.date_var
        )
        costs_pct = costs.div(costs.sum(axis=1), axis=0)
        # pandas creates its own figure when figsize is passed, so no bare
        # plt.figure() here — a preceding empty figure would leak, since the
        # final plt.close() only closes the figure pandas drew on.
        ax = costs_pct.plot.area(colormap="Paired", figsize=fig_size)
        ax.legend(loc="center left", bbox_to_anchor=(1.0, 0.5))
        plt.xlabel("date")
        plt.ylabel("percentage of total costs")
        plt.title("Share of total media spend per channel")
        plt.tight_layout()
        plt.savefig(
            os.path.join(
                self.output_dir,
                "cost_share_trend_plot.png",
            )
        )
        plt.close()
        return None

    def plot_cost_trend(self, fig_size=(16, 12)):
        """Plot the total media spend across all channels over time."""
        costs = self.data[self.paid_media_spends + [self.date_var]].set_index(
            self.date_var
        )
        # pandas creates the figure via figsize; see plot_cost_share_trend.
        costs.sum(axis=1).plot.line(figsize=fig_size)
        plt.xlabel("date")
        plt.ylabel("total costs")
        plt.title("Total media spend in the same time period")
        plt.tight_layout()
        plt.savefig(
            os.path.join(
                self.output_dir,
                "cost_overall_trend_plot.png",
            )
        )
        plt.close()
        return None

    def plot_kpi_trend(self, fig_size=(16, 12)):
        """Plot the dependent (KPI) variable over time."""
        kpi = self.data[[self.date_var, self.dep_var]].set_index(self.date_var)
        # pandas creates the figure via figsize; see plot_cost_share_trend.
        kpi.plot.line(figsize=fig_size)
        plt.xlabel("date")
        # Label with the configured KPI column rather than a hard-coded,
        # domain-specific string.
        plt.ylabel(self.dep_var)
        plt.title("KPI in the same time period")
        plt.tight_layout()
        plt.savefig(
            os.path.join(
                self.output_dir,
                "kpi_overall_trend_plot.png",
            )
        )
        plt.close()
        return None

    def plot_media_trend(self):
        """Plot each media spend column's within-year trend, one panel per year."""
        media_trend_df = self.data[self.paid_media_spends + [self.date_var]].copy()
        media_trend_df["year"] = media_trend_df[self.date_var].dt.year
        media_trend_df["day"] = media_trend_df[self.date_var].dt.dayofyear
        for col in self.paid_media_spends:
            rel = sns.relplot(
                data=media_trend_df, x="day", y=col, col="year", kind="line"
            )
            rel.fig.suptitle(f"Trend for {col}")
            rel.fig.subplots_adjust(top=0.8)
            plt.savefig(os.path.join(self.output_dir, f"{col}_trend_plots.png"))
            plt.close()
        return None

    def run_review(self):
        """Run the full data-review suite and write all artifacts to the output dir."""
        self.plot_missing_values()
        self.plot_missing_data_in_a_year()
        unique_years = self.data[self.date_var].dt.year.unique()
        for year in unique_years:
            self.plot_monthly_tally_of_observations(year)
        self.plot_correlation_heat_map_for_independent_vars()
        self.compute_vif()
        self.plot_correlation_dep_var()
        self.plot_kpi_trend()
        self.plot_media_trend()
        self.plot_cost_trend()
        self.plot_cost_share_trend()
        return None
<p align="center"><img alt="Robyn Logo" src="https://user-images.githubusercontent.com/29942790/140995889-5d91dcff-3aa7-4cfb-8a90-2cddf1337dca.png" width="250" /><p>
# Robyn
[](https://twitter.com/robyn_oss)
[](https://pepy.tech/project/robyn)
[](https://github.com/sansyrox/robyn/releases/)
[](#license)

[](https://sansyrox.github.io/robyn/#/)
[](https://discord.gg/rkERZ5eNU8)
Robyn is a High-Performance, Community-Driven, and Innovator Friendly Web Framework with a Rust runtime. You can learn more by checking our [community resources](https://sansyrox.github.io/robyn/#/community-resources)!
## 📦 Installation
You can simply use Pip for installation.
```
pip install robyn
```
Or, with [conda-forge](https://conda-forge.org/)
```
conda install -c conda-forge robyn
```
## 🤔 Usage
### 🚀 Define your API
To define your API, you can add the following code in an `app.py` file.
```python
from robyn import Robyn
app = Robyn(__file__)
@app.get("/")
async def h(request):
return "Hello, world!"
app.start(port=8080)
```
### 🏃 Run your code
Simply run the app.py file you created. You will then have access to a server on `localhost:8080`, which you can query from another program. Robyn provides several options to customize your web server.
```
$ python3 app.py
```
To see the usage
```
usage: app.py [-h] [--processes PROCESSES] [--workers WORKERS] [--dev] [--log-level LOG_LEVEL]
Robyn, a fast async web framework with a rust runtime.
options:
-h, --help show this help message and exit
--processes PROCESSES Choose the number of processes. [Default: 1]
--workers WORKERS Choose the number of workers. [Default: 1]
--dev Development mode. It restarts the server based on file changes.
--log-level LOG_LEVEL Set the log level name
```
Log level can be `DEBUG`, `INFO`, `WARNING`, or `ERROR`.
### 💻 Add more routes
You can add more routes to your API. Check out the routes in [this file](https://github.com/sansyrox/robyn/blob/main/integration_tests/base_routes.py) as examples.
## 🐍 Python Version Support
Robyn is compatible with the following Python versions:
> Python >= 3.7
It is recommended to use the latest version of Python for the best performance.
Please make sure you have the correct version of Python installed before starting to use
this project. You can check your Python version by running the following command in your
terminal:
```bash
python --version
```
## 💡 Features
- Under active development!
- Written in Rust, btw xD
- A multithreaded Runtime
- Extensible
- A simple API
- Sync and Async Function Support
- Dynamic URL Routing
- Multi Core Scaling
- WebSockets!
- Middlewares
- Hot Reloading
- Community First and truly FOSS!
## 🗒️ How to contribute
### 🏁 Get started
Please read the [code of conduct](https://github.com/sansyrox/robyn/blob/main/CODE_OF_CONDUCT.md) and go through [CONTRIBUTING.md](https://github.com/sansyrox/robyn/blob/main/CONTRIBUTING.md) before contributing to Robyn.
Feel free to open an issue for any clarifications or suggestions.
If you're feeling curious, you can take a look at a more detailed architecture [here](https://sansyrox.github.io/robyn/#/architecture).
If you still need help to get started, feel free to reach out on our [community discord](https://discord.gg/rkERZ5eNU8).
### ⚙️ To Develop Locally
1. Install the development dependencies (preferably inside a virtual environment): `pip install -r dev-requirements.txt`
2. Install the pre-commit git hooks: `pre-commit install`
3. Run `maturin develop` or `maturin develop --cargo-extra-args="--features=io-uring"` for using the experimental version of actix-web. This command will build the Robyn Rust package and install it in your virtual environment.
4. Run `python3 integration_tests/base_routes.py`. This file contains several examples of routes we use for testing purposes. You can modify or add some to your likings.
You can then send requests to the server you started from another terminal. Here is a `GET` request done using [curl](https://curl.se/) for example:
```bash
curl http://localhost:8080/sync/str
```
## ✨ Special thanks
### ✨ Contributors/Supporters
Thanks to all the contributors of the project. Robyn will not be what it is without all your support :heart:.
<a href="https://github.com/sansyrox/robyn/graphs/contributors">
<img src="https://contrib.rocks/image?repo=sansyrox/robyn" />
</a>
Special thanks to the [PyO3](https://pyo3.rs/v0.13.2/) community and [Andrew from PyO3-asyncio](https://github.com/awestlake87/pyo3-asyncio) for their amazing libraries and their support for my queries. 💖
### ✨ Sponsors
These sponsors help us make the magic happen!
[](https://www.digitalocean.com/?refcode=3f2b9fd4968d&utm_campaign=Referral_Invite&utm_medium=Referral_Program&utm_source=badge)
[](https://github.com/appwrite)
- [Shivay Lamba](https://github.com/shivaylamba)
## Star History
[](https://star-history.com/#sansyrox/robyn&Date)
| /robyn-0.30.0.tar.gz/robyn-0.30.0/README.md | 0.489503 | 0.899916 | README.md | pypi |
# Changelog
## [Unreleased](https://github.com/sansyrox/robyn/tree/HEAD)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.26.1...HEAD)
**Closed issues:**
- Payload reached size limit. [\#463](https://github.com/sansyrox/robyn/issues/463)
- Proposal to rename `params` with `path_params` [\#457](https://github.com/sansyrox/robyn/issues/457)
**Merged pull requests:**
- feat: allow configurable payload sizes [\#465](https://github.com/sansyrox/robyn/pull/465) ([sansyrox](https://github.com/sansyrox))
- docs: remove test pypi instructions from pr template [\#462](https://github.com/sansyrox/robyn/pull/462) ([sansyrox](https://github.com/sansyrox))
- Rename params with path\_params [\#460](https://github.com/sansyrox/robyn/pull/460) ([carlosm27](https://github.com/carlosm27))
- feat: Implement global CORS [\#458](https://github.com/sansyrox/robyn/pull/458) ([sansyrox](https://github.com/sansyrox))
## [v0.26.1](https://github.com/sansyrox/robyn/tree/v0.26.1) (2023-04-05)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.26.0...v0.26.1)
**Fixed bugs:**
- Can't access new or updated route while on dev option [\#439](https://github.com/sansyrox/robyn/issues/439)
**Closed issues:**
- Add documentation for `robyn.env` file [\#454](https://github.com/sansyrox/robyn/issues/454)
**Merged pull requests:**
- Release v0.26.1 [\#461](https://github.com/sansyrox/robyn/pull/461) ([sansyrox](https://github.com/sansyrox))
- \[pre-commit.ci\] pre-commit autoupdate [\#459](https://github.com/sansyrox/robyn/pull/459) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
- \[pre-commit.ci\] pre-commit autoupdate [\#452](https://github.com/sansyrox/robyn/pull/452) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
- docs: Add docs for v0.26.0 [\#451](https://github.com/sansyrox/robyn/pull/451) ([sansyrox](https://github.com/sansyrox))
- fix\(dev\): fix hot reloading with dev flag [\#446](https://github.com/sansyrox/robyn/pull/446) ([AntoineRR](https://github.com/AntoineRR))
## [v0.26.0](https://github.com/sansyrox/robyn/tree/v0.26.0) (2023-03-24)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.25.0...v0.26.0)
**Implemented enhancements:**
- \[Feature Request\] Robyn providing Status Codes? [\#423](https://github.com/sansyrox/robyn/issues/423)
- \[Feature Request\] Allow global level Response headers [\#335](https://github.com/sansyrox/robyn/issues/335)
**Fixed bugs:**
- \[BUG\] `uvloop` ModuleNotFoundError: No module named 'uvloop' on Ubuntu Docker Image [\#395](https://github.com/sansyrox/robyn/issues/395)
**Closed issues:**
- \[Feature Request\] When Robyn can have a middleware mechanism like flask or django [\#350](https://github.com/sansyrox/robyn/issues/350)
- Forced shutdown locks console. \[BUG\] [\#317](https://github.com/sansyrox/robyn/issues/317)
**Merged pull requests:**
- \[pre-commit.ci\] pre-commit autoupdate [\#449](https://github.com/sansyrox/robyn/pull/449) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
- fix: Implement auto installation of uvloop on linux arm [\#445](https://github.com/sansyrox/robyn/pull/445) ([sansyrox](https://github.com/sansyrox))
- chore: update rust dependencies [\#444](https://github.com/sansyrox/robyn/pull/444) ([AntoineRR](https://github.com/AntoineRR))
- feat: Implement performance benchmarking [\#443](https://github.com/sansyrox/robyn/pull/443) ([sansyrox](https://github.com/sansyrox))
- feat: expose request/connection info [\#441](https://github.com/sansyrox/robyn/pull/441) ([r3b-fish](https://github.com/r3b-fish))
- Install the CodeSee workflow. [\#438](https://github.com/sansyrox/robyn/pull/438) ([codesee-maps[bot]](https://github.com/apps/codesee-maps))
- \[pre-commit.ci\] pre-commit autoupdate [\#437](https://github.com/sansyrox/robyn/pull/437) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
- Replace integer status codes with Enum values of StatusCodes [\#436](https://github.com/sansyrox/robyn/pull/436) ([Noborita9](https://github.com/Noborita9))
- added `star-history` [\#434](https://github.com/sansyrox/robyn/pull/434) ([hemangjoshi37a](https://github.com/hemangjoshi37a))
- \[pre-commit.ci\] pre-commit autoupdate [\#433](https://github.com/sansyrox/robyn/pull/433) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
- feat: Robyn providing status codes [\#429](https://github.com/sansyrox/robyn/pull/429) ([carlosm27](https://github.com/carlosm27))
- feat: Allow global level Response headers [\#410](https://github.com/sansyrox/robyn/pull/410) ([ParthS007](https://github.com/ParthS007))
- feat: get rid of intermediate representations of requests and responses [\#397](https://github.com/sansyrox/robyn/pull/397) ([AntoineRR](https://github.com/AntoineRR))
## [v0.25.0](https://github.com/sansyrox/robyn/tree/v0.25.0) (2023-02-20)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.24.1...v0.25.0)
**Implemented enhancements:**
- using robyn with some frameworks [\#420](https://github.com/sansyrox/robyn/issues/420)
**Fixed bugs:**
- Template Rendering is not working in some browsers [\#426](https://github.com/sansyrox/robyn/issues/426)
**Closed issues:**
- \[Feature Request\] Show support for Python versions in the README [\#396](https://github.com/sansyrox/robyn/issues/396)
- \[BUG\] The dev flag doesn't set the log level to DEBUG [\#385](https://github.com/sansyrox/robyn/issues/385)
- \[BUG\] All tests are not passing on windows [\#372](https://github.com/sansyrox/robyn/issues/372)
- \[Feature Request\] Add views/view controllers [\#221](https://github.com/sansyrox/robyn/issues/221)
**Merged pull requests:**
- fix: Add proper headers to the templates return types [\#427](https://github.com/sansyrox/robyn/pull/427) ([sansyrox](https://github.com/sansyrox))
- \[pre-commit.ci\] pre-commit autoupdate [\#425](https://github.com/sansyrox/robyn/pull/425) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
- docs: Add documentation for views [\#424](https://github.com/sansyrox/robyn/pull/424) ([sansyrox](https://github.com/sansyrox))
- better way to compare type [\#421](https://github.com/sansyrox/robyn/pull/421) ([jmishra01](https://github.com/jmishra01))
- style\(landing\_page\): fix the style of github logo on the landing page [\#419](https://github.com/sansyrox/robyn/pull/419) ([sansyrox](https://github.com/sansyrox))
- docs: improve readme [\#418](https://github.com/sansyrox/robyn/pull/418) ([AntoineRR](https://github.com/AntoineRR))
- docs: add dark mode to website [\#416](https://github.com/sansyrox/robyn/pull/416) ([AntoineRR](https://github.com/AntoineRR))
- chore: improve issue templates [\#413](https://github.com/sansyrox/robyn/pull/413) ([AntoineRR](https://github.com/AntoineRR))
- \[pre-commit.ci\] pre-commit autoupdate [\#412](https://github.com/sansyrox/robyn/pull/412) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
- fix: fixed CONTRIBUTE.md link into docs/README.md file, changing it f… [\#411](https://github.com/sansyrox/robyn/pull/411) ([Kop3sh](https://github.com/Kop3sh))
- chore\(ci\): fix rust ci warnings [\#408](https://github.com/sansyrox/robyn/pull/408) ([AntoineRR](https://github.com/AntoineRR))
- feat: Add view controllers [\#407](https://github.com/sansyrox/robyn/pull/407) ([mikaeelghr](https://github.com/mikaeelghr))
- Fix docs: support version [\#404](https://github.com/sansyrox/robyn/pull/404) ([Oluwaseun241](https://github.com/Oluwaseun241))
- fix: Fix Windows tests [\#402](https://github.com/sansyrox/robyn/pull/402) ([sansyrox](https://github.com/sansyrox))
- docs: Update PyPi metadata [\#401](https://github.com/sansyrox/robyn/pull/401) ([sansyrox](https://github.com/sansyrox))
- fix\(test\): fix tests on windows [\#400](https://github.com/sansyrox/robyn/pull/400) ([AntoineRR](https://github.com/AntoineRR))
- fix: various improvements around the dev flag [\#388](https://github.com/sansyrox/robyn/pull/388) ([AntoineRR](https://github.com/AntoineRR))
## [v0.24.1](https://github.com/sansyrox/robyn/tree/v0.24.1) (2023-02-09)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.24.0...v0.24.1)
**Closed issues:**
- \[BUG\] \[Windows\] Terminal hanging after Ctrl+C is pressed on Robyn server [\#373](https://github.com/sansyrox/robyn/issues/373)
**Merged pull requests:**
- \[pre-commit.ci\] pre-commit autoupdate [\#394](https://github.com/sansyrox/robyn/pull/394) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
- docs: add documentation regarding byte response [\#392](https://github.com/sansyrox/robyn/pull/392) ([sansyrox](https://github.com/sansyrox))
- fix: fix terminal hijacking in windows [\#391](https://github.com/sansyrox/robyn/pull/391) ([sansyrox](https://github.com/sansyrox))
- chore: fix requirements files and update packages [\#389](https://github.com/sansyrox/robyn/pull/389) ([AntoineRR](https://github.com/AntoineRR))
- small correction in docs [\#387](https://github.com/sansyrox/robyn/pull/387) ([tkanhe](https://github.com/tkanhe))
- \[pre-commit.ci\] pre-commit autoupdate [\#384](https://github.com/sansyrox/robyn/pull/384) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
- ci: build artifacts on every push and pull [\#378](https://github.com/sansyrox/robyn/pull/378) ([sansyrox](https://github.com/sansyrox))
- test: organize and add tests [\#377](https://github.com/sansyrox/robyn/pull/377) ([AntoineRR](https://github.com/AntoineRR))
- Changed Response to use body: bytes [\#375](https://github.com/sansyrox/robyn/pull/375) ([madhavajay](https://github.com/madhavajay))
## [v0.24.0](https://github.com/sansyrox/robyn/tree/v0.24.0) (2023-02-06)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.23.1...v0.24.0)
**Closed issues:**
- \[BUG\] Release builds are not working [\#386](https://github.com/sansyrox/robyn/issues/386)
- \[BUG\] Can't send raw bytes [\#374](https://github.com/sansyrox/robyn/issues/374)
## [v0.23.1](https://github.com/sansyrox/robyn/tree/v0.23.1) (2023-01-30)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.23.0...v0.23.1)
**Closed issues:**
- \[BUG\] Return 500 status code when route is raising [\#381](https://github.com/sansyrox/robyn/issues/381)
- \[BUG\] Return 404 status code when route isn't set [\#376](https://github.com/sansyrox/robyn/issues/376)
- Add Appwrite as a sponsor in the README [\#348](https://github.com/sansyrox/robyn/issues/348)
- \[BUG\] Get Stared failed on Windows [\#340](https://github.com/sansyrox/robyn/issues/340)
- \[BUG\] Fix CI/CD pipeline [\#310](https://github.com/sansyrox/robyn/issues/310)
**Merged pull requests:**
- chore\(ci\): fix robyn installation in test CI [\#383](https://github.com/sansyrox/robyn/pull/383) ([AntoineRR](https://github.com/AntoineRR))
- fix: return 500 status code when route raise [\#382](https://github.com/sansyrox/robyn/pull/382) ([AntoineRR](https://github.com/AntoineRR))
- fix: return 404 status code when route isn't found [\#380](https://github.com/sansyrox/robyn/pull/380) ([AntoineRR](https://github.com/AntoineRR))
- ci: enable precommit hooks on everything [\#371](https://github.com/sansyrox/robyn/pull/371) ([sansyrox](https://github.com/sansyrox))
- chore: run tests on linux, macos and windows and release builds on ta… [\#370](https://github.com/sansyrox/robyn/pull/370) ([AntoineRR](https://github.com/AntoineRR))
- docs: add appwrite logo as sponsors [\#369](https://github.com/sansyrox/robyn/pull/369) ([sansyrox](https://github.com/sansyrox))
- test: improve pytest fixtures [\#368](https://github.com/sansyrox/robyn/pull/368) ([AntoineRR](https://github.com/AntoineRR))
- Move pre-commit hooks to use Ruff [\#364](https://github.com/sansyrox/robyn/pull/364) ([patrick91](https://github.com/patrick91))
## [v0.23.0](https://github.com/sansyrox/robyn/tree/v0.23.0) (2023-01-21)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.22.1...v0.23.0)
**Closed issues:**
- \[Feature Request\] Improve the release and testing pipeline [\#341](https://github.com/sansyrox/robyn/issues/341)
**Merged pull requests:**
- ci: delete the test pypi workflow [\#367](https://github.com/sansyrox/robyn/pull/367) ([sansyrox](https://github.com/sansyrox))
- docs: Add page icon to index page [\#365](https://github.com/sansyrox/robyn/pull/365) ([Abdur-rahmaanJ](https://github.com/Abdur-rahmaanJ))
- test: speed up tests [\#362](https://github.com/sansyrox/robyn/pull/362) ([AntoineRR](https://github.com/AntoineRR))
- Replace the default port with 8080 [\#352](https://github.com/sansyrox/robyn/pull/352) ([sansyrox](https://github.com/sansyrox))
## [v0.22.1](https://github.com/sansyrox/robyn/tree/v0.22.1) (2023-01-16)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.22.0...v0.22.1)
**Closed issues:**
- \[BUG\] Python 3.11 error: metadata-generation-failed [\#357](https://github.com/sansyrox/robyn/issues/357)
**Merged pull requests:**
- ci: update precommit config [\#361](https://github.com/sansyrox/robyn/pull/361) ([sansyrox](https://github.com/sansyrox))
- \[pre-commit.ci\] pre-commit autoupdate [\#359](https://github.com/sansyrox/robyn/pull/359) ([pre-commit-ci[bot]](https://github.com/apps/pre-commit-ci))
- chore\(ci\): add python 3.11 to the build and test CI [\#358](https://github.com/sansyrox/robyn/pull/358) ([AntoineRR](https://github.com/AntoineRR))
- Updates prose to format code block and docs [\#356](https://github.com/sansyrox/robyn/pull/356) ([rachfop](https://github.com/rachfop))
## [v0.22.0](https://github.com/sansyrox/robyn/tree/v0.22.0) (2023-01-14)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.21.0...v0.22.0)
**Closed issues:**
- AttributeError: 'Robyn' object has no attribute 'headers'\[BUG\] [\#353](https://github.com/sansyrox/robyn/issues/353)
- \[Feature Request\] Allow support for multiple file types [\#344](https://github.com/sansyrox/robyn/issues/344)
- \[Feature Request\] Investigate if we need an unit tests for Python functions created in Rust [\#311](https://github.com/sansyrox/robyn/issues/311)
- \[Experimental Feature Request\] Story driven programming [\#258](https://github.com/sansyrox/robyn/issues/258)
**Merged pull requests:**
- fix: windows support [\#354](https://github.com/sansyrox/robyn/pull/354) ([sansyrox](https://github.com/sansyrox))
- fix: better handling of route return type [\#349](https://github.com/sansyrox/robyn/pull/349) ([AntoineRR](https://github.com/AntoineRR))
## [v0.21.0](https://github.com/sansyrox/robyn/tree/v0.21.0) (2023-01-06)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.20.0...v0.21.0)
**Closed issues:**
- \[Feature Request\] Support for image file type [\#343](https://github.com/sansyrox/robyn/issues/343)
- Not able to see the added logs [\#342](https://github.com/sansyrox/robyn/issues/342)
- \[Feature Request\] Hope robyn can support returning f-string format [\#338](https://github.com/sansyrox/robyn/issues/338)
- \[Feature Request\] Refactor Robyn response to allow objects other than strings [\#336](https://github.com/sansyrox/robyn/issues/336)
- \[BUG\] Custom headers not sent when const=False [\#323](https://github.com/sansyrox/robyn/issues/323)
- \[Feature Request\] Add documentation for custom template support in v0.19.0 [\#321](https://github.com/sansyrox/robyn/issues/321)
- \[BUG\] Always need to return a string in a route [\#305](https://github.com/sansyrox/robyn/issues/305)
**Merged pull requests:**
- fix: fix the static file serving [\#347](https://github.com/sansyrox/robyn/pull/347) ([sansyrox](https://github.com/sansyrox))
- feat: return Response from routes [\#346](https://github.com/sansyrox/robyn/pull/346) ([AntoineRR](https://github.com/AntoineRR))
## [v0.20.0](https://github.com/sansyrox/robyn/tree/v0.20.0) (2022-12-20)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.19.2...v0.20.0)
**Closed issues:**
- \[Feature Request\] Add an automated benchmark script [\#320](https://github.com/sansyrox/robyn/issues/320)
**Merged pull requests:**
- feat: allow non string types in response [\#337](https://github.com/sansyrox/robyn/pull/337) ([sansyrox](https://github.com/sansyrox))
- feat: add an auto benchmark script [\#329](https://github.com/sansyrox/robyn/pull/329) ([AntoineRR](https://github.com/AntoineRR))
## [v0.19.2](https://github.com/sansyrox/robyn/tree/v0.19.2) (2022-12-14)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.19.1...v0.19.2)
**Closed issues:**
- \[BUG\] The --dev flag not working on Ubuntu 20.04 [\#332](https://github.com/sansyrox/robyn/issues/332)
- \[Feature Request\] Allow the ability of sending the headers from the same route [\#325](https://github.com/sansyrox/robyn/issues/325)
**Merged pull requests:**
- fix: allow response headers and fix headers not working in const requests [\#331](https://github.com/sansyrox/robyn/pull/331) ([sansyrox](https://github.com/sansyrox))
- fix: factorizing code [\#322](https://github.com/sansyrox/robyn/pull/322) ([AntoineRR](https://github.com/AntoineRR))
## [v0.19.1](https://github.com/sansyrox/robyn/tree/v0.19.1) (2022-12-03)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.19.0...v0.19.1)
## [v0.19.0](https://github.com/sansyrox/robyn/tree/v0.19.0) (2022-12-02)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.18.3...v0.19.0)
**Closed issues:**
- \[Feature Request\] Allow the ability of sending the headers from the same route [\#326](https://github.com/sansyrox/robyn/issues/326)
- \[Feature Request\] Allow the ability of sending the headers from the same route [\#324](https://github.com/sansyrox/robyn/issues/324)
- \[BUG\] Error in Examples section in Documentation [\#314](https://github.com/sansyrox/robyn/issues/314)
- \[BUG\] Wrong link for the blog post on Robyn [\#306](https://github.com/sansyrox/robyn/issues/306)
- Add documentation about deployment [\#93](https://github.com/sansyrox/robyn/issues/93)
- Add support for templates! [\#10](https://github.com/sansyrox/robyn/issues/10)
**Merged pull requests:**
- docs: update hosting docs [\#319](https://github.com/sansyrox/robyn/pull/319) ([sansyrox](https://github.com/sansyrox))
- Various improvements around the index method [\#318](https://github.com/sansyrox/robyn/pull/318) ([AntoineRR](https://github.com/AntoineRR))
- Add Railway deployment process. [\#316](https://github.com/sansyrox/robyn/pull/316) ([carlosm27](https://github.com/carlosm27))
- docs: fix middleware section in examples [\#315](https://github.com/sansyrox/robyn/pull/315) ([sansyrox](https://github.com/sansyrox))
- docs: fix blog link in website [\#309](https://github.com/sansyrox/robyn/pull/309) ([sansyrox](https://github.com/sansyrox))
- Router refactor [\#307](https://github.com/sansyrox/robyn/pull/307) ([AntoineRR](https://github.com/AntoineRR))
## [v0.18.3](https://github.com/sansyrox/robyn/tree/v0.18.3) (2022-11-10)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.18.2...v0.18.3)
**Closed issues:**
- \[BUG\] `--log-level` not working [\#300](https://github.com/sansyrox/robyn/issues/300)
- \[Feature Request\] Refactor Code to include better types [\#254](https://github.com/sansyrox/robyn/issues/254)
**Merged pull requests:**
- fix: log level not working [\#303](https://github.com/sansyrox/robyn/pull/303) ([sansyrox](https://github.com/sansyrox))
- add route type enum [\#299](https://github.com/sansyrox/robyn/pull/299) ([suhailmalik07](https://github.com/suhailmalik07))
## [v0.18.2](https://github.com/sansyrox/robyn/tree/v0.18.2) (2022-11-05)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.18.1...v0.18.2)
**Closed issues:**
- \[Feature Request?\] Update `matchit` crate to the most recent version [\#291](https://github.com/sansyrox/robyn/issues/291)
- \[Feature Request\] Add `@wraps` in route dectorators [\#285](https://github.com/sansyrox/robyn/issues/285)
- \[Feature Request\] fix clippy issues [\#265](https://github.com/sansyrox/robyn/issues/265)
**Merged pull requests:**
- style: add logging for url port and host [\#304](https://github.com/sansyrox/robyn/pull/304) ([sansyrox](https://github.com/sansyrox))
- fix config of port and url [\#302](https://github.com/sansyrox/robyn/pull/302) ([kimhyun5u](https://github.com/kimhyun5u))
- update rust packages to latest [\#298](https://github.com/sansyrox/robyn/pull/298) ([suhailmalik07](https://github.com/suhailmalik07))
- fix: retain metadata of the route functions [\#295](https://github.com/sansyrox/robyn/pull/295) ([sansyrox](https://github.com/sansyrox))
- `SocketHeld::new` refactor [\#294](https://github.com/sansyrox/robyn/pull/294) ([Jamyw7g](https://github.com/Jamyw7g))
## [v0.18.1](https://github.com/sansyrox/robyn/tree/v0.18.1) (2022-10-23)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.18.0...v0.18.1)
**Merged pull requests:**
- fix: replaced match with if let [\#293](https://github.com/sansyrox/robyn/pull/293) ([Markaeus](https://github.com/Markaeus))
- Hotfix detecting robyn.env [\#292](https://github.com/sansyrox/robyn/pull/292) ([Shending-Help](https://github.com/Shending-Help))
- fix: enable hot reload on windows [\#290](https://github.com/sansyrox/robyn/pull/290) ([guilefoylegaurav](https://github.com/guilefoylegaurav))
## [v0.18.0](https://github.com/sansyrox/robyn/tree/v0.18.0) (2022-10-12)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.17.5...v0.18.0)
**Closed issues:**
- \[BUG\] The --dev mode spawns more servers without clearing previous ones. [\#249](https://github.com/sansyrox/robyn/issues/249)
- \[Feature\] Add support for Env variables and a robyn.yaml config file [\#97](https://github.com/sansyrox/robyn/issues/97)
**Merged pull requests:**
- testing env support [\#288](https://github.com/sansyrox/robyn/pull/288) ([Shending-Help](https://github.com/Shending-Help))
- Feature add support for env variables [\#286](https://github.com/sansyrox/robyn/pull/286) ([Shending-Help](https://github.com/Shending-Help))
- fix: add proper kill process to conftest. \#249 [\#278](https://github.com/sansyrox/robyn/pull/278) ([guilefoylegaurav](https://github.com/guilefoylegaurav))
## [v0.17.5](https://github.com/sansyrox/robyn/tree/v0.17.5) (2022-09-14)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.17.4...v0.17.5)
**Closed issues:**
- \[BUG\] README.md Discord link is invalid [\#272](https://github.com/sansyrox/robyn/issues/272)
- \[Feature Request\] Add Digital Ocean to list of sponsors in Robyn Docs [\#270](https://github.com/sansyrox/robyn/issues/270)
- \[Feature Request\] Add PyCon USA lightning talk in resources section [\#204](https://github.com/sansyrox/robyn/issues/204)
- \[Feature Request\] Add community/ resources section in Docs or README [\#203](https://github.com/sansyrox/robyn/issues/203)
- \[Feature Request\] Update the new architecture on the docs website [\#191](https://github.com/sansyrox/robyn/issues/191)
- Add examples section [\#101](https://github.com/sansyrox/robyn/issues/101)
**Merged pull requests:**
- Don't run sync functions in pool [\#282](https://github.com/sansyrox/robyn/pull/282) ([JackThomson2](https://github.com/JackThomson2))
- Add documentation of Adding GraphQL support | version 1 [\#275](https://github.com/sansyrox/robyn/pull/275) ([sansyrox](https://github.com/sansyrox))
- docs: improve documentation [\#269](https://github.com/sansyrox/robyn/pull/269) ([sansyrox](https://github.com/sansyrox))
## [v0.17.4](https://github.com/sansyrox/robyn/tree/v0.17.4) (2022-08-25)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.17.3...v0.17.4)
**Closed issues:**
- \[BUG?\] Startup failure OSError: \[WinError 87\] The parameter is incorrect [\#252](https://github.com/sansyrox/robyn/issues/252)
- \[Feature Request\] Add mypy for pyi\(stubs\) synchronisation [\#226](https://github.com/sansyrox/robyn/issues/226)
- not working on mac/windows [\#140](https://github.com/sansyrox/robyn/issues/140)
**Merged pull requests:**
- Father, forgive me, for I am adding inline types. [\#266](https://github.com/sansyrox/robyn/pull/266) ([sansyrox](https://github.com/sansyrox))
## [v0.17.3](https://github.com/sansyrox/robyn/tree/v0.17.3) (2022-08-17)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.17.2...v0.17.3)
**Merged pull requests:**
- fix: parse int status code to str [\#264](https://github.com/sansyrox/robyn/pull/264) ([hougesen](https://github.com/hougesen))
## [v0.17.2](https://github.com/sansyrox/robyn/tree/v0.17.2) (2022-08-11)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.17.1...v0.17.2)
**Fixed bugs:**
- Cannot run Robyn on Windows [\#139](https://github.com/sansyrox/robyn/issues/139)
**Closed issues:**
- \[BUG\] Move away from circle ci [\#240](https://github.com/sansyrox/robyn/issues/240)
- Migrate the community to discord [\#239](https://github.com/sansyrox/robyn/issues/239)
- \[Feature Request\] Release on test pypi before releasing on the main PyPi [\#224](https://github.com/sansyrox/robyn/issues/224)
- For 0.8.x [\#75](https://github.com/sansyrox/robyn/issues/75)
- Add a layer of caching in front of router [\#59](https://github.com/sansyrox/robyn/issues/59)
**Merged pull requests:**
- Windows fix [\#261](https://github.com/sansyrox/robyn/pull/261) ([sansyrox](https://github.com/sansyrox))
- ci: enable fail fast for faster response time in the pipelines [\#260](https://github.com/sansyrox/robyn/pull/260) ([sansyrox](https://github.com/sansyrox))
- ci: add github actions to publish every PR on test pypi [\#259](https://github.com/sansyrox/robyn/pull/259) ([sansyrox](https://github.com/sansyrox))
- Fix typo in README [\#246](https://github.com/sansyrox/robyn/pull/246) ([bartbroere](https://github.com/bartbroere))
- chore\(ci\): move pytest from CircleCi to Github Actions [\#241](https://github.com/sansyrox/robyn/pull/241) ([AntoineRR](https://github.com/AntoineRR))
## [v0.17.1](https://github.com/sansyrox/robyn/tree/v0.17.1) (2022-07-19)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.17.0...v0.17.1)
**Closed issues:**
- \[Feature Request\] add clippy in ci [\#236](https://github.com/sansyrox/robyn/issues/236)
- \[BUG\] Headers not available [\#231](https://github.com/sansyrox/robyn/issues/231)
- \[Feature Request\] Add an all contributor bot in the README of the repo [\#225](https://github.com/sansyrox/robyn/issues/225)
**Merged pull requests:**
- Add Rust CI [\#237](https://github.com/sansyrox/robyn/pull/237) ([AntoineRR](https://github.com/AntoineRR))
- Contributors added in Readme [\#235](https://github.com/sansyrox/robyn/pull/235) ([orvil1026](https://github.com/orvil1026))
- fix external project link in README [\#234](https://github.com/sansyrox/robyn/pull/234) ([touilleMan](https://github.com/touilleMan))
- fix: fix request headers not being propagated [\#232](https://github.com/sansyrox/robyn/pull/232) ([sansyrox](https://github.com/sansyrox))
- Upgrade GitHub Actions and add Python 3.10 [\#230](https://github.com/sansyrox/robyn/pull/230) ([cclauss](https://github.com/cclauss))
- OrbUp: Upgrade the CircleCI Orbs [\#229](https://github.com/sansyrox/robyn/pull/229) ([cclauss](https://github.com/cclauss))
- CHANGELOG.md: Fix typo [\#228](https://github.com/sansyrox/robyn/pull/228) ([cclauss](https://github.com/cclauss))
## [v0.17.0](https://github.com/sansyrox/robyn/tree/v0.17.0) (2022-07-06)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.16.6...v0.17.0)
**Closed issues:**
- A refactor [\#176](https://github.com/sansyrox/robyn/issues/176)
- \[Proposal\] Const Requests [\#48](https://github.com/sansyrox/robyn/issues/48)
**Merged pull requests:**
- Add a const router [\#210](https://github.com/sansyrox/robyn/pull/210) ([sansyrox](https://github.com/sansyrox))
## [v0.16.6](https://github.com/sansyrox/robyn/tree/v0.16.6) (2022-07-02)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.16.5...v0.16.6)
## [v0.16.5](https://github.com/sansyrox/robyn/tree/v0.16.5) (2022-07-01)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.16.4...v0.16.5)
**Closed issues:**
- \[Feature Request\] Add sponsors in the repo and website [\#212](https://github.com/sansyrox/robyn/issues/212)
- \[Feature Request\] Add commitizen as a dev dependency [\#211](https://github.com/sansyrox/robyn/issues/211)
- Add better logging [\#158](https://github.com/sansyrox/robyn/issues/158)
- Remove freeport dependency [\#151](https://github.com/sansyrox/robyn/issues/151)
- Add websocket support [\#104](https://github.com/sansyrox/robyn/issues/104)
- Maintenance issue [\#56](https://github.com/sansyrox/robyn/issues/56)
- Improve Readme [\#4](https://github.com/sansyrox/robyn/issues/4)
**Merged pull requests:**
- fix: Fixes the crashing dev mode [\#218](https://github.com/sansyrox/robyn/pull/218) ([sansyrox](https://github.com/sansyrox))
- feat: add commitizen as a dev dependency [\#216](https://github.com/sansyrox/robyn/pull/216) ([sansyrox](https://github.com/sansyrox))
- Isort imports [\#205](https://github.com/sansyrox/robyn/pull/205) ([sansyrox](https://github.com/sansyrox))
- Add bridged logger. Improves performance substantially. [\#201](https://github.com/sansyrox/robyn/pull/201) ([sansyrox](https://github.com/sansyrox))
- Adds pre-commit hooks for black, flake8, isort [\#198](https://github.com/sansyrox/robyn/pull/198) ([chrismoradi](https://github.com/chrismoradi))
- Resolves port open issue when app is killed \#183 [\#196](https://github.com/sansyrox/robyn/pull/196) ([anandtripathi5](https://github.com/anandtripathi5))
- Removing unwraps [\#195](https://github.com/sansyrox/robyn/pull/195) ([sansyrox](https://github.com/sansyrox))
## [v0.16.4](https://github.com/sansyrox/robyn/tree/v0.16.4) (2022-05-30)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.16.3...v0.16.4)
**Closed issues:**
- \[Feature Request\] Remove extra logs [\#200](https://github.com/sansyrox/robyn/issues/200)
- \[Feature Request\] Add precommit hook for black, flake8 and isort [\#194](https://github.com/sansyrox/robyn/issues/194)
- \[BUG\] Get rid of Hashmap Clones and Unwraps! [\#186](https://github.com/sansyrox/robyn/issues/186)
## [v0.16.3](https://github.com/sansyrox/robyn/tree/v0.16.3) (2022-05-18)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.16.2...v0.16.3)
**Closed issues:**
- \[BUG\] Port not being free on app kill [\#183](https://github.com/sansyrox/robyn/issues/183)
- Build failure [\#166](https://github.com/sansyrox/robyn/issues/166)
## [v0.16.2](https://github.com/sansyrox/robyn/tree/v0.16.2) (2022-05-09)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.16.1...v0.16.2)
## [v0.16.1](https://github.com/sansyrox/robyn/tree/v0.16.1) (2022-05-09)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.16.0...v0.16.1)
**Closed issues:**
- Add Python stubs [\#130](https://github.com/sansyrox/robyn/issues/130)
**Merged pull requests:**
- Setup types for Robyn [\#192](https://github.com/sansyrox/robyn/pull/192) ([sansyrox](https://github.com/sansyrox))
- Fix build pipeline [\#190](https://github.com/sansyrox/robyn/pull/190) ([sansyrox](https://github.com/sansyrox))
- fix typo :pencil2: in api docs. [\#189](https://github.com/sansyrox/robyn/pull/189) ([sombralibre](https://github.com/sombralibre))
- Remove hashmap clones [\#187](https://github.com/sansyrox/robyn/pull/187) ([sansyrox](https://github.com/sansyrox))
- Code clean up | Modularise rust code [\#185](https://github.com/sansyrox/robyn/pull/185) ([sansyrox](https://github.com/sansyrox))
- Add experimental io-uring [\#184](https://github.com/sansyrox/robyn/pull/184) ([sansyrox](https://github.com/sansyrox))
- Implement Response headers [\#179](https://github.com/sansyrox/robyn/pull/179) ([sansyrox](https://github.com/sansyrox))
- Code cleanup [\#178](https://github.com/sansyrox/robyn/pull/178) ([sansyrox](https://github.com/sansyrox))
## [v0.16.0](https://github.com/sansyrox/robyn/tree/v0.16.0) (2022-04-29)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.15.1...v0.16.0)
**Closed issues:**
- \[Feature Request\] Add list of sponsors on the project website [\#182](https://github.com/sansyrox/robyn/issues/182)
- Optional build feature for io\_uring [\#177](https://github.com/sansyrox/robyn/issues/177)
- Create Custom headers for the response. [\#174](https://github.com/sansyrox/robyn/issues/174)
## [v0.15.1](https://github.com/sansyrox/robyn/tree/v0.15.1) (2022-03-24)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.15.0...v0.15.1)
**Closed issues:**
- Add middleware support [\#95](https://github.com/sansyrox/robyn/issues/95)
**Merged pull requests:**
- Make websocket id accessible [\#173](https://github.com/sansyrox/robyn/pull/173) ([sansyrox](https://github.com/sansyrox))
- Use Clippy tool optimized code [\#171](https://github.com/sansyrox/robyn/pull/171) ([mrxiaozhuox](https://github.com/mrxiaozhuox))
- Modify headers [\#170](https://github.com/sansyrox/robyn/pull/170) ([sansyrox](https://github.com/sansyrox))
- Update README.md [\#168](https://github.com/sansyrox/robyn/pull/168) ([Polokghosh53](https://github.com/Polokghosh53))
## [v0.15.0](https://github.com/sansyrox/robyn/tree/v0.15.0) (2022-03-17)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.14.0...v0.15.0)
**Closed issues:**
- \[BUG\] Unable to modify headers in middlewares [\#167](https://github.com/sansyrox/robyn/issues/167)
- Add Pycon talk link to docs [\#102](https://github.com/sansyrox/robyn/issues/102)
## [v0.14.0](https://github.com/sansyrox/robyn/tree/v0.14.0) (2022-03-03)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.13.1...v0.14.0)
**Fixed bugs:**
- Build error [\#161](https://github.com/sansyrox/robyn/issues/161)
**Merged pull requests:**
- Implement Custom Response objects. [\#165](https://github.com/sansyrox/robyn/pull/165) ([sansyrox](https://github.com/sansyrox))
- Remove deprecated endpoints [\#162](https://github.com/sansyrox/robyn/pull/162) ([sansyrox](https://github.com/sansyrox))
- Fix: default url param in app.start [\#160](https://github.com/sansyrox/robyn/pull/160) ([sansyrox](https://github.com/sansyrox))
- Add middlewares [\#157](https://github.com/sansyrox/robyn/pull/157) ([sansyrox](https://github.com/sansyrox))
- Remove arc\(ing\) [\#156](https://github.com/sansyrox/robyn/pull/156) ([sansyrox](https://github.com/sansyrox))
## [v0.13.1](https://github.com/sansyrox/robyn/tree/v0.13.1) (2022-02-19)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.13.0...v0.13.1)
## [v0.13.0](https://github.com/sansyrox/robyn/tree/v0.13.0) (2022-02-15)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.12.1...v0.13.0)
## [v0.12.1](https://github.com/sansyrox/robyn/tree/v0.12.1) (2022-02-13)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.12.0...v0.12.1)
**Closed issues:**
- \[BUG\] Default URL cannot be assigned [\#159](https://github.com/sansyrox/robyn/issues/159)
- Upcoming release\(s\) [\#141](https://github.com/sansyrox/robyn/issues/141)
## [v0.12.0](https://github.com/sansyrox/robyn/tree/v0.12.0) (2022-01-21)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.11.1...v0.12.0)
**Closed issues:**
- Consider adding startup events [\#153](https://github.com/sansyrox/robyn/issues/153)
- Remove poetry dependency [\#150](https://github.com/sansyrox/robyn/issues/150)
**Merged pull requests:**
- Add Event handlers [\#154](https://github.com/sansyrox/robyn/pull/154) ([sansyrox](https://github.com/sansyrox))
- Remove poetry [\#152](https://github.com/sansyrox/robyn/pull/152) ([sansyrox](https://github.com/sansyrox))
- Use print instead of input after starting server [\#149](https://github.com/sansyrox/robyn/pull/149) ([klaa97](https://github.com/klaa97))
- Fix dev server [\#148](https://github.com/sansyrox/robyn/pull/148) ([sansyrox](https://github.com/sansyrox))
- URL queries [\#146](https://github.com/sansyrox/robyn/pull/146) ([patchgamestudio](https://github.com/patchgamestudio))
- Add project wide flake8 settings [\#143](https://github.com/sansyrox/robyn/pull/143) ([sansyrox](https://github.com/sansyrox))
## [v0.11.1](https://github.com/sansyrox/robyn/tree/v0.11.1) (2022-01-11)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.11.0...v0.11.1)
## [v0.11.0](https://github.com/sansyrox/robyn/tree/v0.11.0) (2022-01-07)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.10.0...v0.11.0)
**Fixed bugs:**
- Hot Reloading goes in an infinite loop [\#115](https://github.com/sansyrox/robyn/issues/115)
**Closed issues:**
- Benchmarks to Björn, uvicorn etc. [\#142](https://github.com/sansyrox/robyn/issues/142)
- Add Python linter setup [\#129](https://github.com/sansyrox/robyn/issues/129)
- Add fixtures in testing [\#84](https://github.com/sansyrox/robyn/issues/84)
## [v0.10.0](https://github.com/sansyrox/robyn/tree/v0.10.0) (2021-12-20)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.9.0...v0.10.0)
**Closed issues:**
- Add PyPI classifiers [\#127](https://github.com/sansyrox/robyn/issues/127)
- Robyn version 0.9.0 doesn't work on Mac M1 Models [\#120](https://github.com/sansyrox/robyn/issues/120)
- Inconsistency in steps mentioned in Readme to run locally [\#119](https://github.com/sansyrox/robyn/issues/119)
- Async web socket support [\#116](https://github.com/sansyrox/robyn/issues/116)
- Reveal Logo to be removed from Future Roadmap [\#107](https://github.com/sansyrox/robyn/issues/107)
- Dead Link for Test Drive Button on Robyn Landing Page [\#106](https://github.com/sansyrox/robyn/issues/106)
- Add issue template, pr template and community guidelines [\#105](https://github.com/sansyrox/robyn/issues/105)
- For v0.7.0 [\#72](https://github.com/sansyrox/robyn/issues/72)
- Add better support for requests and response! [\#13](https://github.com/sansyrox/robyn/issues/13)
**Merged pull requests:**
- Add async support in WS [\#134](https://github.com/sansyrox/robyn/pull/134) ([sansyrox](https://github.com/sansyrox))
- Add help messages and simplify 'dev' option [\#128](https://github.com/sansyrox/robyn/pull/128) ([Kludex](https://github.com/Kludex))
- Apply Python highlight on api.md [\#126](https://github.com/sansyrox/robyn/pull/126) ([Kludex](https://github.com/Kludex))
- Update comparison.md [\#124](https://github.com/sansyrox/robyn/pull/124) ([Kludex](https://github.com/Kludex))
- Update comparison.md [\#123](https://github.com/sansyrox/robyn/pull/123) ([Kludex](https://github.com/Kludex))
- Fix readme documentation [\#122](https://github.com/sansyrox/robyn/pull/122) ([sansyrox](https://github.com/sansyrox))
- Release v0.9.0 Changelog [\#121](https://github.com/sansyrox/robyn/pull/121) ([sansyrox](https://github.com/sansyrox))
- \[FEAT\] Open Source Contribution Templates [\#118](https://github.com/sansyrox/robyn/pull/118) ([shivaylamba](https://github.com/shivaylamba))
- FIX : Wrong link for Test Drive [\#117](https://github.com/sansyrox/robyn/pull/117) ([shivaylamba](https://github.com/shivaylamba))
## [v0.9.0](https://github.com/sansyrox/robyn/tree/v0.9.0) (2021-12-01)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.8.1...v0.9.0)
**Closed issues:**
- Add more HTTP methods [\#74](https://github.com/sansyrox/robyn/issues/74)
**Merged pull requests:**
- Fix default url bug [\#111](https://github.com/sansyrox/robyn/pull/111) ([sansyrox](https://github.com/sansyrox))
- Web socket integration attempt 2 [\#109](https://github.com/sansyrox/robyn/pull/109) ([sansyrox](https://github.com/sansyrox))
## [v0.8.1](https://github.com/sansyrox/robyn/tree/v0.8.1) (2021-11-17)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.8.0...v0.8.1)
**Fixed bugs:**
- The default start is running the server at '0.0.0.0' instead of '127.0.0.1' [\#110](https://github.com/sansyrox/robyn/issues/110)
## [v0.8.0](https://github.com/sansyrox/robyn/tree/v0.8.0) (2021-11-10)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.7.1...v0.8.0)
**Closed issues:**
- Share the TCP web socket across different cores [\#91](https://github.com/sansyrox/robyn/issues/91)
- Improve the router [\#52](https://github.com/sansyrox/robyn/issues/52)
- \[Stretch Goal\] Create a a way of writing async request [\#32](https://github.com/sansyrox/robyn/issues/32)
- Improve the router [\#29](https://github.com/sansyrox/robyn/issues/29)
**Merged pull requests:**
- Fix the failing testing suite! [\#100](https://github.com/sansyrox/robyn/pull/100) ([sansyrox](https://github.com/sansyrox))
- Requests object is now optional [\#99](https://github.com/sansyrox/robyn/pull/99) ([sansyrox](https://github.com/sansyrox))
- Add socket sharing [\#94](https://github.com/sansyrox/robyn/pull/94) ([sansyrox](https://github.com/sansyrox))
## [v0.7.1](https://github.com/sansyrox/robyn/tree/v0.7.1) (2021-10-28)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.7.0...v0.7.1)
**Closed issues:**
- Remove the solution using dockerisation of tests [\#98](https://github.com/sansyrox/robyn/issues/98)
- Functions not working without request param [\#96](https://github.com/sansyrox/robyn/issues/96)
- Add actix router [\#85](https://github.com/sansyrox/robyn/issues/85)
- Request apart from GET are not working in directory subroutes [\#79](https://github.com/sansyrox/robyn/issues/79)
- Add the ability to share the server across the network [\#69](https://github.com/sansyrox/robyn/issues/69)
- Add the ability to view headers in the HTTP Methods [\#54](https://github.com/sansyrox/robyn/issues/54)
- Add tests! [\#8](https://github.com/sansyrox/robyn/issues/8)
## [v0.7.0](https://github.com/sansyrox/robyn/tree/v0.7.0) (2021-10-03)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.6.1...v0.7.0)
**Closed issues:**
- Robyn the replacement of Quart [\#86](https://github.com/sansyrox/robyn/issues/86)
- Add Pytest support for the test endpoints [\#81](https://github.com/sansyrox/robyn/issues/81)
**Merged pull requests:**
- Finally completed router integration [\#90](https://github.com/sansyrox/robyn/pull/90) ([sansyrox](https://github.com/sansyrox))
- Address clippy lints [\#89](https://github.com/sansyrox/robyn/pull/89) ([SanchithHegde](https://github.com/SanchithHegde))
- Initial docs update [\#83](https://github.com/sansyrox/robyn/pull/83) ([sansyrox](https://github.com/sansyrox))
- Add the basics of python testing [\#82](https://github.com/sansyrox/robyn/pull/82) ([sansyrox](https://github.com/sansyrox))
- Add a new landing page [\#80](https://github.com/sansyrox/robyn/pull/80) ([sansyrox](https://github.com/sansyrox))
## [v0.6.1](https://github.com/sansyrox/robyn/tree/v0.6.1) (2021-08-30)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.6.0...v0.6.1)
**Closed issues:**
- Make a new release [\#71](https://github.com/sansyrox/robyn/issues/71)
- Update to the pyo3 v0.14 [\#63](https://github.com/sansyrox/robyn/issues/63)
- Add the support to serve static directories [\#55](https://github.com/sansyrox/robyn/issues/55)
- Add support for mounting directory [\#38](https://github.com/sansyrox/robyn/issues/38)
**Merged pull requests:**
- Add the base of http requests [\#78](https://github.com/sansyrox/robyn/pull/78) ([sansyrox](https://github.com/sansyrox))
- Add default port and a variable url [\#77](https://github.com/sansyrox/robyn/pull/77) ([sansyrox](https://github.com/sansyrox))
- Make the request object accessible in every route [\#76](https://github.com/sansyrox/robyn/pull/76) ([sansyrox](https://github.com/sansyrox))
- Add the basics for circle ci and testing framework [\#67](https://github.com/sansyrox/robyn/pull/67) ([sansyrox](https://github.com/sansyrox))
- Update to pyo3 v0.14 [\#65](https://github.com/sansyrox/robyn/pull/65) ([sansyrox](https://github.com/sansyrox))
- Add the static directory serving [\#64](https://github.com/sansyrox/robyn/pull/64) ([sansyrox](https://github.com/sansyrox))
- Create a request object [\#61](https://github.com/sansyrox/robyn/pull/61) ([sansyrox](https://github.com/sansyrox))
- Add the ability to add body in PUT, PATCH and DELETE [\#60](https://github.com/sansyrox/robyn/pull/60) ([sansyrox](https://github.com/sansyrox))
- Implement a working dev server [\#40](https://github.com/sansyrox/robyn/pull/40) ([sansyrox](https://github.com/sansyrox))
- Use Actix as base [\#35](https://github.com/sansyrox/robyn/pull/35) ([JackThomson2](https://github.com/JackThomson2))
## [v0.6.0](https://github.com/sansyrox/robyn/tree/v0.6.0) (2021-08-11)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.5.3...v0.6.0)
**Closed issues:**
- Add body support for PUT, POST and PATCH [\#53](https://github.com/sansyrox/robyn/issues/53)
- Away with limited internet access till 1st August [\#51](https://github.com/sansyrox/robyn/issues/51)
- Add doc stings [\#42](https://github.com/sansyrox/robyn/issues/42)
- OSX builds are failing [\#41](https://github.com/sansyrox/robyn/issues/41)
- Add a dev server implementation [\#37](https://github.com/sansyrox/robyn/issues/37)
- Mini Roadmap | A list of issues that would require fixing [\#19](https://github.com/sansyrox/robyn/issues/19)
- Add support for Object/JSON Return Type! [\#9](https://github.com/sansyrox/robyn/issues/9)
## [v0.5.3](https://github.com/sansyrox/robyn/tree/v0.5.3) (2021-07-12)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.5.2...v0.5.3)
**Merged pull requests:**
- Improve the HTML file serving [\#46](https://github.com/sansyrox/robyn/pull/46) ([sansyrox](https://github.com/sansyrox))
- Add the basics to add serving of static files [\#36](https://github.com/sansyrox/robyn/pull/36) ([sansyrox](https://github.com/sansyrox))
## [v0.5.2](https://github.com/sansyrox/robyn/tree/v0.5.2) (2021-07-11)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.5.1...v0.5.2)
## [v0.5.1](https://github.com/sansyrox/robyn/tree/v0.5.1) (2021-07-10)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.5.0...v0.5.1)
**Closed issues:**
- Make html file serving more robust [\#45](https://github.com/sansyrox/robyn/issues/45)
- Try to serve individual static files using vanilla rust [\#43](https://github.com/sansyrox/robyn/issues/43)
- Error on import [\#16](https://github.com/sansyrox/robyn/issues/16)
- Add multiple process sharing [\#2](https://github.com/sansyrox/robyn/issues/2)
## [v0.5.0](https://github.com/sansyrox/robyn/tree/v0.5.0) (2021-07-01)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.4.1...v0.5.0)
**Closed issues:**
- QPS drops drastically after processing many requests [\#31](https://github.com/sansyrox/robyn/issues/31)
- Improve the way you parse TCP streams [\#30](https://github.com/sansyrox/robyn/issues/30)
- Re-introduce thread pool for the sync functions \(maybe\) [\#22](https://github.com/sansyrox/robyn/issues/22)
- Add async listener object in rust stream! [\#11](https://github.com/sansyrox/robyn/issues/11)
**Merged pull requests:**
- Make the server http compliant [\#33](https://github.com/sansyrox/robyn/pull/33) ([sansyrox](https://github.com/sansyrox))
## [v0.4.1](https://github.com/sansyrox/robyn/tree/v0.4.1) (2021-06-26)
[Full Changelog](https://github.com/sansyrox/robyn/compare/0.4.0...v0.4.1)
**Closed issues:**
- Add PyPi Metadata [\#5](https://github.com/sansyrox/robyn/issues/5)
**Merged pull requests:**
- Build and publish wheels on GitHub Actions [\#26](https://github.com/sansyrox/robyn/pull/26) ([messense](https://github.com/messense))
- Code cleanup using PyFunction type [\#25](https://github.com/sansyrox/robyn/pull/25) ([sansyrox](https://github.com/sansyrox))
- Add non blocking sync functions [\#23](https://github.com/sansyrox/robyn/pull/23) ([sansyrox](https://github.com/sansyrox))
- Add support for sync functions [\#20](https://github.com/sansyrox/robyn/pull/20) ([sansyrox](https://github.com/sansyrox))
## [0.4.0](https://github.com/sansyrox/robyn/tree/0.4.0) (2021-06-22)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.3.0...0.4.0)
**Closed issues:**
- Add support for Sync functions as well! [\#7](https://github.com/sansyrox/robyn/issues/7)
## [v0.3.0](https://github.com/sansyrox/robyn/tree/v0.3.0) (2021-06-21)
[Full Changelog](https://github.com/sansyrox/robyn/compare/v0.2.3...v0.3.0)
**Closed issues:**
- Architecture link in readme redirects to raw content [\#18](https://github.com/sansyrox/robyn/issues/18)
- Link pointing to the wrong destination [\#6](https://github.com/sansyrox/robyn/issues/6)
**Merged pull requests:**
- Pure tokio [\#17](https://github.com/sansyrox/robyn/pull/17) ([JackThomson2](https://github.com/JackThomson2))
- Remove Mutex lock on Threadpool and routes [\#15](https://github.com/sansyrox/robyn/pull/15) ([JackThomson2](https://github.com/JackThomson2))
## [v0.2.3](https://github.com/sansyrox/robyn/tree/v0.2.3) (2021-06-18)
[Full Changelog](https://github.com/sansyrox/robyn/compare/c14f52e6faa79917e89de4220590da7bf28f6a65...v0.2.3)
**Closed issues:**
- Improve async runtime [\#3](https://github.com/sansyrox/robyn/issues/3)
\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
| /robyn-0.30.0.tar.gz/robyn-0.30.0/CHANGELOG.md | 0.736116 | 0.835819 | CHANGELOG.md | pypi |
<img alt="Robyn Logo" src="https://user-images.githubusercontent.com/29942790/140995889-5d91dcff-3aa7-4cfb-8a90-2cddf1337dca.png" width="250" />
# Robyn
[](https://twitter.com/robyn_oss)
[](https://pepy.tech/project/robyn)
[](https://github.com/sansyrox/robyn/releases/)
[](#license)

[](https://sansyrox.github.io/robyn/#/)
[](https://discord.gg/rkERZ5eNU8)
Robyn is a fast async Python web framework coupled with a web server written in Rust. You can learn more by checking our [community resources](https://sansyrox.github.io/robyn/#/community-resources)!
## 📦 Installation
You can simply use Pip for installation.
```
pip install robyn
```
Or, with [conda-forge](https://conda-forge.org/)
```
conda install -c conda-forge robyn
```
## 🤔 Usage
### 🚀 Define your API
To define your API, you can add the following code in an `app.py` file.
```python
from robyn import Robyn
app = Robyn(__file__)
@app.get("/")
async def h(request):
return "Hello, world!"
app.start(port=8080)
```
### 🏃 Run your code
Simply run the app.py file you created. You will then have access to a server on `localhost:8080`, which you can request from another program. Robyn provides several options to customize your web server.
```
$ python3 app.py -h
usage: app.py [-h] [--processes PROCESSES] [--workers WORKERS] [--dev] [--log-level LOG_LEVEL]
Robyn, a fast async web framework with a rust runtime.
options:
-h, --help show this help message and exit
--processes PROCESSES Choose the number of processes. [Default: 1]
--workers WORKERS Choose the number of workers. [Default: 1]
--dev Development mode. It restarts the server based on file changes.
--log-level LOG_LEVEL Set the log level name
```
Log level can be `DEBUG`, `INFO`, `WARNING`, or `ERROR`.
### 💻 Add more routes
You can add more routes to your API. Check out the routes in [this file](https://github.com/sansyrox/robyn/blob/main/integration_tests/base_routes.py) as examples.
## 🐍 Python Version Support
Robyn is compatible with the following Python versions:
> Python >= 3.7
It is recommended to use the latest version of Python for the best performance.
Please make sure you have the correct version of Python installed before starting to use
this project. You can check your Python version by running the following command in your
terminal:
```
python --version
```
## 💡 Features
- Under active development!
- Written in Rust, btw xD
- A multithreaded Runtime
- Extensible
- A simple API
- Sync and Async Function Support
- Dynamic URL Routing
- Multi Core Scaling
- WebSockets!
- Middlewares
- Hot Reloading
- Community First and truly FOSS!
## 🗒️ How to contribute
### 🏁 Get started
Please read the [code of conduct](https://github.com/sansyrox/robyn/blob/main/CODE_OF_CONDUCT.md) and go through [CONTRIBUTING.md](https://github.com/sansyrox/robyn/blob/main/CONTRIBUTING.md) before contributing to Robyn.
Feel free to open an issue for any clarifications or suggestions.
If you're feeling curious. You can take a look at a more detailed architecture [here](https://sansyrox.github.io/robyn/#/architecture).
If you still need help to get started, feel free to reach out on our [community discord](https://discord.gg/rkERZ5eNU8).
### ⚙️ To Develop Locally
1. Install the development dependencies (preferably inside a virtual environment): `pip install -r dev-requirements.txt`
2. Install the pre-commit git hooks: `pre-commit install`
3. Run `maturin develop` or `maturin develop --cargo-extra-args="--features=io-uring"` for using the experimental version of actix-web. This command will build the Robyn Rust package and install it in your virtual environment.
4. Run `python3 integration_tests/base_routes.py`. This file contains several examples of routes we use for testing purposes. You can modify them or add some to your liking.
You can then request the server you ran from another terminal. Here is a `GET` request done using [curl](https://curl.se/) for example:
```
curl http://localhost:8080/sync/str
```
## ✨ Special thanks
### ✨ Contributors/Supporters
Thanks to all the contributors of the project. Robyn will not be what it is without all your support :heart:.
<a href="https://github.com/sansyrox/robyn/graphs/contributors">
<img src="https://contrib.rocks/image?repo=sansyrox/robyn" />
</a>
Special thanks to the [PyO3](https://pyo3.rs/v0.13.2/) community and [Andrew from PyO3-asyncio](https://github.com/awestlake87/pyo3-asyncio) for their amazing libraries and their support for my queries. 💖
### ✨ Sponsors
These sponsors help us make the magic happen!
[](https://www.digitalocean.com/?refcode=3f2b9fd4968d&utm_campaign=Referral_Invite&utm_medium=Referral_Program&utm_source=badge)
[](https://github.com/appwrite)
- [Shivay Lamba](https://github.com/shivaylamba)
| /robyn-0.30.0.tar.gz/robyn-0.30.0/docs/README.md | 0.498291 | 0.893263 | README.md | pypi |
import numpy as np
from roc_aggregator.validations import validate_input
def partial_cm(fpr, tpr, thresholds, negative_count, total_count, descending=False):
    """ Compute the partial confusion matrix (fp, tp) from the tpr and fpr.

    Parameters
    ----------
    fpr: list - False positive rates for each individual ROC.
    tpr: list - True positive rates for each individual ROC.
    thresholds: list - Thresholds used to compute the fpr and tpr.
    negative_count: list - Total number of samples corresponding to the negative case.
    total_count: list - Total number of samples.
    descending: bool - If True, return the rows ordered by descending threshold
        (ROC convention); otherwise ascending (precision-recall convention).

    Returns
    -------
    cm: np.array() - Accumulated (fp, tp) counts over all nodes, one row per
        unique threshold.
    thresholds_stack_sorted: np.array() - The unique thresholds, sorted.
    """
    # Arrange the necessary parameters
    node_indexes = np.repeat(range(len(thresholds)), [len(th) for th in thresholds])
    thresholds_stack = np.hstack(thresholds)
    shift = 0
    acc = np.zeros((len(thresholds_stack), 2))

    for i, node_thresholds in enumerate(thresholds):
        # Shift the index and thresholds according to the node.
        # Necessary to guarantee that the current node thresholds
        # are always considered first when sorted
        shift -= len(node_thresholds)
        node_indexes_shifted = np.roll(node_indexes, shift)
        thresholds_stack_shifted = np.roll(thresholds_stack, shift)

        # Sort all the thresholds (descending)
        sorted_indexes = np.argsort(thresholds_stack_shifted)[::-1]

        # Build an index list based on the i node values by doing a cumulative
        # sum. Thresholds below the smallest threshold for the i node will have
        # an index of -1 that will later be mapped to the first entry in the
        # confusion matrix.
        # (Renamed from `sum` to avoid shadowing the builtin.)
        index_map = np.cumsum(np.equal(node_indexes_shifted, i)[sorted_indexes]) - 1

        # Calculating the partial confusion matrix (fp and tp) by scaling the
        # rates with the node's negative/positive counts, then sorting it
        # by descending threshold
        cm = np.multiply(
            np.column_stack([np.array(fpr[i]), np.array(tpr[i])]),
            [negative_count[i], total_count[i] - negative_count[i]]
        )
        cm_sorted = cm[np.argsort(node_thresholds)[::-1]]

        # Add the tp and fp values to the global array; the appended copy of
        # the first row is what index -1 resolves to
        acc += np.append(
            cm_sorted,
            [[cm_sorted[0][0], cm_sorted[0][1]]],
            axis=0
        )[index_map, :]

    # Sort the thresholds and remove repeated entries
    # (np.unique always returns ascending values; unique_ind indexes into the
    # pre-sorted input, which is why the input order matters here)
    thresholds_stack_sorted, unique_ind = np.unique(
        np.sort(thresholds_stack)[::-1] if descending else np.sort(thresholds_stack),
        return_index=True
    )

    if descending:
        cm = acc[unique_ind[::-1], :]
        thresholds_stack_sorted = thresholds_stack_sorted[::-1]
    else:
        cm = acc[::-1][unique_ind, :]

    return cm, thresholds_stack_sorted
def roc_curve(fpr, tpr, thresholds, negative_count, total_count):
    """ Compute Receiver operating characteristic (ROC).

    Parameters
    ----------
    fpr: list - False positive rates for each individual ROC.
    tpr: list - True positive rates for each individual ROC.
    thresholds: list - Thresholds used to compute the fpr and tpr.
    negative_count: list - Total number of samples corresponding to the negative case.
    total_count: list - Total number of samples.

    Returns
    -------
    fpr_global: np.array() - The false positive rates for the global ROC.
    tpr_global: np.array() - The true positive rates for the global ROC.
    thresholds_stack: np.array() - The thresholds used to compute the fpr and tpr.

    Raises
    ------
    TypeError
        If the parameters' dimensions don't match.
    """
    # Input validation currently disabled
    # validate_input(fpr, tpr, thresholds, negative_count, total_count)

    # Accumulate the partial confusion matrix (fp, tp), ordered by
    # descending threshold as is conventional for a ROC curve
    cm_partial, thresholds_stack = partial_cm(
        fpr, tpr, thresholds, negative_count, total_count, descending=True)

    # Normalize fp by the global negative count and tp by the global
    # positive count to obtain the rates
    total_negatives = np.sum(negative_count)
    total_positives = np.sum(total_count) - total_negatives
    fpr_global = cm_partial[:, 0] / total_negatives
    tpr_global = cm_partial[:, 1] / total_positives

    return fpr_global, tpr_global, thresholds_stack
def precision_recall_curve(fpr, tpr, thresholds, negative_count, total_count):
    """ Compute the precision recall curve.

    Parameters
    ----------
    fpr: list - False positive rates for each individual ROC.
    tpr: list - True positive rates for each individual ROC.
    thresholds: list - Thresholds used to compute the fpr and tpr.
    negative_count: list - Total number of samples corresponding to the negative case.
    total_count: list - Total number of samples.

    Returns
    -------
    pre: np.array() - The precision for the global ROC.
    recall: np.array() - The recall for the global ROC.
    thresholds_stack: np.array() - The thresholds used to compute the fpr and tpr.

    Raises
    ------
    TypeError
        If the parameters' dimensions don't match.
    """
    # Input validation currently disabled
    # validate_input(fpr, tpr, thresholds, negative_count, total_count)

    # Accumulate the partial confusion matrix (fp, tp), ascending thresholds
    cm_partial, thresholds_stack = partial_cm(fpr, tpr, thresholds, negative_count, total_count)

    # precision = tp / (tp + fp); defined as 1 where no positive
    # prediction was made (tp + fp == 0)
    tp = cm_partial[:, 1]
    predicted_positives = tp + cm_partial[:, 0]
    pre = np.divide(
        tp,
        predicted_positives,
        out=np.ones(len(cm_partial)),
        where=predicted_positives != 0,
    )

    # recall = tp / positives
    recall = tp / (np.sum(total_count) - np.sum(negative_count))

    return pre, recall, thresholds_stack
# Reference documentation: ROC-GEN-SYS-NTT-00038-LES
"""
Database model for:
- sbm_log,
- lfr_kcoeff_dump
- bia_sweep_log,
- efecs_events
- event_log
- solohk_param
- process_queue
tables.
"""
from poppy.core.db.non_null_column import NonNullColumn
from poppy.core.db.base import Base
from sqlalchemy import String, UniqueConstraint
from sqlalchemy.dialects.postgresql import (
BIGINT,
BOOLEAN,
FLOAT,
ENUM,
SMALLINT,
TIMESTAMP,
JSONB,
)
__all__ = [
'SbmLog',
'BiaSweepLog',
'LfrKcoeffDump',
'EventLog',
'SoloHkParam',
'ProcessQueue',
'EfecsEvents',
'HfrTimeLog',
'Event'
]
# Create enumeration for sbm_log.status column
SBM_STATUS_LIST = ['Available', 'Requested',
                   'Downlinked', 'Deleted', 'Missed', 'Unknown']

# PostgreSQL ENUM type built from the list above
sbm_status_enum = ENUM(
    *SBM_STATUS_LIST,
    name='sbm_status_type',
    # schema='pipeline',
)
class SbmLog(Base):
    """
    Class representation of the table for sbm_log table in the ROC database.

    One row per SBM (Selected Burst Mode) detection event, with its
    on-board (CUC) and UTC occurrence times, detection quality factor,
    algorithm status and retrieval/selection status.
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    sbm_type = NonNullColumn(
        SMALLINT(),
        descr='Type of SBM event (1=SBM1 or 2=SBM2)')
    cuc_time = NonNullColumn(
        String(1024),
        descr='SBM event occurrence on-board time in CCSDS CUC'
              'format "coarse:fine".')
    obt_time = NonNullColumn(
        TIMESTAMP(),
        descr='SBM event occurrence on-board time in SQL timestamp format')
    utc_time = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='SBM event occurrence UTC time in SQL timestamp format')
    utc_time_is_predictive = NonNullColumn(
        BOOLEAN,
        nullable=True,
        descr='Flag to indicate if UTC time is predictive (True)'
              'or definitive (False)')
    sbm_qf = NonNullColumn(
        FLOAT(),
        descr='SBM detection quality factor')
    sbm_algo = NonNullColumn(
        SMALLINT(),
        descr='SBM detection algorithm status')
    sbm_algo_param = NonNullColumn(
        JSONB(),
        nullable=True,
        descr='List of SBM algo parameters (JSON format)')
    retrieved_time = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='Local date/time at which the SBM event science data'
              'has been retrieved.')
    selected = NonNullColumn(
        BOOLEAN,
        nullable=True,
        descr='True if SBM event is selected')
    status = NonNullColumn(
        sbm_status_enum,
        descr='Status of the sbm event.'
              f'Possible values are: {SBM_STATUS_LIST}')
    insert_time = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='Database insertion local time.')

    __tablename__ = 'sbm_log'
    # A given on-board time may only appear once per SBM type
    __table_args__ = (
        UniqueConstraint('cuc_time', 'sbm_type'),
        {
            'schema': 'pipeline',
        }
    )
class BiaSweepLog(Base):
    """
    Class representation of the table for bia_sweep_log table
    in the ROC database.

    One row per Bias sweep step, with its on-board (CUC) and UTC times.
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    sweep_step = NonNullColumn(
        String(16),
        descr='Step of the Bias sweep'
              '(TM_DPU_EVENT_PR_BIA_SWEEP.PA_DPU_BIA_SWEEP_PR_CODE values)')
    utc_time = NonNullColumn(
        TIMESTAMP(),
        descr='Sweep step UTC time')
    cuc_time = NonNullColumn(
        String(1024),
        descr='Sweep step on-board time in CCSDS CUC format "coarse:fine".')
    utc_time_is_predictive = NonNullColumn(
        BOOLEAN, nullable=True,
        descr='Flag to indicate if UTC time is predictive (True)'
              'or definitive (False)')
    insert_time = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='Database insertion local time.')

    __tablename__ = 'bia_sweep_log'
    # A given sweep step may only appear once per UTC time
    __table_args__ = (
        UniqueConstraint('sweep_step', 'utc_time'),
        {
            'schema': 'pipeline',
        }
    )
class LfrKcoeffDump(Base):
    """
    Class representation of the table for lfr_kcoeff_dump table
    in the ROC database.

    One row per TM_LFR_KCOEFFICIENTS_DUMP packet, storing the dumped LFR
    inter-calibration factors (kcoeff) and the packet's times and counters.
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    utc_time = NonNullColumn(
        TIMESTAMP(),
        descr='TM_LFR_KCOEFFICIENTS_DUMP packet creation time (UTC)')
    cuc_time = NonNullColumn(
        String(1024),
        descr='Packet creation on-board time in CCSDS CUC'
              'format "coarse:fine".')
    utc_time_is_predictive = NonNullColumn(
        BOOLEAN,
        nullable=True,
        descr='Flag to indicate if UTC time is predictive (True)'
              'or definitive (False)')
    kcoeff_pkt_cnt = NonNullColumn(
        SMALLINT(),
        descr='Total count of packets for LFR inter calibration factors dump.'
              '(PA_LFR_KCOEFF_PKT_CNT)')
    kcoeff_pkt_nr = NonNullColumn(
        SMALLINT(),
        descr='Number of the packet for LFR inter calibration factors dump.'
              '(PA_LFR_KCOEFF_PKT_NR)')
    kcoeff_blk_nr = NonNullColumn(
        SMALLINT(),
        descr='Number of block LFR_KCOEFFICENT_PARAMETERS in the packet'
              '(PA_LFR_KCOEFF_BLK_NR)')
    kcoeff_values = NonNullColumn(
        JSONB(),
        descr='32 values of the Kcoeff for the current list of frequencies'
              '(json format)')
    insert_time = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='Database insertion local time.')

    __tablename__ = 'lfr_kcoeff_dump'
    # One row per (creation time, packet number) pair
    __table_args__ = (
        UniqueConstraint('utc_time', 'kcoeff_pkt_nr'),
        {
            'schema': 'pipeline',
        }
    )
class EventLog(Base):
    """
    Class representation of the table for event_log table in the ROC database.

    Stores labelled events as UTC time ranges with a JSON description.
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    start_time = NonNullColumn(
        TIMESTAMP(),
        descr='Event UTC start time')
    end_time = NonNullColumn(
        TIMESTAMP(),
        descr='Event UTC end time')
    description = NonNullColumn(
        JSONB(),
        descr='Description of event')
    label = NonNullColumn(
        String(),
        descr='Label of event')
    is_predictive = NonNullColumn(
        BOOLEAN,
        nullable=True,
        descr='Flag to indicate if event is predictive (True)'
              'or definitive (False)')
    insert_time = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='Database insertion local time.')

    __tablename__ = 'event_log'
    # The full (time range, label, description) combination must be unique
    __table_args__ = (
        UniqueConstraint('start_time', 'end_time', 'label', 'description'),
        {
            'schema': 'pipeline',
        }
    )
class SoloHkParam(Base):
    """
    Class representation of the table solo_hk_param in the ROC database.

    One row per Solar Orbiter HK parameter sample, mirroring the
    ParamSampleListElement XML tags; deduplicated by a SHA computed from
    (name, time_stamp, raw_value).
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    name = NonNullColumn(
        String(16),
        descr='Parameter name (SRDB ID)')
    utc_time = NonNullColumn(
        TIMESTAMP(),
        descr='ParamSampleListElement.TimeStampAsciiA XML tag value')
    description = NonNullColumn(
        String(),
        descr='ParamSampleListElement.Description XML tag value')
    unit = NonNullColumn(
        String(),
        descr='ParamSampleListElement.Unit XML tag value')
    eng_value = NonNullColumn(
        String(),
        descr='ParamSampleListElement.EngineeringValue XML tag value')
    raw_value = NonNullColumn(
        String(),
        descr='ParamSampleListElement.RawValue XML tag value')
    sha = NonNullColumn(
        String(),
        descr='SHA of the element. Computed from'
              '(name, time_stamp, raw_value)')

    __tablename__ = 'solo_hk_param'
    # SHA uniqueness prevents duplicate parameter samples
    __table_args__ = (
        UniqueConstraint('sha'),
        {
            'schema': 'pipeline',
        }
    )
class ProcessQueue(Base):
    """
    Class representation of the table process_queue in the ROC database.

    Queue of dataset/time-range entries awaiting processing.
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    dataset_id = NonNullColumn(
        String(512),
        descr='Dataset ID in the ROC database')
    start_time = NonNullColumn(
        TIMESTAMP(),
        descr='Start time of data')
    end_time = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='End time of data')
    version = NonNullColumn(
        String(16),
        nullable=True,
        descr='Data version of the file')
    file = NonNullColumn(
        String(),
        nullable=True,
        descr='Dataset file name'
    )
    insert_time = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='Database insertion local time.')

    __tablename__ = 'process_queue'
    # No unique constraint: the same dataset/time range may be queued again
    __table_args__ = (
        {
            'schema': 'pipeline',
        }
    )
class EfecsEvents(Base):
    """
    Class representation of the table efecs_events in the ROC database.

    One row per EFECS event, with its attributes stored as JSON and the
    LTP counter / generation time of the source EFECS file.
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    name = NonNullColumn(
        String(),
        descr='EFECS event name')
    utc_time = NonNullColumn(
        TIMESTAMP(),
        descr='EFECS event time')
    attributes = NonNullColumn(
        JSONB(),
        descr='EFECS event attributes (in JSON format)',
        nullable=True)
    ltp_count = NonNullColumn(
        SMALLINT(),
        descr='LTP counter')
    gen_time = NonNullColumn(
        TIMESTAMP(),
        descr='EFECS generation time')

    __tablename__ = 'efecs_events'
    # A given event name may only appear once per time
    __table_args__ = (
        UniqueConstraint('name', 'utc_time'),
        {
            'schema': 'pipeline',
        }
    )
class HfrTimeLog(Base):
    """
    Class representation of the table hfr_time_log in the ROC database.

    See https://gitlab.obspm.fr/ROC/RCS/THR_CALBAR/-/issues/76 for details
    about why this table is needed.

    One row per (acquisition time, HFR mode); the delta_time1/delta_time2
    JSON columns collect per-packet delta times for the HF1/HF2 bands,
    keyed by packet creation time.
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    acq_time = NonNullColumn(
        TIMESTAMP(),
        descr='HFR TM PA_THR_ACQUISITION_TIME human readable time',
    )
    coarse_time = NonNullColumn(
        BIGINT(),
        descr='HFR TM PA_THR_ACQUISITION_TIME, coarse part')
    fine_time = NonNullColumn(
        BIGINT(),
        descr='HFR TM PA_THR_ACQUISITION_TIME, fine part')
    mode = NonNullColumn(
        SMALLINT(),
        descr='HFR mode (0=NORMAL, 1=BURST)',
    )
    delta_time1 = NonNullColumn(
        JSONB(),
        descr='Values of delta_time for HF1 for each TM '
              'for the current PA_THR_ACQUISITION_TIME value. '
              'Keywords are packet creation times (coarse * 100000 + fine)',
        nullable=True
    )
    delta_time2 = NonNullColumn(
        JSONB(),
        descr='Values of delta_time for HF2 for each TM '
              'for the current PA_THR_ACQUISITION_TIME value. '
              'Keywords are packet creation times (coarse * 100000 + fine)',
        nullable=True
    )

    __tablename__ = 'hfr_time_log'
    # One row per (coarse, fine, mode) acquisition-time triple
    __table_args__ = (
        UniqueConstraint('coarse_time', 'fine_time', 'mode'),
        {
            'schema': 'pipeline',
        }
    )
# Create enumeration for event.origin column
EVENT_ORIGIN_LIST = ['SOLO', 'RPW']

# PostgreSQL ENUM type built from the list above
event_origin_enum = ENUM(
    *EVENT_ORIGIN_LIST,
    name='event_origin_enum',
    # schema='pipeline',
)
class Event(Base):
    """
    Class listing the different events for RPW.

    Catalog of known event labels with their tracking/anomaly flags
    and origin (SOLO or RPW).
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    label = NonNullColumn(
        String(),
        descr='Event label'
    )
    is_tracked = NonNullColumn(
        BOOLEAN, default=True,
        descr='True if event has to be tracked'
    )
    is_anomaly = NonNullColumn(
        BOOLEAN, default=True,
        descr='True if event is an anomaly'
    )
    origin = NonNullColumn(
        event_origin_enum,
        descr='Origin of the event.'
              f'Possible values are: {EVENT_ORIGIN_LIST}'
    )

    __tablename__ = 'events'
    # Labels are unique within the catalog
    __table_args__ = (
        UniqueConstraint('label'),
        {
            'schema': 'pipeline',
        }
    )
# Reference documentation
# ROC-GEN-SYS-NTT-00038-LES_Iss01_Rev02(Mission_Database_Description_Document)
"""
Database model for rpw file processing history tables.
"""
from sqlalchemy import ForeignKey, String, Table, Column
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.postgresql import \
BIGINT, INTEGER, TIMESTAMP, ENUM, BOOLEAN
from poppy.core.db.base import Base
from poppy.core.db.non_null_column import NonNullColumn
__all__ = [
'FileLog',
]
# File lists
# Possible values for the FileLog.state / .status / .level columns below
FILE_STATE_LIST = ['OK', 'WARNING', 'ERROR']
FILE_STATUS_LIST = ['Pended', 'Terminated', 'InProgress', 'Locked', 'Failed']
FILE_LEVEL_LIST = [
    'CAL', 'TC', 'TM', 'SK', 'L0', 'HK', 'L1',
    'BIA', 'L1R', 'L2', 'L3', 'LL01']
# file_action_list = ['Process', 'Reprocess', 'Lock', 'Unlock', 'Delete']

# PostgreSQL ENUM types built from the lists above
file_state_enum = ENUM(
    *FILE_STATE_LIST, name='file_state_list')
file_status_enum = ENUM(
    *FILE_STATUS_LIST, name='file_status_list')
file_level_enum = ENUM(
    *FILE_LEVEL_LIST, name='file_level_list')

# Self-referential association table linking parent and child rows of
# pipeline.file_log (backs the FileLog.parents / FileLog.children relationship)
ParentsFileLog = Table(
    'parents_file_log',
    Base.metadata,
    Column('id_parent', INTEGER, ForeignKey('pipeline.file_log.id')),
    Column('id_child', INTEGER, ForeignKey('pipeline.file_log.id')),
    schema='pipeline'
)
class FileLog(Base):
    """
    Class representation of the table for file_log table in the ROC database.

    One row per file known to the pipeline, with its identification
    (basename, product, version, SHA), processing state/status/level,
    time coverage, delivery/archiving flags and parent/child lineage.
    """
    id = NonNullColumn(
        BIGINT(),
        primary_key=True)
    sha = NonNullColumn(
        String(64),
        nullable=True,
        descr='SHA256 of the file')
    basename = NonNullColumn(
        String(512),
        descr='Basename of the file',
        unique=True)
    product = NonNullColumn(
        String(512),
        descr='Basename of the file without extension and version number')
    version = NonNullColumn(
        String(16),
        descr='Data version of the file')
    state = NonNullColumn(
        file_state_enum,
        descr=f'State of the file. Possible values are: {FILE_STATE_LIST}')
    status = NonNullColumn(
        file_status_enum,
        descr=f'Status of the file. Possible values are: {FILE_STATUS_LIST}')
    level = NonNullColumn(
        file_level_enum,
        descr=f'Level of the file. Possible values are: {FILE_LEVEL_LIST}')
    creation_time = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='Local date and time of the file creation',
        comment='')
    insert_time = NonNullColumn(
        TIMESTAMP(),
        descr='Local date and time of the file insertion in the database',
        nullable=True)
    descr = NonNullColumn(
        String(512),
        nullable=True,
        descr='Description of the file')
    author = NonNullColumn(
        String(512),
        nullable=True,
        descr='Author of the file')
    dirname = NonNullColumn(
        String(512),
        nullable=True,
        descr='Relative path of the file directory (from data root base)')
    url = NonNullColumn(
        String(512),
        nullable=True,
        descr='URL of the file in the HTTPS server')
    size = NonNullColumn(
        BIGINT(),
        nullable=True,
        descr='Size of the file in kilobytes')
    start_time = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='Start time of file')
    end_time = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='End time of file')
    validity_start = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='Start time of validity range (for CAL files)')
    validity_end = NonNullColumn(
        TIMESTAMP(),
        nullable=True,
        descr='End time of validity range (for CAL files)')
    dataset_id = NonNullColumn(
        String(512),
        nullable=True,
        descr='Dataset ID in the ROC database')
    is_archived = NonNullColumn(
        BOOLEAN,
        default=False,
        descr='True if file has been archived at ESAC')
    is_delivered = NonNullColumn(
        BOOLEAN,
        default=False,
        descr='True if file has been delivered ESAC')
    public_filename = NonNullColumn(
        String(512),
        descr='Public filename of the file delivered to ESAC',
        nullable=True,
        unique=False)
    public_dirname = NonNullColumn(
        String(512),
        descr='Public dirname of the file delivered to ESAC',
        nullable=True,
        unique=False)
    is_latest = NonNullColumn(
        BOOLEAN,
        default=False,
        descr='True if the version is the latest one')
    is_removed = NonNullColumn(
        BOOLEAN,
        default=False,
        descr='True if the file has been removed from the file directory')
    error_log = NonNullColumn(
        String(),
        nullable=True,
        descr='Log message when an error occured during insertion')
    to_update = NonNullColumn(
        BOOLEAN,
        default=False,
        descr='True if file has to be updated by DINGO')
    # Self-referential many-to-many: a file's input files (parents) and the
    # files generated from it (children), via the parents_file_log table
    parents = relationship(
        'FileLog',
        secondary=ParentsFileLog,
        primaryjoin=ParentsFileLog.c.id_child == id,
        secondaryjoin=ParentsFileLog.c.id_parent == id,
        backref='children',
        cascade='all, delete',
    )

    __tablename__ = 'file_log'
    __table_args__ = (
        {
            'schema': 'pipeline',
        }
    )

    def as_dict(self):
        """Return the row as a JSON-serializable dictionary.

        Drops the `id` and `insert_time` columns, converts datetime
        columns to ISO 8601 strings and replaces the `parents`
        relationship by a sorted list of the parents' basenames.
        """
        # Init the dictionary from the mapped table columns
        log_file_dict = {
            c.name: getattr(self, c.name)
            for c in self.__table__.columns
        }

        # Delete some useless columns
        log_file_dict.pop('id')
        log_file_dict.pop('insert_time')

        # Datetime formatting (ISO 8601), skipping NULL values
        fields = ['creation_time', 'start_time', 'end_time',
                  'validity_start', 'validity_end']
        for f in fields:
            time = log_file_dict[f]
            if time is not None:
                log_file_dict[f] = time.isoformat()

        # Replace the relationship by a sorted list of parent basenames
        log_file_dict['parents'] = []
        for p in self.parents:
            log_file_dict['parents'].append(p.basename)
        log_file_dict['parents'].sort()

        return log_file_dict
# Reference documentation: ROC-GEN-SYS-NTT-00038-LES
"""
Database model for tm_log and tc_log tables.
"""
from poppy.core.db.non_null_column import NonNullColumn
from poppy.core.db.base import Base
from sqlalchemy import String, ForeignKey, UniqueConstraint, Index
from sqlalchemy.orm import relationship, validates, backref
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.dialects.postgresql import (
BIGINT,
BOOLEAN,
DOUBLE_PRECISION,
ENUM,
INTEGER,
SMALLINT,
TIMESTAMP,
JSONB,
)
__all__ = [
'TmLog',
'TcLog',
'InvalidPacketLog',
]
class TmLog(Base):
    """
    Class representation of the table for tm_log table in the ROC database.

    One row per RPW telemetry (TM) packet, deduplicated by SHA.
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    length = NonNullColumn(INTEGER(), nullable=True,
                           descr='Packet length in bytes')
    category = NonNullColumn(String(512), nullable=True,
                             descr='Packet PALISADE category')
    apid = NonNullColumn(INTEGER(), nullable=True,
                         descr='Packet APID')
    sync_flag = NonNullColumn(BOOLEAN, nullable=True,
                              descr='TM packet time synchronization flag')
    srdb_id = NonNullColumn(String(16), nullable=True,
                            descr='Packet name (SRDB ID)')
    palisade_id = NonNullColumn(String(256), nullable=True,
                                descr='Packet PALISADE ID')
    binary = NonNullColumn(String(), nullable=True,
                           descr='Packet raw binary data (in hexadecimal)')
    data = NonNullColumn(JSONB(), nullable=True,
                         descr='Packet source data (JSON format)')
    sha = NonNullColumn(String(),
                        descr='Packet sha (hexdigest)')
    sequence_cnt = NonNullColumn(BIGINT(), nullable=True,
                                 descr='Packet sequence counter')
    cuc_time = NonNullColumn(String(1024),
                             descr='Packet creation on-board time in CCSDS CUC format "coarse:fine".')
    obt_time = NonNullColumn(TIMESTAMP, nullable=True,
                             descr='Packet creation on-board time in SQL timestamp format".')
    utc_time = NonNullColumn(TIMESTAMP, nullable=True,
                             descr='Packet creation UTC time in SQL timestamp format".')
    utc_time_is_predictive = NonNullColumn(BOOLEAN, nullable=True,
                                           descr='Flag to indicate if Packet creation UTC time is predictive (True) or definitive (False)')
    insert_time = NonNullColumn(TIMESTAMP,
                                descr='Database insertion local time.')

    __tablename__ = 'tm_log'
    # SHA uniqueness prevents duplicate packet insertion
    __table_args__ = (
        UniqueConstraint('sha'),
        {
            'schema': 'pipeline',
        }
    )
class TcLog(Base):
    """
    Class representation of the table for tc_log table in the ROC database.

    One row per RPW telecommand (TC) packet, with its acknowledgment
    acceptance/execution states, deduplicated by SHA.
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    length = NonNullColumn(INTEGER(),
                           descr='Packet length in bytes')
    category = NonNullColumn(String(512), nullable=True,
                             descr='Packet PALISADE category')
    apid = NonNullColumn(INTEGER(), nullable=True,
                         descr='Packet APID')
    utc_time = NonNullColumn(TIMESTAMP,
                             descr='Packet execution time in UTC')
    srdb_id = NonNullColumn(String(16),
                            descr='Packet name (SRDB ID)')
    palisade_id = NonNullColumn(String(256),
                                descr='Packet PALISADE ID')
    binary = NonNullColumn(String(), nullable=True,
                           descr='Packet raw binary data (in hexadecimal)')
    data = NonNullColumn(JSONB(), nullable=True,
                         descr='Packet application data (JSON format)')
    sha = NonNullColumn(String(),
                        descr='Packet sha (hexdigest)')
    tc_exe_state = NonNullColumn(String(16), nullable=True,
                                 descr='TC acknowledgment'
                                       'execution completion status')
    tc_acc_state = NonNullColumn(String(16), nullable=True,
                                 descr='TC acknowledgment acceptance status')
    sequence_name = NonNullColumn(String(16), nullable=True,
                                  descr='TC sequence name')
    unique_id = NonNullColumn(String(256), nullable=True,
                              descr='TC unique ID')
    insert_time = NonNullColumn(TIMESTAMP,
                                descr='Database insertion local time.')

    __tablename__ = 'tc_log'
    # SHA uniqueness prevents duplicate packet insertion
    __table_args__ = (
        UniqueConstraint('sha'),
        {
            'schema': 'pipeline',
        }
    )
class InvalidPacketLog(Base):
    """
    Class representation of the table for invalid_packet_log table in the ROC database.

    One row per packet that could not be fully processed, with a status
    code describing why it was rejected, deduplicated by SHA.
    """
    id = NonNullColumn(BIGINT(), primary_key=True)
    sha = NonNullColumn(String(),
                        descr='sha256 computed from invalid packet data (hexdigest)')
    srdb_id = NonNullColumn(String(16), nullable=True,
                            descr='Packet name (SRDB ID)')
    apid = NonNullColumn(INTEGER(), nullable=True,
                         descr='Packet APID')
    palisade_id = NonNullColumn(String(256), nullable=True,
                                descr='Packet PALISADE ID')
    utc_time = NonNullColumn(TIMESTAMP, nullable=True,
                             descr='Packet time in UTC')
    data = NonNullColumn(JSONB(), nullable=True,
                         descr='Packet content data (when extraction is possible)')
    status = NonNullColumn(SMALLINT(),
                           descr='Status of the packet (INVALID_PACKET_HEADER = 1, '
                                 'INVALID_PACKET_DATA = 2, OUTSIDE_RANGE_PACKET = 3, '
                                 'INVALID_PACKET_CDATA = 4, INVALID_PACKET_TIME = 5, '
                                 'UNKNOWN_STATUS = -1)')
    comment = NonNullColumn(String(), nullable=True,
                            descr='Additional comment about packet status')
    insert_time = NonNullColumn(TIMESTAMP,
                                descr='Database insertion local time.')

    __tablename__ = 'invalid_packet_log'
    # SHA uniqueness prevents duplicate packet insertion
    __table_args__ = (
        UniqueConstraint('sha'),
        {
            'schema': 'pipeline',
        }
    )
import os
from pathlib import Path
from poppy.core.logger import logger
from poppy.core.task import Task
__all__ = ['MoveProcessedFiles', 'MoveFailedFiles']
from roc.dingo.tools import safe_move
class MoveProcessedFiles(Task):
    """Task to move input list of well processed files in a given target directory."""
    plugin_name = 'roc.dingo'
    name = 'move_processed_files'

    def setup_inputs(self):
        """Gather the task inputs from the pipeline properties."""
        # See if --no-move keyword is defined
        self.no_move = self.pipeline.get('no_move', default=False, args=True)

        # Get or create processed_files list from pipeline properties
        self.processed_files = self.pipeline.get(
            'processed_files', default=[], create=True)

        # Get or create processed_files_dir from pipeline properties
        self.processed_files_dir = self.pipeline.get(
            'processed_files_dir', default=[None], create=True)[0]

    def run(self):
        """Move every processed file into the target directory."""
        try:
            self.setup_inputs()
        except Exception:
            # Catch Exception (not a bare except) so SystemExit and
            # KeyboardInterrupt still propagate
            logger.exception(
                'Initializing inputs for task MoveProcessedFiles has failed!')
            self.pipeline.exit()
            return

        if self.no_move:
            logger.debug(
                'Skip MoveProcessedFiles task')
            return

        if not self.processed_files:
            logger.debug(
                'Input list of processed files is empty: skip MoveProcessedFiles task')
            return

        if not self.processed_files_dir:
            logger.debug(
                'processed_files_dir argument not passed, skip task')
            return

        # Create folder if it does not exist
        Path(self.processed_files_dir).mkdir(parents=True, exist_ok=True)

        for current_file in self.processed_files:
            logger.info(f'Moving {current_file} into {self.processed_files_dir}')
            if not safe_move(current_file, self.processed_files_dir):
                logger.error(f'Cannot move {current_file} into {self.processed_files_dir}')
class MoveFailedFiles(Task):
    """Move failed files found
    into a target directory."""
    plugin_name = 'roc.dingo'
    name = 'move_failed_files'

    def setup_inputs(self):
        """Gather the task inputs from the pipeline properties."""
        # See if --no-move keyword is defined
        self.no_move = self.pipeline.get('no_move', default=False, args=True)

        # Get or create failed_files list from pipeline properties
        self.failed_files = self.pipeline.get(
            'failed_files', default=[], create=True)

        # Get or create failed_files_dir list from pipeline properties
        self.failed_files_dir = self.pipeline.get(
            'failed_files_dir', default=[None], create=True)[0]

    def run(self):
        """Move every failed file into the target directory."""
        try:
            self.setup_inputs()
        except Exception:
            # Catch Exception (not a bare except) so SystemExit and
            # KeyboardInterrupt still propagate
            logger.exception(
                'Initializing inputs for task MoveFailedFiles has failed!')
            self.pipeline.exit()
            return

        if self.no_move:
            logger.debug(
                'Skip current task MoveFailedFiles')
            return

        if not self.failed_files:
            logger.debug(
                'Input list of failed files is empty: skip MoveFailedFiles task')
            return

        if not self.failed_files_dir:
            logger.warning('There are failed files but failed_files_dir value is not defined, skip MoveFailedFiles task')
            return

        # Create folder if it does not exist
        Path(self.failed_files_dir).mkdir(parents=True, exist_ok=True)

        for current_file in self.failed_files:
            logger.info(f'Moving {current_file} into {self.failed_files_dir}')
            # If failed file does not exist, create an empty file so the
            # failure leaves a trace in the target directory
            if not os.path.isfile(current_file):
                Path(current_file).touch(exist_ok=True)
            if not safe_move(current_file, self.failed_files_dir):
                logger.error(f'Cannot move {current_file} into {self.failed_files_dir}')
from datetime import datetime, timedelta
from pathlib import Path
import uuid
from sqlalchemy import and_
from poppy.core.logger import logger
from poppy.core.db.connector import Connector
from poppy.core.task import Task
from roc.dingo.constants import PIPELINE_DATABASE, TRYOUTS, TIME_WAIT_SEC, SQL_LIMIT, NAIF_SOLO_ID
from roc.dingo.models.data import SbmLog, BiaSweepLog, LfrKcoeffDump
from roc.dingo.models.packet import TmLog
from roc.dingo.tools import load_spice, query_db, is_sclk_uptodate, valid_time
class UpdateUtcTime(Task):
    """
    Update predictive UTC times in the ROC database.

    For each table carrying a ``utc_time_is_predictive`` flag
    (tm_log, sbm_log, bia_sweep_log, lfr_kcoeff_dump), rows flagged as
    predictive are re-converted from on-board (CUC) time to UTC using the
    latest SPICE SCLK/LSK kernels, then committed back to the database.
    """
    plugin_name = 'roc.dingo'
    name = 'update_utc_time'

    def add_targets(self):
        # No pipeline target is required for this task
        pass

    @Connector.if_connected(PIPELINE_DATABASE)
    def setup_inputs(self):
        """Gather task inputs: SPICE kernels, optional time range, database
        session and query options (tryouts, wait, limit).

        :raise FileNotFoundError: if the SCLK or LSK kernel path is missing
        """
        # Get SOLO SPICE kernels (SCLK and LSK)
        self.sclk_file = self.pipeline.get(
            'sclk', default=[None], create=True)[0]
        self.lsk_file = self.pipeline.get(
            'lsk', default=[None], create=True)[0]
        if not self.sclk_file or not self.lsk_file:
            raise FileNotFoundError(
                'Both sclk_file and lsk_file must be passed as inputs to run UpdateUtcTime!')
        # Load SPICE kernels (no else needed: raise above already exits)
        self.spice = load_spice(spice_kernels=[self.lsk_file,
                                               self.sclk_file])
        # Get start_time input value
        self.start_time = valid_time(self.pipeline.get(
            'start_time', default=[None])[0],
        )
        # Get end_time input value
        self.end_time = valid_time(self.pipeline.get(
            'end_time', default=[None])[0],
        )
        # get a database session
        self.session = Connector.manager[PIPELINE_DATABASE].session
        # Get tryouts from pipeline properties
        self.tryouts = self.pipeline.get(
            'tryouts', default=[TRYOUTS], create=True)[0]
        # Get wait from pipeline properties
        self.wait = self.pipeline.get(
            'wait', default=[TIME_WAIT_SEC], create=True)[0]
        # Retrieve --limit keyword value
        self.limit = self.pipeline.get('limit',
                                       default=[SQL_LIMIT],
                                       args=True)[0]
        # Database insertion datetime
        self.insert_time = datetime.today()

    def run(self):
        """Query predictive rows table by table, recompute their UTC times
        and commit the updates."""
        # Define task job ID (long and short)
        self.job_uuid = str(uuid.uuid4())
        self.job_id = f'UpdateUtcTime-{self.job_uuid[:8]}'
        logger.info(f'Task {self.job_id} is starting')
        try:
            self.setup_inputs()
        except Exception:
            # except Exception (not a bare except) so SystemExit and
            # KeyboardInterrupt still propagate
            logger.exception(f'Initializing inputs has failed for task {self.job_id}!')
            self.pipeline.exit()
            return
        # List of tables for which utc times must be updated
        table_list = [TmLog, SbmLog, BiaSweepLog, LfrKcoeffDump]
        # Loop over tables
        for current_table in table_list:
            # Retrieve table rows which are flagged as predictive in the
            # table. is_(True) is the idiomatic SQLAlchemy form of the
            # original "== True" comparison.
            filters = [current_table.utc_time_is_predictive.is_(True)]
            if self.start_time:
                filters.append(current_table.utc_time >= str(self.start_time))
            if self.end_time:
                filters.append(current_table.utc_time < str(self.end_time))
            filters = and_(*filters)
            rows = query_db(self.session, current_table,
                            filters=filters,
                            tryouts=self.tryouts,
                            wait=self.wait,
                            limit=self.limit,
                            to_dict='records')
            n_rows = len(rows)
            logger.info(f'{n_rows} predictive time for {current_table.__tablename__} [{self.job_id}]')
            if n_rows == 0:
                continue
            # Check/Update utc time
            # NOTE(review): rows come back as dictionaries from query_db;
            # confirm that mutating them is actually tracked by the session
            # before the commit below.
            updated_rows = [current_row
                            for current_row in rows
                            if self.update_time(current_row)]
            n_rows = len(updated_rows)
            if n_rows > 0:
                # Then commit changes in database
                self.session.commit()
                logger.info(f'{n_rows} updated in {current_table.__tablename__} [{self.job_id}]')
            else:
                logger.info(f'No data to update in {current_table.__tablename__} [{self.job_id}]')

    def update_time(self, row, naif_id=NAIF_SOLO_ID):
        """
        Check if utc_time in the input row is up to date.
        If not, then update it using SPICE.

        :param row: input table row to check/update (dictionary)
        :param naif_id: NAIF ID of the spacecraft (default: Solar Orbiter)
        :return: True if row has been updated, False otherwise
        """
        is_uptodate = False
        # A row is updatable when the loaded SCLK kernel covers at least one
        # day beyond the row's current UTC time
        if is_sclk_uptodate(row['utc_time'] + timedelta(days=1), Path(self.sclk_file).name):
            row['utc_time'] = self.spice.obt2utc(naif_id, row['cuc_time'])
            row['utc_time_is_predictive'] = False
            row['insert_time'] = self.insert_time
            logger.debug(f'{row} has been updated [{self.job_id}]')
            is_uptodate = True
        return is_uptodate
"""Contains dingo tasks to query the pipeline database."""
import json
import os
from sqlalchemy import and_
from roc.dingo.tools import query_db, valid_time
try:
from poppy.core.logger import logger
from poppy.core.db.connector import Connector
from poppy.core.task import Task
from poppy.core.target import PyObjectTarget, FileTarget
from poppy.core.conf import settings
except:
print("POPPy framework seems to not be installed properly!")
from roc.dingo.constants import PIPELINE_DATABASE, SQL_LIMIT, PIPELINE_TABLES, \
TIME_OUTPUT_STRFORMAT, TIME_INPUT_STRFORMAT
__all__ = ["ExportToJson"]
# ________________ Global Variables _____________
# (define here the global variables)
# ________________ Class Definition __________
# (If required, define here classes)
class ExportToJson(Task):
    """
    Task to export content of the database into a JSON file.

    Each requested pipeline table is queried (optionally filtered by
    --start_time/--end_time and bounded by --limit) and the resulting rows
    are dumped under the table name in a single JSON document.
    """
    plugin_name = 'roc.dingo'
    name = "export_to_json"

    @Connector.if_connected(PIPELINE_DATABASE)
    def run(self):
        """Query the requested tables and write them to the output JSON.

        When --output_json is not given, the filename is built from the
        database name, the table list and the optional time range.
        """
        # Initialize output dictionary
        output_json_dict = {}
        # get the pipeline connector and get the database session
        self.session = Connector.manager[PIPELINE_DATABASE].session
        # Get list of pipeline database tables to query
        table_list = self.pipeline.get('tables',
                                       default=list(PIPELINE_TABLES.keys()), args=True)
        # If --start_time keyword value is set, add a lower time limit
        start_time = valid_time(self.pipeline.get('start_time',
                                                  default=[None],
                                                  args=True)[0])
        # If --end_time keyword value is set, add an upper time limit
        end_time = valid_time(self.pipeline.get('end_time',
                                                default=[None],
                                                args=True)[0])
        # If --creation-time is set to True, then
        # filtering packet_log and file_log table entries using creation time
        # instead of insertion time (not possible for invalid_packet_log)
        creation_time_flag = self.pipeline.get('creation-time',
                                               default=False, args=True)
        # Retrieve --limit keyword value once (loop invariant: it was
        # previously re-fetched on every table iteration)
        limit = self.pipeline.get('limit',
                                  default=[SQL_LIMIT],
                                  args=True)[0]
        for current_table in table_list:
            logger.info(f'Exporting entries for {current_table}')
            # Get table model
            current_model = PIPELINE_TABLES[current_table]
            # "time" below is the SQLAlchemy column used for time filtering
            # and ordering; it depends on the table and on --creation-time
            if current_table == 'packet_log':
                # Define time to use for start_time/end_time filter
                # Default is insertion time
                if creation_time_flag:
                    time = current_model.utc_time
                else:
                    time = current_model.insertion_time
            elif current_table == 'invalid_packet_log':
                time = current_model.insertion_time
            elif current_table == 'file_log':
                if creation_time_flag:
                    time = current_model.file_creation_date
                else:
                    time = current_model.file_insert_date
            else:
                logger.warning(f'Unknown table: {current_table}')
                continue
            # Add input filters
            filters = []
            if start_time:
                filters.append(time >= start_time)  # greater or equal than ...
            if end_time:
                filters.append(time < end_time)  # lesser than ...
            filters = and_(*filters)
            # get entries from pipeline table in ROC database
            # Order by increasing time
            entries_dict = query_db(self.session, current_model,
                                    filters=filters,
                                    limit=limit,
                                    order_by=time,
                                    to_dict='records',
                                    )
            entry_count = len(entries_dict)
            if entry_count == limit:
                logger.warning(f'Row limit has been reached {limit}')
            elif entry_count == 0:
                logger.info(f'No entry found in {current_model}')
            else:
                output_json_dict[current_table] = entries_dict
        if not output_json_dict:
            # Nothing retrieved at all: do not write an empty file
            return
        # Build output file path
        output_json = self.pipeline.get('output_json', default=None)
        if output_json is None:
            # Resolve the database name from the pipeline configuration
            # (fallback to 'rocdb' if no matching identifier is found)
            database_name = 'rocdb'
            for database in self.pipeline.properties.configuration["pipeline.databases"]:
                if database['identifier'] == settings.PIPELINE_DATABASE:
                    database_name = database['login_info']['database']
                    break
            filename_field_list = [database_name,
                                   "-".join(table_list)]
            if start_time:
                filename_field_list.append(
                    start_time.strftime(TIME_OUTPUT_STRFORMAT))
            if end_time:
                filename_field_list.append(
                    end_time.strftime(TIME_OUTPUT_STRFORMAT))
            output_json = "_".join(filename_field_list) + ".json"
        output_json_path = os.path.join(self.pipeline.output, output_json)
        # Write output JSON file
        with open(output_json_path, 'w') as json_buff:
            json.dump(output_json_dict, json_buff)
        logger.info(f'{output_json_path} saved')
# ________________ Global Functions __________
# (If required, define here global functions) | /roc_dingo-1.3.8-py3-none-any.whl/roc/dingo/tasks/export.py | 0.53437 | 0.162081 | export.py | pypi |
import time
from datetime import datetime, timedelta
import hashlib
import pandas as pd
from sqlalchemy import and_
import xmltodict
from poppy.core.logger import logger
from poppy.core.db.connector import Connector
from poppy.core.task import Task
from poppy.core.target import FileTarget
from sqlalchemy.exc import IntegrityError
from roc.dingo.constants import PIPELINE_DATABASE, TIME_SQL_STRFORMAT, TRYOUTS, TIME_WAIT_SEC, SQL_LIMIT
from roc.dingo.models.data import SoloHkParam, ProcessQueue
from roc.dingo.tools import query_db, get_columns, get_or_create_in_db, bulk_insert, glob_paths
__all__ = ["SoloHkToDb"]
class SoloHkToDb(Task):
    """
    Insert the content of input SOLO HK EDDS data files into the ROC database.

    Each input XML file is parsed for <ParamSampleListElement> elements,
    which are converted into solo_hk_param rows (de-duplicated through a
    SHA256 of their content) and bulk inserted. When some rows already
    exist (IntegrityError), only the new rows are re-inserted. Optionally
    the affected days are appended to the process_queue table.
    """
    plugin_name = 'roc.dingo'
    name = 'solohk_to_db'

    def add_targets(self):
        """Declare the input file target holding the SOLO HK EDDS files."""
        self.add_input(identifier='solo_hk_files',
                       many=True,
                       filepath=SoloHkToDb.get_solohk_files,
                       target_class=FileTarget)

    @staticmethod
    def get_solohk_files(pipeline):
        """Return the input SOLO HK files from the pipeline arguments as a
        list, or None when the argument is not defined (the value is then
        expected to be already set as a target input)."""
        try:
            solo_hk_files = pipeline.args.solo_hk_files
            if not isinstance(solo_hk_files, list):
                solo_hk_files = [solo_hk_files]
            return solo_hk_files
        except Exception:
            # If not defined as input argument, then assume that it is already
            # defined as target input.
            # (except Exception instead of a bare except so SystemExit and
            # KeyboardInterrupt still propagate)
            pass

    @Connector.if_connected(PIPELINE_DATABASE)
    def setup_inputs(self):
        """Gather task inputs: input file paths, bookkeeping lists, retry
        and limit options, database session, table model/columns and
        counters."""
        # get the input SOLO HK files
        self.solo_hk_files = glob_paths(self.inputs['solo_hk_files'].filepath)
        # Get or create failed_files list from pipeline properties
        self.failed_files = self.pipeline.get(
            'failed_files', default=[], create=True)
        # Get or create processed_files list from pipeline properties
        self.processed_files = self.pipeline.get(
            'processed_files', default=[], create=True)
        # Get tryouts from pipeline properties
        self.tryouts = self.pipeline.get(
            'tryouts', default=[TRYOUTS], create=True)[0]
        # Get wait from pipeline properties
        self.wait = self.pipeline.get(
            'wait', default=[TIME_WAIT_SEC], create=True)[0]
        # Retrieve --limit keyword value
        self.limit = self.pipeline.get('limit',
                                       default=[SQL_LIMIT],
                                       args=True)[0]
        # Get --to-queue keyword
        self.to_queue = self.pipeline.get(
            'to_queue', default=False, create=True)
        # get a database session
        self.session = Connector.manager[PIPELINE_DATABASE].session
        # Get columns of the table
        self.model = SoloHkParam
        self.columns = get_columns(self.model, remove=['id'])
        # Get today date time
        self.today = datetime.today()
        # Initialize task counters
        self.inserted_counter = 0
        self.invalid_counter = 0

    def run(self):
        """Parse every input SOLO HK file and insert its parameter samples
        into the solo_hk_param table, tracking processed and failed files."""
        logger.debug('[SoloHkToDb]: Task is starting')
        try:
            self.setup_inputs()
        except Exception:
            logger.exception(
                '[SoloHkToDb]: Initializing inputs has failed!')
            self.pipeline.exit()
            return
        n_files = len(self.solo_hk_files)
        logger.info(f'{n_files} input solo_hk_files to process')
        if n_files == 0:
            return
        # Loop over each input file in the input list
        for i, current_file in enumerate(self.solo_hk_files):
            logger.info(f'Processing {current_file} ({n_files - i -1} remaining)')
            # Parse input file
            try:
                current_data = self.parse_solohk_xml(current_file)
            except Exception:
                logger.exception(f'Parsing {current_file} has failed!')
                self.failed_files.append(current_file)
                continue
            n_element = current_data.shape[0]
            logger.info(f'{n_element} <ParamSampleListElement> elements found in {current_file}')
            if n_element == 0:
                logger.info(f'{current_file} is empty, skip it')
                self.processed_files.append(current_file)
                continue
            # Get list of pre-existing elements in the database
            try:
                # First convert input XML fields into
                # expected table column names
                # and compute extra values (SHA, utc_time)
                current_data = self.xml_to_solohkparam(current_data)
            except Exception:
                logger.exception(f'Preprocessing data from {current_file} has failed!')
                self.failed_files.append(current_file)
                continue
            else:
                n_data_to_insert = current_data.shape[0]
                logger.info(f'Inserting {n_data_to_insert} elements from {current_file}')
                if n_data_to_insert == 0:
                    self.processed_files.append(current_file)
                    continue
                else:
                    data_to_insert = current_data[self.columns].to_dict('records')
            try:
                bulk_insert(self.session, self.model, data_to_insert)
            except IntegrityError:
                logger.debug(f'Some data already found in the database for '
                             f'{current_file}, attempting to insert new data only')
                # Entries already found in the database, try to insert only new data
                # First query existing data (only SHA is required)
                # Define time range of request
                start_time = current_data['utc_time'].min()
                end_time = current_data['utc_time'].max()
                existing_data = pd.DataFrame.from_records(self._get_existing_data(self.model, self.model.sha,
                                                                                  start_time=start_time,
                                                                                  end_time=end_time))
                # Only keep unique SHA elements
                current_data = current_data[
                    ~current_data.sha.isin(existing_data.sha)]
                n_data_to_insert = current_data.shape[0]
                if n_data_to_insert == 0:
                    logger.info(f'No new data to insert for {current_file}')
                    self.processed_files.append(current_file)
                    continue
                else:
                    # Re-insert only new elements
                    data_to_insert = current_data[
                        self.columns].to_dict('records')
                    bulk_insert(self.session, self.model, data_to_insert)
                    logger.debug(
                        f'{n_data_to_insert} new elements inserted for {current_file}')
                # NOTE(review): on this recovery path the file is not added
                # to self.processed_files (the try/else below only runs when
                # no exception occurred) -- confirm whether intentional.
            except Exception:
                logger.exception(f'Inserting new data from {current_file} has failed!')
                self.failed_files.append(current_file)
                continue
            else:
                self.processed_files.append(current_file)
            # Increment number of elements inserted
            self.inserted_counter += n_data_to_insert
            # Add current element days to process_queue table
            if self.to_queue:
                # Only add to process_queue table
                # new days to insert
                days_to_process = list(set(
                    [datetime.combine(current_date, datetime.min.time())
                     for current_date in
                     current_data['utc_time'].to_list()]))
                for current_day in days_to_process:
                    current_entry = self._as_process_queue(current_day)
                    job, done, created = get_or_create_in_db(self.session, ProcessQueue, current_entry)
                    if done:
                        logger.debug(f'{current_entry} inserted in process_queue table')
                    else:
                        logger.error(f'{current_entry} cannot be inserted in process_queue table')
        n_processed = len(self.processed_files)
        n_failed = len(self.failed_files)
        if n_processed > 0:
            logger.info(f'{self.inserted_counter} new elements inserted from {n_processed} files')
        if n_failed > 0:
            logger.error(f'Insertion has failed for {n_failed} files!')

    def _get_existing_data(self, model, fields,
                           start_time=None,
                           end_time=None):
        """
        Query database to return existing data for a given table.

        :param model: class of the table
        :param fields: fields to query (None to return whole rows)
        :param start_time: optional lower bound on utc_time
        :param end_time: optional upper bound on utc_time
        :return: returned rows (as a list of dictionaries)
        """
        # Get list of existing data in the database
        filters = []
        # Add start_time/end_time filters (if passed), widened by one hour
        # on each side to be robust against boundary effects
        if start_time:
            filters.append(model.utc_time >= str(
                start_time - timedelta(hours=1)))
        if end_time:
            filters.append(model.utc_time <= str(
                end_time + timedelta(hours=1)))
        if fields is None:
            fields = model
        results = query_db(self.session, fields,
                           filters=and_(*filters),
                           tryouts=self.tryouts,
                           wait=self.wait,
                           limit=self.limit,
                           to_dict='records')
        return results

    def parse_solohk_xml(self, solo_hk_xml):
        """
        Parse input SOLO HK EDDS XML file.

        :param solo_hk_xml: Path of the input Solo HK EDDS file
        :return: pandas.DataFrame of <ParamSampleListElement> elements
        """
        logger.debug(f'Parsing {solo_hk_xml} ...')
        with open(solo_hk_xml, 'r') as xml:
            xml_data = xmltodict.parse(xml.read())['ns2:ResponsePart']['Response'][
                'ParamResponse']['ParamSampleList']['ParamSampleListElement']
        # A single element is returned as a dict: normalize to a list
        if not isinstance(xml_data, list):
            xml_data = [xml_data]
        xml_data = pd.DataFrame.from_records(xml_data)
        return xml_data

    def xml_to_solohkparam(self, xml_data):
        """
        Convert <ParamSampleListElement> XML elements
        as entries to be inserted in the solo_hk_param table of the ROC database

        :param xml_data: pandas.Dataframe containing the <ParamSampleListElement> elements from input XML file
        :return: pandas.Dataframe with expected fields values for solo_hk_param table
        """
        # First keep only valid elements
        solohk_data = xml_data.loc[xml_data[
            'Validity'].isin(['VALID'])]
        solohk_data.reset_index(drop=True, inplace=True)
        if solohk_data.shape[0] != 0:
            # Define field names as expected for solo_hk_param table
            solohk_data = solohk_data.rename(columns={
                'Name': 'name',
                'Unit': 'unit',
                'Description': 'description',
                'EngineeringValue': 'eng_value',
                'RawValue': 'raw_value',
            }, inplace=False,)
            # Convert elements times in datetime object
            solohk_data['utc_time'] = solohk_data.apply(
                lambda x: datetime.strptime(x['TimeStampAsciiA'],
                                            TIME_SQL_STRFORMAT), axis=1)
            # Compute SHA256 of the elements
            solohk_data['sha'] = solohk_data.apply(
                self._get_sha,
                axis=1)
            # Make sure to have unique elements (unique SHA values)
            solohk_data.drop_duplicates(subset=['sha'], inplace=True)
        return solohk_data

    def _get_sha(self, data):
        """
        Compute the SHA of the input element (from all parameters in data)

        :param data: Current SOLO HK parameter element
        :return: associated SHA256 (hex digest string)
        """
        sha = hashlib.sha256()
        sha.update(data['utc_time'].isoformat().encode('utf-8'))
        sha.update(data['name'].encode('utf-8'))
        sha.update(data['description'].encode('utf-8'))
        sha.update(data['raw_value'].encode('utf-8'))
        sha.update(data['eng_value'].encode('utf-8'))
        return str(sha.hexdigest())

    def _as_process_queue(self, current_date):
        """
        Return expected format for a process_queue table entry
        from input current date

        :param current_date: datetime.date() object to convert
        :return: dictionary for data_queue table entry
        """
        start_time = datetime.combine(current_date, datetime.min.time())
        return {'dataset_id': 'SOLO_HK_PLATFORM',
                'start_time': start_time,
                'insert_time': self.today}
"""Contains dingo tasks to insert EFECS event data into the ROC database."""
import json
import os
from datetime import datetime
import sqlalchemy.exc
import xmltodict
from poppy.core.logger import logger
from poppy.core.db.connector import Connector
from poppy.core.task import Task
from poppy.core.target import FileTarget, PyObjectTarget
from roc.dingo.constants import PIPELINE_DATABASE, TRYOUTS, TIME_WAIT_SEC, TIME_EFECS_STRFORMAT
from roc.dingo.models.data import EfecsEvents
from roc.dingo.tools import get_columns, insert_in_db, query_db, delete_in_db, glob_paths
__all__ = ['EfecsToDb']
class EfecsToDb(Task):
    """
    Insert EFECS event data into the ROC database.

    Each input EFECS XML file is parsed, checked against the database
    (LTP count / generation time / event count) and, when not already up
    to date, its events are inserted or updated in the efecs_events table.
    """
    plugin_name = 'roc.dingo'
    name = 'efecs_to_db'

    def add_targets(self):
        """Declare the input file target holding the EFECS XML files."""
        self.add_input(identifier='efecs_files',
                       many=True,
                       filepath=EfecsToDb.get_efecs_files,
                       target_class=FileTarget)

    @staticmethod
    def get_efecs_files(pipeline):
        """Return the input EFECS files from the pipeline arguments as a
        list, or None when not defined (the target input is then used)."""
        try:
            efecs_files = pipeline.args.efecs_files
            if not isinstance(efecs_files, list):
                efecs_files = [efecs_files]
            return efecs_files
        except Exception:
            # If not defined as input argument, then assume that it is already
            # defined as target input.
            # (except Exception instead of a bare except so SystemExit and
            # KeyboardInterrupt still propagate)
            pass

    @Connector.if_connected(PIPELINE_DATABASE)
    def setup_inputs(self):
        """Gather task inputs: EFECS file paths, retry options, database
        session and the efecs_events table model/columns."""
        # get the input files
        self.efecs_files = glob_paths(self.inputs['efecs_files'].filepath)
        # Get tryouts from pipeline properties
        self.tryouts = self.pipeline.get(
            'tryouts', default=[TRYOUTS], create=True)[0]
        # Get wait from pipeline properties
        self.wait = self.pipeline.get(
            'wait', default=[TIME_WAIT_SEC], create=True)[0]
        # get a database session
        self.session = Connector.manager[PIPELINE_DATABASE].session
        # Get table class
        self.model = EfecsEvents
        # Get table columns (remove primary key)
        self.columns = get_columns(self.model, remove=['id'])

    def run(self):
        """Parse each input EFECS file and insert/update its events in the
        efecs_events table, skipping files already up to date."""
        logger.debug('Task EfecsToDb is starting')
        try:
            self.setup_inputs()
        except Exception:
            logger.exception(
                'Initializing inputs for task EfecsToDb has failed!')
            self.pipeline.exit()
            return
        # Number of EFECS files
        n_files = len(self.efecs_files)
        logger.info(f'{n_files} EFECS files to process')
        # loop over each input E-FECS file
        new_insert_count = 0
        already_insert_count = 0
        failed_insert_count = 0
        for i, current_file in enumerate(self.efecs_files):
            self.current_file = current_file
            logger.info(f'Processing {current_file} ({n_files - i - 1} remaining)')
            # Parse file
            try:
                efecs_data = parse_efecs(current_file)
                # Make sure to always use list for EFECS events
                # And get total number of events in the EFECS file
                event_count = 0
                for key, val in efecs_data['events'].items():
                    if not isinstance(val, list):
                        efecs_data['events'][key] = [val]
                    event_count += len(efecs_data['events'][key])
            except Exception:
                logger.exception(f'Parsing {current_file} has failed!')
                continue
            else:
                # GET LTP counter from input filename (e.g., "EFECS_M05.xml")
                ltp_count = int(efecs_data['header2']['@SPKT_ltp_number'])
                # Get EFECS file generation time
                gen_time = datetime.strptime(efecs_data['header']['@gen_time'],
                                             TIME_EFECS_STRFORMAT)
                # Check database content against EFECS file info
                if self.is_uptodate(ltp_count, gen_time, event_count):
                    logger.info(f'{event_count} EFECS events from {current_file} already inserted, skip it')
                    continue
                else:
                    logger.info(f'{event_count} EFECS events will be inserted from {current_file}')
            # Loops over each EFECS event found in the input file
            for key, val in efecs_data['events'].items():
                for current_entry in val:
                    data_to_insert = {'name': key}
                    data_to_insert['utc_time'] = datetime.strptime(current_entry.pop('@time'),
                                                                   TIME_EFECS_STRFORMAT)
                    # Strip the xmltodict "@" prefix from attribute names
                    data_to_insert['attributes'] = {
                        attr_key.replace("@", ""): attr_val
                        for attr_key, attr_val in current_entry.items()
                    }
                    data_to_insert['ltp_count'] = ltp_count
                    data_to_insert['gen_time'] = gen_time
                    # Define columns to check in case of update
                    update_fields_kwargs = {col_name: col_val
                                            for col_name, col_val in data_to_insert.items()
                                            if col_name in ['name', 'utc_time']}
                    # Insert / update efecs data into the database
                    status = insert_in_db(self.session, self.model, data_to_insert,
                                          update_fields=data_to_insert,
                                          update_fields_kwargs=update_fields_kwargs,
                                          tryouts=self.tryouts,
                                          wait=self.wait)
                    if status < 0:
                        logger.error(f'Cannot insert {data_to_insert} from {current_file}!')
                        failed_insert_count += 1
                    elif status == 0:
                        new_insert_count += 1
                        logger.debug(f'{data_to_insert} inserted')
                    else:
                        already_insert_count += 1
                        logger.debug(f'{data_to_insert} already inserted (updated)')
        if new_insert_count > 0:
            logger.info(f'{new_insert_count} EFECS events inserted')
        if already_insert_count > 0:
            logger.info(f'{already_insert_count} EFECS events updated')
        if failed_insert_count > 0:
            logger.warning(f'{failed_insert_count} EFECS events insertion have failed!')

    def is_uptodate(self, ltp_count, gen_time, event_count):
        """
        Compare content of the pipeline.efecs_events table with
        input input EFECS file ltp count, generation time and number of events.
        Return True if the database and file content are the same.

        :param ltp_count: EFECS file LTP count (integer)
        :param gen_time: EFECS file generation time (datetime.datetime object)
        :param event_count: Number of EFECS events in the file
        :return: True if EFECS file events are already found in the database, False otherwise
        """
        is_found = False
        try:
            # Query database to get EFECS events with
            # input values for ltp_count and gen_time
            filters = (EfecsEvents.ltp_count == ltp_count)
            rows = query_db(self.session, EfecsEvents.gen_time,
                            filters=filters,
                            tryouts=self.tryouts,
                            wait=self.wait,
                            to_dict='records')
            row_count = len(rows)
            if row_count == 0:
                # BUGFIX: the original message was missing the f-string
                # prefix, so "{ltp_count}" was logged literally
                logger.debug(f'No entry found for {ltp_count} in the database')
            else:
                is_found = (all(current_event['gen_time'] == gen_time for current_event in rows) and
                            row_count == event_count)
                if not is_found and row_count > 0 and gen_time < rows[0]['gen_time']:
                    logger.warning(f'Input EFECS file generation time ({gen_time}) for {self.current_file}'
                                   f' is older than in database ({rows[0]["gen_time"]})!')
        except Exception:
            # If cannot compare, assume is_found=False by default
            logger.exception('Database content cannot be compared!')
        return is_found
def parse_efecs(efecs_file):
    """
    Parse an EFECS XML file.

    :param efecs_file: Path to the EFECS file to parse
    :return: content of the <eventfile> element as a dictionary
    """
    with open(efecs_file, 'r') as xml_buffer:
        parsed = xmltodict.parse(xml_buffer.read())
    return parsed['eventfile']
import numpy as np
from scipy import stats
from typing import Union
array_like = Union[list, tuple, np.ndarray]
def log_likelihood(f_obs: Union[float, array_like], p_exp: Union[float, array_like]) -> float:
    r"""Computes the Log Likelihood

    This is the log likelihood function that appears on page 145 of Dunn (2011)
    and is also used in the ROC toolbox of Koen, Barrett, Harlow, & Yonelinas
    (2017; see https://github.com/jdkoen/roc_toolbox). The calculation is:
    $\sum_i^{j}O_i\log(P_i)$ where $j$ refers the number of response
    categories.

    Note: the docstring is a raw string because ``\sum`` and ``\log`` are
    invalid escape sequences in a regular string literal (SyntaxWarning on
    Python >= 3.12).

    Parameters
    ----------
    f_obs : array_like
        The observed frequencies (counts; non-cumulative) for each of the
        response categories.
    p_exp : array_like
        The expected probabilities (non-cumulative) for each of the response
        categories.

    Returns
    -------
    log_likelihood : float
        The log likelihood value for the given inputs.
    """
    return (np.array(f_obs) * np.log(np.array(p_exp))).sum()
def squared_errors(observed: array_like, expected: array_like) -> np.ndarray:
    """Computes the squared errors between observed values and those
    which were computed by the model.

    Parameters
    ----------
    observed : array_like
        Array of observed values.
    expected : array_like
        Array of expected (model-predicted) values.

    Returns
    -------
    np.ndarray
        An array, equal to the length of the inputs, containing the computed
        squared error values.
    """
    # np.asarray makes the function work for plain lists/tuples as well,
    # matching the array_like annotation (list - list would raise TypeError)
    return (np.asarray(observed) - np.asarray(expected))**2
def aic(k: int, LL: float) -> float:
    """Computes Akaike's information criterion (AIC; https://en.wikipedia.org/wiki/Akaike_information_criterion).

    AIC estimates the relative quality of each model, enabling model
    comparison and selection: lower values indicate a better trade-off
    between fit and complexity.

    Parameters
    ----------
    k : int
        The number of estimated parameters in the model.
    LL : float
        The log-likelihood value (see `log_likelihood`).

    Returns
    -------
    float
        The AIC score.
    """
    complexity_penalty = 2 * k
    goodness_of_fit = 2 * LL
    return complexity_penalty - goodness_of_fit
def bic(k: int, n: int, LL: float) -> float:
    """Computes the Bayesian information criterion (BIC; https://en.wikipedia.org/wiki/Bayesian_information_criterion).

    BIC estimates the relative quality of each model, enabling model
    comparison and selection; unlike AIC its complexity penalty grows
    with the sample size.

    Parameters
    ----------
    k : int
        The number of estimated parameters in the model.
    n : int
        The number of data points in the observed data.
    LL : float
        The log-likelihood value (see `log_likelihood`).

    Returns
    -------
    float
        The BIC score.
    """
    complexity_penalty = k * np.log(n)
    goodness_of_fit = 2 * LL
    return complexity_penalty - goodness_of_fit
import numpy as np
from numpy.typing import ArrayLike
from prettytable import PrettyTable
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from scipy import stats
from typing import Union, Optional, List
numeric = Union[int, float, np.number]
array_like = Union[list, tuple, np.ndarray]
def arrays_equal_length(a: array_like, b: array_like) -> bool:
    """Return True if `a` and `b` contain the same number of elements.

    Parameters
    ----------
    a, b : array_like
        The two sequences whose lengths are compared.

    Returns
    -------
    bool
        True when the lengths match, False otherwise.
    """
    # Direct comparison replaces the original if/else returning literals
    return len(a) == len(b)
def keyval_table(**kwargs):
    """Build a two-column PrettyTable from keyword arguments.

    Scalar values produce one row (name, value); NumPy array values are
    expanded into one row per element, labelled "name 1", "name 2", ...
    """
    table = PrettyTable([0, 1])
    for name, value in kwargs.items():
        if isinstance(value, np.ndarray):
            # One row per array element, numbered from 1
            for idx, item in enumerate(value, start=1):
                table.add_row([f"{name} {idx}", item])
        else:
            table.add_row([name, value])
    return table
def accumulate(arr: array_like) -> np.ndarray:
    """Return the cumulative sum of `arr` as a NumPy array."""
    cumulative = np.cumsum(arr)
    return cumulative
def deaccumulate(arr: array_like) -> np.ndarray:
    """Invert a cumulative sum: recover per-category values from totals."""
    # Prepend a 0 so the first difference equals the first original value
    padded = np.insert(arr, 0, 0)
    return np.diff(padded)
def compute_proportions(
        arr: array_like,
        corrected: bool=True,
        truncate: bool=True
    ) -> np.ndarray:
    """Compute cumulative response proportions from raw response counts.

    The input should be response counts for each criterion category for
    either signal OR noise datasets.

    Parameters
    ----------
    arr : array_like
        The input array. EITHER: all responses to signal trials, OR all
        responses to noise trials.
    corrected : bool, optional
        If True, adds i/n (i = 1-based category index, n = number of
        categories) to each accumulated count and 1 to the grand total
        before normalising. The default is True.
    truncate : bool, optional
        Whether to drop the final element (always 1, and not convertible
        to a z-score). The default is True.

    Raises
    ------
    ValueError
        If the last element of the resulting array is not equal to 1.

    Returns
    -------
    np.ndarray
        The accumulated array of proportions.

    Example
    -------
    >>> s = [505, 248, 226, 172, 144, 93]
    >>> compute_proportions(s)
    array([0.3636909 , 0.54235661, 0.70518359, 0.82913367, 0.93292537])
    """
    cumulative = np.cumsum(arr)
    total = max(cumulative)
    n_cats = len(cumulative)

    if corrected:
        props = [(count + rank / n_cats) / (total + 1)
                 for rank, count in enumerate(cumulative, start=1)]
    else:
        props = list(cumulative / total)

    if props[-1] != 1:
        raise ValueError(f"Expected max accumulated to be 1 but got {props[-1]}.")

    if truncate:
        props = props[:-1]

    return np.array(props)
def plot_roc(
    signal: array_like,
    noise: array_like,
    from_freqs: bool=True,
    ax: Optional[Axes]=None,
    chance: bool=True,
    **kwargs
) -> Axes:
    """Scatter-plot an ROC curve (true-positive vs. false-positive rates).

    Parameters
    ----------
    signal : array_like
        Responses to signal trials. When `from_freqs=True` (default),
        these are raw observed frequencies per response category.
    noise : array_like
        Responses to noise trials, in the same format as `signal`.
    from_freqs : bool, optional
        If True, convert the raw frequencies to cumulative proportions
        via `compute_proportions`; if False, inputs are assumed to be
        already in ROC space. The default is True.
    ax : Optional[Axes], optional
        Existing matplotlib Axes to draw on; a new figure is created when
        None. The default is None.
    chance : bool, optional
        Whether to draw the diagonal chance line from (0, 0) to (1, 1).
        The default is True.
    **kwargs
        Forwarded to `matplotlib.pyplot.scatter`.

    Returns
    -------
    ax : Axes
        The matplotlib Axes holding the plotted data.
    """
    if from_freqs:
        # Convert raw category counts into cumulative proportions
        signal = compute_proportions(signal)
        noise = compute_proportions(noise)

    if ax is None:
        _, ax = plt.subplots()

    if chance:
        # Diagonal chance-performance reference line
        ax.plot([0, 1], [0, 1], c='k', lw=1, ls='dashed')

    # Large zorder keeps the data points above the chance line
    ax.scatter(noise, signal, **kwargs, zorder=1e10)
    ax.axis('square')
    ax.set(xlabel='FP', ylabel='TP')
    return ax
def regress(x: array_like, y: array_like, poly: int=1) -> tuple:
    """Least-squares regression with optional polynomial expansion.

    Parameters
    ----------
    x : array_like
        The set of x values (predictor).
    y : array_like
        The set of y values (outcome).
    poly : int, optional
        The order of the polynomial regression. The default is 1
        (linear regression).

    Returns
    -------
    coefs : array_like
        Fitted parameters from numpy.polyfit, highest degree first
        (`coefs[-1]` is the intercept).
    y_pred : array_like
        Model predictions for y at each x.
    """
    coefs = np.polyfit(x, y, poly)
    # Design matrix with columns [1, x, x^2, ..., x^poly]
    design = np.column_stack(
        [np.ones(len(x))] + [np.power(x, degree) for degree in range(1, poly + 1)])
    # coefs is highest-degree-first; reverse to match the column order
    y_pred = design @ coefs[::-1]
    return coefs, y_pred
def linear_equation(coefs: Union[float, array_like], precision: int=2):
    """Render polynomial regression coefficients as an equation string.

    Parameters
    ----------
    coefs : array_like
        Regression coefficients, highest power first (see the docs for
        `numpy.polyfit` return values). A bare float is treated as an
        intercept-only model.
    precision : int, optional
        Decimal places each coefficient is rounded to. The default is 2.

    Examples
    --------
    >>> linear_equation([.69])
    'y = 0.69'
    >>> linear_equation([.42, .69])
    'y = 0.69 + 0.42x'
    >>> linear_equation([1.4195, -0.89324, .42013, .69069], precision=4)
    'y = 0.6907 + 0.4201x - 0.8932x^2 + 1.4195x^3'

    Returns
    -------
    equation : str
        The string representation of the equation.
    """
    if isinstance(coefs, float):
        coefs = [coefs]

    intercept = np.round(coefs[-1], precision)
    equation = f'y = {intercept} + '
    if len(coefs) == 1:
        # Intercept-only model: drop the dangling ' + '
        return equation.replace(' + ', '')

    # Remaining coefficients, lowest degree first
    ascending = coefs[:-1][::-1]
    terms = [f'{np.round(coef, precision)}x^{power}'
             for power, coef in enumerate(ascending, start=1)]
    equation += ' + '.join(terms)
    # Cosmetic clean-up: '+ -c' becomes '- c', and the explicit '^1' is dropped
    equation = equation.replace('+ -', '- ')
    return equation.replace('^1', '')
def plot_zroc(
    signal: array_like,
    noise: array_like,
    from_freqs: bool=True,
    reg: bool=True,
    poly: int=1,
    data: bool=True,
    show_equation: bool=True,
    ax: Optional[Axes]=None,
    scatter_kwargs: Optional[dict]=None,
    line_kwargs: Optional[dict]=None
):
    """A utility to plot z-ROC curves. Requires signal and noise arrays in
    probability space. Accepts scatter plot keyword arguments.

    Parameters
    ----------
    signal : array_like
        Signal array of responses. When `from_freqs=True` (the default), the
        values are assumed to be the raw observed frequencies for each response
        category.
    noise : array_like
        Noise array of responses. When `from_freqs=True` (the default), the
        values are assumed to be the raw observed frequencies for each response
        category.
    from_freqs : bool, optional
        Specifies whether the arguments to `signal` and `noise` contain the
        raw frequency data (`from_freqs=True`) or if they are already prepared
        and in z-ROC space (z-scores of the cumulative probabilities;
        `from_freqs=False`). The default is True.
    reg : bool, optional
        Whether or not to draw a regression line. If True, see `poly`. The
        default is True.
    poly : int, optional
        The order of the polynomial regression line. The
        default is 1 (linear regression).
    data : bool, optional
        Whether to scatter-plot the z-transformed data points. The default is
        True.
    show_equation : bool, optional
        Whether to show the equation as the label of the line (if plotted).
    ax : Optional[Axes], optional
        Matplotlib Axes object to plot to, if already defined. The default is
        None.
    scatter_kwargs : dict, optional
        Keyword arguments for the matplotlib.pyplot.scatter function. See
        https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.scatter.html.
    line_kwargs : dict, optional
        Keyword arguments for the matplotlib.pyplot.plot function. See
        https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.plot.html.

    Returns
    -------
    ax : Axes
        A matplotlib Axes object with the plotted signal & noise data in
        z-space.
    """
    # None sentinels replace the old shared `dict={}` defaults; copying the
    # caller's dicts prevents the 'label' injection below from mutating them
    # (and from leaking the label into every subsequent call).
    scatter_kwargs = {} if scatter_kwargs is None else dict(scatter_kwargs)
    line_kwargs = {} if line_kwargs is None else dict(line_kwargs)
    if from_freqs:
        signal = compute_proportions(signal)
        noise = compute_proportions(noise)
    z_signal = stats.norm.ppf(signal)
    z_noise = stats.norm.ppf(noise)
    if ax is None:
        fig, ax = plt.subplots()
    ax.axhline(0, lw=1, ls='dashed', c='k')
    ax.axvline(0, lw=1, ls='dashed', c='k')
    if data:
        ax.scatter(z_noise, z_signal, zorder=1e10, **scatter_kwargs)
    if reg:
        coefs, y_pred = regress(x=z_noise, y=z_signal, poly=poly)
        if show_equation and 'label' not in line_kwargs:
            # Only show the equation if requested and if label not already provided.
            line_kwargs['label'] = "$" + linear_equation(coefs) + "$"
        ax.plot(z_noise, y_pred, **line_kwargs)
    ax.axis('square')
    ax.set(xlabel='z(FP)', ylabel='z(TP)')
    return ax
def auc(x: 'array_like', y: 'array_like') -> float:
    """The area under the curve. In the context of ROC curves, it is equal to
    the probability that the classifier will be able to discriminate signal
    from noise.

    Parameters
    ----------
    x : array_like
        The sample points corresponding to the false positive probabilities.
    y : array_like
        The sample points corresponding to the true positive probabilities.

    Returns
    -------
    float or np.ndarray
        The area under the curve (trapezoidal rule). For more details, see
        https://numpy.org/doc/stable/reference/generated/numpy.trapezoid.html.
    """
    # np.trapz was renamed to np.trapezoid in NumPy 2.0 (the old name was
    # removed). Prefer the new name, falling back for older NumPy versions.
    # NOTE: getattr's default must not reference np.trapz eagerly, as that
    # raises AttributeError on NumPy >= 2.0.
    trapezoid = getattr(np, 'trapezoid', None)
    if trapezoid is None:
        trapezoid = np.trapz
    return trapezoid(x=x, y=y)
def plot_performance(
    dprime: float,
    scale: float=1,
    ax: Optional[Axes]=None,
    shade: bool=False,
    noise_kwargs: Optional[dict]=None,
    signal_kwargs: Optional[dict]=None
):
    """Plot a signal & noise distribution for a specific value of d`.

    Parameters
    ----------
    dprime : float
        Sensitivity index.
    scale : float, optional
        The standard deviation of the signal distribution. The default is 1.
    ax : Optional[Axes], optional
        Matplotlib Axes object to plot to, if already defined. The default is
        None.
    shade : bool, optional
        Whether to shade the distributions. The default is False.
    noise_kwargs : dict, optional
        Keyword arguments (see matplotlib.pyplot.plot) for the noise
        distribution. The default is None, treated as {'c': 'k'}.
    signal_kwargs : dict, optional
        Keyword arguments (see matplotlib.pyplot.plot) for the signal
        distribution. The default is None, treated as {}.

    Returns
    -------
    ax : Axes
        A matplotlib Axes object displaying the signal & noise distributions
        for the specified value of d`.
    """
    # None sentinels avoid sharing mutable default dicts across calls.
    noise_kwargs = {'c': 'k'} if noise_kwargs is None else noise_kwargs
    signal_kwargs = {} if signal_kwargs is None else signal_kwargs
    strength = np.linspace(-4, 4+dprime*scale, 1000)
    noisedist = stats.norm.pdf(strength, loc=0, scale=1)
    signaldist = stats.norm.pdf(strength, loc=dprime, scale=scale)
    if ax is None:
        fig, ax = plt.subplots()
    ax.plot(strength, noisedist, label='noise', **noise_kwargs)
    # Read line colours back from *ax* (not plt.gca(), which may be a
    # different axes when the caller supplies `ax`).
    noise_clr = ax.lines[-1].get_color()
    ax.plot(strength, signaldist, label='signal', **signal_kwargs)
    signal_clr = ax.lines[-1].get_color()
    if shade:
        ax.fill_between(strength, noisedist, color=noise_clr, alpha=1/3)
        ax.fill_between(strength, signaldist, color=signal_clr, alpha=1/3)
    ax.set(yticklabels=[], yticks=[])
    return ax
import numpy as np
from scipy.stats import norm
from typing import Union, Optional
zscore = norm.ppf
def compute_performance(tpr: float, fpr: float, z_intercept: Optional[float]=None, z_slope: Optional[float]=None) -> dict:
    """Collect the standard signal-detection measures for one (TPR, FPR) pair.

    Returns a dict with the rates plus d' ('dprime'), A' ('aprime'),
    c ('cbias') and beta. When both `z_intercept` and `z_slope` are
    supplied, the area under the z-ROC curve ('Az') is included as well.
    """
    measures = dict(
        TPR=tpr,
        FPR=fpr,
        dprime=d_prime(tpr, fpr),
        aprime=a_prime(tpr, fpr),
        cbias=c_bias(tpr, fpr),
        beta=beta(tpr, fpr),
    )
    if not (z_intercept is None or z_slope is None):
        measures['Az'] = a_z(z_intercept, z_slope)
    return measures
def d_prime(tpr: Union[float, np.ndarray], fpr: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """Sensitivity measure d\`, element-wise.

    Estimates the ability to discriminate between instances of signal and
    noise as the distance between the z-transformed true- and false-positive
    rates. The larger the magnitude, the greater the sensitivity.

    Parameters
    ----------
    tpr : array_like
        The true positive rate(s) (range 0 <= `tpr` <= 1).
    fpr : array_like
        The false positive rate(s) (range 0 <= `fpr` <= 1).

    Returns
    -------
    ndarray
        d\` for each (tpr, fpr) pair. A single float is returned when both
        inputs are single floats.

    Example
    -------
    >>> d_prime(0.75, 0.21)
    1.480910997214322
    """
    # norm.ppf is the inverse normal CDF (the module-level `zscore` alias).
    return norm.ppf(tpr) - norm.ppf(fpr)
def c_bias(tpr: Union[float, np.ndarray], fpr: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """Bias measure `c`, element-wise.

    Estimates the criterion value relative to the intersection of the signal
    and noise distributions, centering on 0 (no bias). Positive values
    indicate a conservative bias, negative values a liberal bias.

    Parameters
    ----------
    tpr : array_like
        The true positive rate(s) (range 0 <= `tpr` <= 1). Can be
        a single float value or an array.
    fpr : array_like
        The false positive rate(s) (range 0 <= `fpr` <= 1). Can be
        a single float value or an array.

    Returns
    -------
    ndarray
        c for each (tpr, fpr) pair. A single float is returned when both
        inputs are single floats.

    Example
    -------
    >>> c_bias(0.75, 0.21)
    0.06596574841107933
    """
    # c = -(z(TPR) + z(FPR)) / 2; norm.ppf is the inverse normal CDF.
    return -(norm.ppf(tpr) + norm.ppf(fpr)) / 2
def a_prime(tpr: float, fpr: float) -> float:
    """The sensitivity index `A\``.

    Parameters
    ----------
    tpr : float
        The true positive rate (range 0 <= `tpr` <= 1).
    fpr : float
        The false positive rate (range 0 <= `fpr` <= 1).

    Returns
    -------
    float
        A\` for the given (tpr, fpr) pair.

    Notes
    -----
    A non-parametric measure of discrimination that estimates
    sensitivity as the area under an "average" ROC curve, for a
    single true- and false-positive rate. For further details,
    see Snodgrass & Corwin (1988). Note that `d\`` is preferred.
    """
    # When the rates coincide the observer is at chance (the fraction below
    # would be 0/denominator anyway); returning 0.5 directly also avoids a
    # ZeroDivisionError at (0, 0) and (1, 1).
    if tpr == fpr:
        return 0.5
    if tpr > fpr:
        numerator = (tpr - fpr) * (1 + tpr - fpr)
        denominator = 4 * tpr * (1 - fpr)
        return 0.5 + (numerator / denominator)
    # Symmetric form for below-chance performance (fpr > tpr).
    numerator = (fpr - tpr) * (1 + fpr - tpr)
    denominator = 4 * fpr * (1 - tpr)
    return 0.5 - (numerator / denominator)
def beta(tpr: Union[float, np.ndarray], fpr: Union[float, np.ndarray]):
    """The bias measure, β, element-wise.

    β is a likelihood ratio measure that estimates the criterion
    value by computing the ratio of the heights of the signal &
    noise distributions at the criterion: the likelihood of obtaining
    an observation equal to the criterion given a signal, divided by
    the likelihood of that observation given noise
    (L(x_c)|S / L(x_c)|N). See Snodgrass & Corwin (1988) for details.

    Parameters
    ----------
    tpr : array_like
        The true positive rate(s) (range 0 <= `tpr` <= 1). Can be
        a single float value or an array.
    fpr : array_like
        The false positive rate(s) (range 0 <= `fpr` <= 1). Can be
        a single float value or an array.

    Returns
    -------
    ndarray
        β for each (tpr, fpr) pair. A single float is returned when both
        inputs are single floats.
    """
    # Ratio of normal densities at the criterion expressed via z-scores:
    # exp((z(FPR)^2 - z(TPR)^2) / 2).
    z_tpr = norm.ppf(tpr)
    z_fpr = norm.ppf(fpr)
    return np.exp((z_fpr ** 2 - z_tpr ** 2) / 2)
def beta_doubleprime(tpr: Union[float, np.ndarray], fpr: Union[float, np.ndarray], donaldson: bool=False) -> Union[float, np.ndarray]:
    """The bias index `β\`\``, element-wise.

    Grier's `β\`\`` is A2-A1 divided by sum(A1, A2). If A1 < A2,
    there is a liberal bias, and if A2 < A1, there is a conservative
    bias. The default is Grier's (1971) formula as cited in Stanislaw &
    Todorov (1999); set `donaldson=True` for Donaldson's calculation.

    Parameters
    ----------
    tpr : array_like
        The true positive rate(s) (range 0 <= `tpr` <= 1). Can be
        a single float value or an array.
    fpr : array_like
        The false positive rate(s) (range 0 <= `fpr` <= 1). Can be
        a single float value or an array.
    donaldson : bool, optional
        Use Donaldson's calculation, by default False.

    Returns
    -------
    float, np.ndarray
        β\`\` for each (tpr, fpr) pair. A single float is returned when
        both inputs are single floats.
    """
    miss_rate = 1 - tpr    # false-negative rate
    cr_rate = 1 - fpr      # true-negative (correct rejection) rate
    if donaldson:
        correct = miss_rate * cr_rate
        incorrect = tpr * fpr
        return (correct - incorrect) / (correct + incorrect)
    # Grier (1971): sign(TPR - FPR) * [H(1-H) - F(1-F)] / [H(1-H) + F(1-F)]
    signal_term = tpr * miss_rate
    noise_term = fpr * cr_rate
    return np.sign(tpr - fpr) * (signal_term - noise_term) / (signal_term + noise_term)
def a_z(z_intercept: float, z_slope: float) -> float:
    """Area under the z-ROC curve (Az).

    See Stanislaw & Todorov (1999). Useful for checking the d` assumption of
    equal variances: the z-ROC slope should equal 1 (or log(slope) == 0).
    """
    denominator = np.sqrt(1 + z_slope ** 2)
    return norm.cdf(z_intercept / denominator)
if __name__ == '__main__':
    # Smoke test: print each measure for the example (TPR, FPR) pair used
    # in the function docstrings above.
    tpr, fpr = .75, .21
    print(d_prime(tpr, fpr))
    print(c_bias(tpr, fpr))
    print(a_prime(tpr, fpr))
    print(beta(tpr, fpr))
    print(beta_doubleprime(tpr, fpr))
    print(beta_doubleprime(tpr, fpr, donaldson=True))
    print(compute_performance(tpr, fpr, 1.23, 0.79))
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from typing import Union, Optional
from roc_face.base import _BaseModel
from roc_face.utils import plot_roc, array_like
class HighThreshold(_BaseModel):
    """High Threshold model class. Inherits functionality from _BaseModel class.
    See Yonelinas et al. (1996).

    Parameters
    ----------
    signal : array_like
        An array of observed response counts to signal-present trials.
    noise : array_like
        An array of observed response counts to noise trials.

    Attributes
    ----------
    label : str
        Short label built from the initials of `__modelname__` ('HT').
    """
    __modelname__ = 'High Threshold'
    # This model has no criterion parameters to fit (presumably interpreted
    # by _BaseModel.fit — TODO confirm against the base class).
    _has_criteria = False
    def __init__(self, signal: array_like, noise: array_like):
        # Single named parameter: R (recollection probability), bounded to [0, 1].
        # NOTE: _named_parameters must be set before super().__init__, which
        # presumably consumes it — do not reorder.
        self._named_parameters = {'R': {'initial': 0.99, 'bounds': (0, 1)}}
        self._n_named_parameters = len(self._named_parameters) + 1  # Required because `g` (guess) parameter is implicit
        self.label = ''.join([i[0] for i in self.__modelname__.split()])
        super().__init__(signal, noise)
    def compute_expected(self, R: float, full: bool=False) -> tuple:
        """Compute the expected signal and noise array using the High Threshold
        model.

        Parameters
        ----------
        R : float
            Threshold parameter, corresponding to the probability of
            recollection - the only variable to be solved for in the
            High Threshold model.
        full : bool, optional
            Whether to extend the model line across probability space. The
            default is False.

        Returns
        -------
        model_noise : array_like
            The expected values for the noise array, according to the model.
        model_signal : array_like
            The expected values for the signal array, according to the model.
        """
        if full:
            # Span the whole probability axis instead of the observed points.
            model_noise = np.array([0, 1])
        else:
            model_noise = self.obs_noise.roc
        # The HT ROC is the straight line from (0, R) to (1, 1).
        model_signal = (1 - R) * model_noise + R
        return model_noise, model_signal
class SignalDetection(_BaseModel):
    """Signal Detection model class. Inherits functionality from _BaseModel
    class.
    See Wixted.

    Parameters
    ----------
    signal : array_like
        An array of observed response counts to signal-present trials.
    noise : array_like
        An array of observed response counts to noise trials.
    equal_variance: bool, Optional
        Whether the variance of the signal distribution should be equal to that
        of the noise distribution, or not. If not, then the signal variance is
        considered as an additional parameter to be solved for.

    Attributes
    ----------
    label : str
        Short label built from the initials of `__modelname__`
        ('EVSD' or 'UVSD').
    scale : float
        Fitted standard deviation of the signal distribution (property).
    """
    __modelname__ = 'Equal Variance Signal Detection'
    _has_criteria = True
    def __init__(self, signal: array_like, noise: array_like, equal_variance: bool=True):
        # NOTE: _named_parameters must be set before super().__init__, which
        # presumably consumes it — do not reorder.
        self._named_parameters = {
            'd': {'initial': 1, 'bounds': (None, None)},  # d may need to start above the likely value for convergence with some fit statistics
            # 'scale': {'initial': 1, 'bounds': (1, 1 if equal_variance else None)},
        }
        if not equal_variance:
            # Unequal variance: rename the model and fit the signal scale too.
            self.__modelname__ = self.__modelname__.replace('Equal', 'Unequal')
            self._named_parameters['scale'] = {'initial': 1, 'bounds': (1, None)}
        self.label = ''.join([i[0] for i in self.__modelname__.split()])
        super().__init__(signal, noise)
    @property
    def scale(self):
        """float: the standard deviation (scale) of the signal distribution.

        Defaults to 1.0 when no 'scale' parameter was fitted (equal variance).
        NOTE(review): this reads `self.fitted_parameters`, whereas DualProcess
        reads `self.parameter_estimates` — confirm which attribute _BaseModel
        actually provides.
        """
        return self.fitted_parameters.get('scale', 1.0)
    def compute_expected(
        self,
        d: float,
        scale: float=1,
        criteria: Optional[array_like]=None
    ) -> tuple:
        """Compute the expected signal and noise array using the Signal Detection
        model.

        Parameters
        ----------
        d : float
            Sensitivity parameter. Corresponds to the distance between the
            signal and noise distributions.
        scale : float, optional
            The standard deviation of the signal distribution. The default is 1.
        criteria : array_like, optional
            Criterion parameter values. The length corresponds to the number of
            response categories minus 1 which are solved for. The
            default is None (a dense grid over [-5, 5) is used instead).

        Returns
        -------
        model_noise : array_like
            The expected values for the noise array, according to the model.
        model_signal : array_like
            The expected values for the signal array, according to the model.
        """
        if criteria is None:
            criteria = np.arange(-5, 5, 0.01)
        # Distributions are centred at +d/2 (signal) and -d/2 (noise); the
        # expected rates are the upper-tail areas beyond each criterion.
        model_signal = stats.norm.cdf(d / 2 - np.array(criteria), scale=scale)
        model_noise = stats.norm.cdf(-d / 2 - np.array(criteria), scale=1)
        return model_noise, model_signal
class DualProcess(_BaseModel):
    """Dual Process model class. Inherits functionality from _BaseModel
    class.
    This is a combination of the equal-variance signal detection and the high
    threshold models.

    Parameters
    ----------
    signal : array_like
        An array of observed response counts to signal-present trials.
    noise : array_like
        An array of observed response counts to noise trials.

    Attributes
    ----------
    label : str
        Fixed short label ('DPSD').
    familiarity : float or None
        Estimated probability that an item is familiar (property).
    recollection : float or None
        Estimated recollection parameter R (property).
    """
    __modelname__ = 'Dual Process Signal Detection'
    _has_criteria = True
    label = 'DPSD'
    def __init__(self, signal: array_like, noise: array_like):
        # NOTE: _named_parameters must be set before super().__init__, which
        # presumably consumes it — do not reorder.
        self._named_parameters = {
            'd': {'initial': 1, 'bounds': (None, None)},
            'R': {'initial': 0.999, 'bounds': (0, 1)},
        }
        # self.label = ''.join([i[0] for i in self.__modelname__.split()])
        super().__init__(signal, noise)
    @property
    def familiarity(self):
        """float: Estimate of familiarity.
        The probability that an item is familiar (see Yonelinas, 1996, p.431).
        Returns None when the model has not been fitted yet."""
        if not hasattr(self, 'parameter_estimates'):
            return None
        d = self.parameter_estimates.get('d')
        # Criterion at the signal/noise response boundary (index provided by
        # _BaseModel as `signal_boundary` — TODO confirm).
        c_x = self.parameter_estimates['criteria'][self.signal_boundary]
        return stats.norm.cdf( d / 2 - c_x )
    @property
    def recollection(self):
        """float: Estimate of recollection (the fitted R parameter).
        Returns None when the model has not been fitted yet."""
        if not hasattr(self, 'parameter_estimates'):
            return None
        return self.parameter_estimates.get('R')
    def compute_expected(
        self,
        d: float,
        R: float,
        criteria: Optional[array_like]=None
    ) -> tuple:
        """Compute the expected signal and noise array using the Dual Process
        model.
        See Yonelinas (1996).

        Parameters
        ----------
        d : float
            Sensitivity parameter. Corresponds to the distance between the
            signal and noise distributions. Viewed as an index of familiarity
            under the dual-process model.
        R : float
            Threshold parameter, corresponding to the probability of
            recollection.
        criteria : array_like, optional
            Criterion parameter values. The length corresponds to the number of
            response categories minus 1 which are solved for. The
            default is None (a dense grid over [-5, 5) is used instead).

        Returns
        -------
        model_noise : array_like
            The expected values for the noise array, according to the model.
        model_signal : array_like
            The expected values for the signal array, according to the model.
        """
        if criteria is None:
            criteria = np.arange(-5, 5, 0.01)
        # Noise follows equal-variance SDT; signal mixes a threshold
        # recollection component R with the SDT familiarity component.
        model_noise = stats.norm.cdf(-d / 2 - criteria)
        model_signal = R + (1 - R) * stats.norm.cdf(d / 2 - criteria)
        return model_noise, model_signal
if __name__ == '__main__':
    # Demo / smoke test: fit every model above to the same example
    # response-frequency data (6 confidence bins) and compare them.
    signal = [505,248,226,172,144,93]
    noise = [115,185,304,523,551,397]
    fit_method = 'G'
    cumulative = True # When false, matches ROC toolbox but gives poor fit for HT model.
    ht = HighThreshold(signal, noise)
    ht.fit(fit_method, cumulative=cumulative)
    print(ht.results)
    evsd = SignalDetection(signal, noise, equal_variance=True)
    evsd.fit(fit_method, cumulative=cumulative)
    print(evsd.results)
    uvsd = SignalDetection(signal, noise, equal_variance=False)
    uvsd.fit(fit_method, cumulative=cumulative, verbose=True)
    print(uvsd.results)
    dpsd = DualProcess(signal, noise)
    dpsd.fit(fit_method, cumulative=cumulative)
    print(dpsd.results)
    # Plot ROC curves
    fig, ax = plt.subplots(dpi=150)
    plot_roc(signal, noise, ax=ax)
    ax.plot(*ht.curve, label=ht.label)
    ax.plot(*evsd.curve, label=evsd.label)
    ax.plot(*uvsd.curve, label=uvsd.label)
    ax.plot(*dpsd.curve, label=dpsd.label)
    ax.legend(loc='lower right')
    plt.show()
    # Plot convergence for each model
    fig, ax = plt.subplots(2,2, dpi=150)
    for axis, model in zip(ax.flatten(), [ht, evsd, uvsd, dpsd]):
        axis.plot(model.convergence)
        axis.set(xlabel='iteration', ylabel=fit_method, title=model.label)
    plt.tight_layout()
    plt.show()
from poppy.core.logger import logger
# Public exception classes exported by this module.
__all__ = ['FilmException',
           'MetadataException',
           'UnknownPipeline',
           'AncReportProdError',
           'LoadDataSetError',
           'NoData',
           'NoEpochFoundError',
           'L0ProdFailure',
           'L1BiaProdError',
           'L1SurvProdFailure',
           'L1PostProError',
           'HkProdFailure',
           'L1SbmProdError',
           'AncBiaProdError',
           'HandlingFileError',
           'InvalidDataVersion',
           'EmptyInput']
class FilmException(Exception):
    """Generic base exception for the FILM plugin."""
class EmptyInput(Exception):
    """Raised when an expected input is empty."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
class HandlingFileError(Exception):
    """Raised when a file operation (e.g., copy, move, delete) fails."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
class MetadataException(Exception):
    """Raised when there is a problem with metadata."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
class AncReportProdError(Exception):
    """Raised when the summary report production fails."""
class UnknownPipeline(Exception):
    """Raised when the pipeline ID is not recognised."""
class NoEpochFoundError(Exception):
    """Raised when no Epoch variable is found."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
class L1SurvProdFailure(Exception):
    """Raised when the L1 survey CDF production has failed."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
class L1PostProError(Exception):
    """Raised when the L1 post-processing has failed."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
class L0ProdFailure(Exception):
    """Raised when the L0 HDF5 production has failed."""

    def __init__(self, message=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The message is optional here; only log when one was supplied.
        if message:
            logger.error(message)
        self.message = message
class HkProdFailure(Exception):
    """Raised when the HK CDF production has failed."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
class LoadDataSetError(Exception):
    """Raised when a dataset cannot be loaded correctly."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
class NoData(Exception):
    """Raised when no output data was processed.

    The logging callable used to report the message can be overridden via
    the ``ll`` argument (defaults to ``logger.error``).
    """

    def __init__(self, message=None, ll=logger.error,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The message is optional; report it through the supplied callable.
        if message is not None:
            ll(message)
        self.message = message
class L1SbmProdError(Exception):
    """Raised when the L1 SBM data file production fails."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
class L1BiaProdError(Exception):
    """Raised when the L1 Bias data production has failed."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
class AncBiaProdError(Exception):
    """Raised when the ANC Bias data production has failed."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
class InvalidDataVersion(Exception):
    """Raised when a data version string is invalid."""

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Log immediately at error level and keep the message on the instance.
        logger.error(message)
        self.message = message
import re
from os import path as osp
from datetime import datetime
from poppy.core.configuration import Configuration
from poppy.core.logger import logger
from poppy.pop.plugins import Plugin
from roc.film.exceptions import UnknownPipeline, InvalidDataVersion
from roc.film.tools import valid_data_version
from roc.film.constants import PLUGIN, TIME_DAILY_STRFORMAT, \
CDF_TRANGE_STRFORMAT, UNKNOWN_IDB, DATA_VERSION
# Public API of this module. get_idb_version and get_idb_source are public
# helpers defined below and were missing from the original list; they are
# added so `from ... import *` exposes the full public surface.
__all__ = ['init_l0_meta',
           'init_cdf_global',
           'get_data_version',
           'get_idb_version',
           'get_idb_source',
           'set_logical_file_id',
           'get_logical_file_id',
           'get_spice_kernels'
           ]
def init_cdf_global(l0_attrs, task, master_path,
                    overwrite=None):
    """
    Define global attributes data to save into the CDF from the content of the L0 file and task.
    See roc.film.tasks.l0.init_l0_meta for the list of specific L0 meta
    (Generic metadata are filled via the roc.rap plugin)

    :param l0_attrs: RPW L0 file attributes
    :param task: input task containing properties
    :param master_path: Path to the master CDF
    :param overwrite: Dictionary containing g.attrs as keys and expected values as values
    :return: output CDF global attribute dictionary
    """
    # Local import: spacepy is only needed by this function.
    from spacepy.pycdf import CDF
    # Initialize the output dictionary that will contain the metadata for CDF
    # by importing the master CDF global attributes
    meta = dict(CDF(master_path).attrs)
    pipeline_id = Configuration.manager['descriptor']['pipeline.identifier'].upper()
    pipeline_version = Configuration.manager['descriptor']['pipeline.release.version']
    if pipeline_id == 'RGTS':
        # Specific to RGTS: copy the test-related attributes from the L0 file.
        meta['Pipeline_name'] = pipeline_id + '>ROC Ground Test SGSE'
        try:
            meta['Test_name'] = l0_attrs['Test_name'].encode('utf-8')
            meta['Test_uuid'] = l0_attrs['Test_uuid']
            meta['Test_description'] = l0_attrs[
                'Test_description'].encode('utf-8')
            meta['Test_creation_date'] = l0_attrs['Test_creation_date']
            meta['Test_launched_date'] = l0_attrs['Test_launched_date']
            meta['Test_terminated_date'] = l0_attrs['Test_terminated_date']
            meta['Test_log_file'] = l0_attrs['Test_log_file']
            # ID of the test for the ROC internal use
            meta['Test_id'] = l0_attrs['Test_id']
        except Exception:
            # Best-effort: a missing Test_* attribute only produces a warning.
            logger.warning('No "Test_*" attribute found for the input l0')
    elif pipeline_id == 'RODP':
        # TODO - Complete specific global attributes for RODP
        meta['Pipeline_name'] = pipeline_id + '>RPW Operation and Data Pipeline'
        pass
    else:
        raise UnknownPipeline(f'UNKNOWN PIPELINE TYPE:'
                              f' {pipeline_id}, ABORTING!')
    # Common global attributes
    # NOTE(review): the broad except below silently keeps partial metadata
    # when any L0 attribute is missing — confirm this best-effort behaviour
    # is intended.
    try:
        # Perform some verifications on metadata
        if str(meta['Pipeline_name']) != str(l0_attrs['Pipeline_name']):
            # NOTE(review): the two f-strings concatenate without a space
            # between ')' and 'and' in the emitted message — likely a typo,
            # left unchanged here.
            logger.warning('Pipeline_name is inconsistent '
                           f"between the pipeline ({meta['Pipeline_name']})"
                           f"and the input L0 file ({l0_attrs['Pipeline_name']})!")
        meta['Pipeline_version'] = pipeline_version
        meta['Parents'] = ['CDF>' + l0_attrs['Logical_file_id']]
        meta['Parent_version'] = valid_data_version(l0_attrs['Data_version'])
        meta['Free_field'] = l0_attrs['Free_field']
        meta['Software_version'] = Plugin.manager[PLUGIN].version
        # Used for building the output filename
        meta['Datetime'] = l0_attrs['Datetime']
        # provider in the good format
        meta['Provider'] = l0_attrs['Provider']
        # Get file naming convention
        meta['File_naming_convention'] = l0_attrs['File_naming_convention']
    except Exception:
        logger.error('Missing attributes in l0 file!')
    # the name of the software (plugin) that generated the file, from the
    # descriptor information
    meta['Software_name'] = PLUGIN
    # Initialize Validate (0 = no validation)
    meta['Validate'] = '0'
    # Initialize data_version to "01"
    meta['Data_version'] = get_data_version(task)
    # If overwrite keyword, then replace g.attrs value
    if overwrite:
        for key, val in overwrite.items():
            meta[key] = val
            logger.debug(f'{key} g.attribute value set to {val}')
    # Initialize logical_file_id from the naming convention fields
    meta['Logical_file_id'] = set_logical_file_id(meta)
    return meta
def get_idb_version(task, **kwargs):
    """
    Try to get the IDB version used to parse packets.

    The value carried by the task's 'raw_data' input takes precedence; when
    it is not available, fall back to the pipeline 'idb_version' property,
    then to the `idb_version` keyword argument, then to UNKNOWN_IDB.

    :param task: task instance
    :return: string with idb_version
    """
    fallback = kwargs.get('idb_version', UNKNOWN_IDB)
    idb_version = task.pipeline.get('idb_version', default=fallback)
    try:
        idb_version = task.inputs['raw_data'].value.packet_parser.idb_version
    except Exception:
        logger.debug('No IDB version found in the input raw_data:\n'
                     f'attempting to retrieve value from pipeline properties: {idb_version}')
    return idb_version
def get_idb_source(task, **kwargs):
    """
    Try to get the IDB source used to parse packets.

    The value carried by the task's 'raw_data' input takes precedence; when
    it is not available, fall back to the pipeline 'idb_source' property,
    then to the `idb_source` keyword argument, then to UNKNOWN_IDB.

    :param task: task instance
    :return: string with idb_source
    """
    fallback = kwargs.get('idb_source', UNKNOWN_IDB)
    idb_source = task.pipeline.get('idb_source', default=fallback)
    try:
        idb_source = task.inputs['raw_data'].value.packet_parser.idb_source
    except Exception:
        logger.debug('No IDB source found in the input raw_data:\n'
                     f'attempting to retrieve value from pipeline properties: {idb_source}')
    return idb_source
def init_l0_meta(task,
                 extra_attrs=None):
    """
    Initialize RPW L0 metadata.

    :param task: pipeline task, used to read pipeline properties and the
        input raw data
    :param extra_attrs: Optional dictionary of extra attributes to insert into
        the L0 root group. The special keys 'Datetime' and 'Data_version'
        override the computed values. The caller's dictionary is left
        untouched.
    :return: meta, a dictionary containing metadata for the output L0 file.
    """
    # Work on a shallow copy: the previous implementation used a shared
    # mutable default ({}) AND popped keys from the caller's dict, silently
    # removing 'Datetime'/'Data_version' from it.
    extra_attrs = dict(extra_attrs) if extra_attrs else {}
    # Initialize the output dictionary that will contain the metadata for L0
    meta = dict()
    # Retrieve required values from the pipeline properties
    # Get pipeline ID ("RGTS" or "RODP")
    pipeline_id = task.pipeline.properties.configuration['environment.ROC_PIP_NAME'].upper()
    # Get input RawData value (None when not available)
    try:
        raw_data = task.inputs['raw_data'].value
    except Exception:
        raw_data = None
    # Get metadata specific to ROC-SGSE
    if pipeline_id == 'RGTS':
        meta['Pipeline_name'] = pipeline_id + '>ROC Ground Test SGSE'
        try:
            # Get the 7 first characters of the test log SHA
            test_sha = raw_data.sha1
            test_short_sha = raw_data.short_sha1
            meta['Test_name'] = raw_data.name
            meta['Test_uuid'] = raw_data.uuid
            meta['Test_description'] = raw_data.description
            meta['Test_creation_date'] = str(raw_data.creation_date)
            meta['Test_launched_date'] = str(raw_data.date)
            meta['Test_terminated_date'] = str(raw_data.terminated_date)
            meta['Test_log_file'] = osp.basename(raw_data.file_path)
            meta['Test_id'] = test_short_sha + '>' + test_sha
            meta['Free_field'] = '-'.join([task.pipeline.provider[:3].lower(),
                                           test_short_sha])
            meta['Datetime'] = '-'.join([raw_data.time_min.strftime(CDF_TRANGE_STRFORMAT),
                                         raw_data.time_max.strftime(CDF_TRANGE_STRFORMAT)])
        except Exception:
            # Best effort: fall back to a date-only naming when no test log
            # is available.
            logger.warning('No input test log found!')
            meta['Free_field'] = ''
            meta['Datetime'] = datetime.now().strftime(TIME_DAILY_STRFORMAT)
        meta['File_naming_convention'] = '<Source_name>_<LEVEL>_<Descriptor>_' \
                                         '<Datetime>_V<Data_version>_' \
                                         '<Free_field>'
    elif pipeline_id == 'RODP':
        # Get metadata specific to RODP
        # TODO - Complete metadata for RPW L0
        meta['File_naming_convention'] = '<Source_name>_<LEVEL>_<Descriptor>_' \
                                         '<Datetime>_V<Data_version>'
        meta['Pipeline_name'] = pipeline_id + '>RPW Operation and Data Pipeline'
        meta['Free_field'] = ''
        # Define Datetime value: explicit extra attribute first, then the
        # raw data time, then the pipeline 'datetime' property.
        datetime_attr = extra_attrs.pop('Datetime', None)
        if datetime_attr is None:
            if raw_data is not None and hasattr(raw_data, 'datetime') and raw_data.datetime is not None:
                datetime_attr = raw_data.datetime.strftime(TIME_DAILY_STRFORMAT)
            else:
                datetime_attr = task.pipeline.get('datetime')
                if datetime_attr is None:
                    logger.warning('Unknown Datetime attribute value')
                else:
                    datetime_attr = datetime_attr.strftime(TIME_DAILY_STRFORMAT)
        meta['Datetime'] = datetime_attr
    else:
        raise UnknownPipeline(f'UNKNOWN PIPELINE TYPE:'
                              f' {pipeline_id}, ABORTING!')
    # Common metadata
    meta['Project'] = 'SOLO>Solar Orbiter'
    meta['Source_name'] = 'SOLO>Solar Orbiter'
    meta['Software_name'] = PLUGIN
    meta['Software_version'] = Plugin.manager[PLUGIN].version
    meta['Dataset_ID'] = 'SOLO_L0_RPW'
    meta['Descriptor'] = 'RPW>Radio and Plasma Waves instrument'
    meta['LEVEL'] = 'L0>Level 0 data processing'
    meta['Provider'] = '>'.join(
        [
            task.pipeline.provider[:3].upper(),
            task.pipeline.provider,
        ]
    )
    meta['Pipeline_version'] = Configuration.manager['descriptor'][
        'pipeline.release.version'
    ]
    # Data_version: an explicit extra attribute wins over the pipeline value
    data_version = extra_attrs.pop('Data_version', None)
    if data_version is None:
        # Initialize data_Version to "01"
        meta['Data_version'] = get_data_version(task)
    else:
        meta['Data_version'] = data_version
    # Add remaining extra attributes (if any)
    for key, val in extra_attrs.items():
        meta[key] = val
    # Initialize logical_file_id from the naming convention fields
    meta['Logical_file_id'] = set_logical_file_id(meta)
    return meta
def get_data_version(task):
    """
    Get value of Data_version attribute.

    Reads the 'data_version' pipeline argument when available, otherwise
    falls back to the DATA_VERSION default, and normalizes the result to a
    two-digit string.

    :param task: input pipeline task object
    :return: string containing Data_version value
    """
    data_version = task.pipeline.get('data_version', default=None, args=True)
    # The pipeline returns the argument as a sequence; take its first item.
    data_version = DATA_VERSION if data_version is None else data_version[0]
    try:
        # Normalize and validate (returns only two digits)
        return valid_data_version(data_version)
    except Exception:
        raise InvalidDataVersion(f'Input data version is invalid: {data_version}')
def set_logical_file_id(metadata):
    """
    Build the Logical_file_id attribute value from the input metadata.

    Placeholders of the form "<Field>" found in the File_naming_convention
    attribute are substituted with the corresponding metadata values
    (short form only, i.e. the text before the first '>').

    :param metadata: dictionary containing metadata attributes
    :return: logical_file_id value
    """
    template = str(metadata['File_naming_convention'])
    # Fields whose values must stay upper-case in the resulting file name
    upper_fields = ('Datetime', 'LEVEL')
    for placeholder in re.findall(r'<([A-Za-z0-9_\-]+)>', template):
        # Keep only the short form of the attribute value
        raw_value = str(metadata[placeholder]).split('>')[0]
        if placeholder in upper_fields:
            substitution = raw_value.upper()
        else:
            substitution = raw_value.lower()
        template = template.replace('<' + placeholder + '>', substitution)
    return template
def get_logical_file_id(filename):
    """
    Get the logical file ID for a file given its complete file name on the
    system, i.e. the base name without its extension.

    Assumes that the file name follows the ROC naming convention.

    :param filename: input file name
    :return: string of the expected logical_file_id
    """
    # Drop the extension first, then keep only the base name
    without_extension = osp.splitext(filename)[0]
    return osp.basename(without_extension)
def get_spice_kernels(time_instance=None,
                      pattern=None):
    """
    Return the list of currently loaded SPICE kernels.

    :param time_instance: Time instance used to query SPICE.
        If not provided, it is initialized from the roc.rpl Time
        class (singleton).
    :param pattern: optional substring used to filter the kernel
        file names
    :return: list of loaded kernel file names (filtered by pattern,
        if any)
    """
    # If time_instance not passed as input argument,
    # then initialize it from Time class (singleton)
    if time_instance is None:
        from roc.rpl.time import Time
        time_instance = Time()

    # Get all loaded kernels (mapping of kernel file name -> info)
    loaded_kernels = time_instance.spice.kall()

    if pattern is not None:
        loaded_kernels = [kfile for kfile in loaded_kernels.keys()
                          if pattern in kfile]
    else:
        loaded_kernels = list(loaded_kernels.keys())

    return loaded_kernels
import argparse
from datetime import datetime
import logging
import os
from roc.guest.constants import INPUT_DATETIME_STRFTIME
from roc.guest.exceptions import GuestException
__all__ = ['valid_time', 'raise_error', 'valid_single_file', 'valid_data_version']
logger = logging.getLogger(__name__)
def raise_error(message, exception=GuestException):
    """
    Log *message* as an error, then raise it wrapped in the given
    exception type.

    :param message: error message to log and raise
    :param exception: exception class to raise (GuestException by default)
    """
    logger.error(message)
    err = exception(message)
    raise err
def valid_data_version(data_version):
    """
    Make sure to have a valid data version.

    :param data_version: integer or string containing the data version
        (a 1-element list is also accepted)
    :return: string containing valid data version (i.e., 2 digits string)
    """
    try:
        if isinstance(data_version, list):
            data_version = data_version[0]
        data_version = int(data_version)
        return f"{data_version:02d}"
    # Also catch TypeError (e.g. None input) and IndexError (empty list),
    # which int() / indexing can raise in addition to ValueError
    except (TypeError, ValueError, IndexError):
        # raise_error() always raises, so there is no fall-through here
        raise_error(f"Input value for --data-version is not valid! ({data_version})")
def valid_single_file(file):
    """
    Make sure to have a valid single file.

    :param file: 1-element list or string containing the path to the file
    :return: the validated file path (string)
    """
    try:
        if isinstance(file, list):
            file = file[0]
        if os.path.isfile(file):
            return file
        else:
            raise FileNotFoundError
    except FileNotFoundError:
        raise_error(f"Input file not found! ({file})",
                    exception=FileNotFoundError)
    except ValueError:
        raise_error(f"Input file is not valid! ({file})",
                    exception=ValueError)
    except Exception as e:
        # Pass the exception *class*, not the instance: raise_error()
        # instantiates it with the message (the original code passed the
        # instance, which is not callable and raised a TypeError instead).
        raise_error(f"Problem with input file! ({file})",
                    exception=type(e))
def valid_time(t, format=INPUT_DATETIME_STRFTIME):
    """
    Validate input datetime string format.

    :param t: input datetime string (may be None/empty, in which case
        None is returned)
    :param format: expected datetime string format
    :return: datetime object with input datetime info, or None if no
        input time was provided
    :raises argparse.ArgumentTypeError: if the string does not match
        the expected format
    """
    if t:
        try:
            return datetime.strptime(t, format)
        except ValueError:
            raise_error(f"Not a valid datetime: '{t}'.",
                        exception=argparse.ArgumentTypeError)
    # Falsy input (None or empty string): nothing to validate
    return None
from poppy.core.command import Command
from roc.guest.tools import valid_time, valid_single_file
from roc.guest.constants import SCOS_HEADER_BYTES, IDB_SOURCE
from roc.guest.tasks import \
xml_to_test, txt_to_test, \
test_to_xml, test_to_txt, \
mebdb_to_test, test_to_mebdb, \
raw_to_tmraw, raw_to_tcreport
from roc.guest.tasks.parse_test_packets import ParseTestPackets
from roc.guest.tasks import L0ToTest, TestToL0
from roc.guest.tasks.rocdb import clear_test
__all__ = []
class GuestCommands(Command):
    """
    Manage the commands relative to the roc.guest module.
    """
    __command__ = 'guest'
    __command_name__ = 'guest'
    __parent__ = 'master'
    __parent_arguments__ = ['base']
    __help__ = """
    Commands relative to the roc.guest module.
    """
    def add_arguments(self, parser):
        """Declare command-line arguments shared by all guest sub-commands."""
        # Option to filter test log data by testname pattern
        parser.add_argument(
            '-p', '--pattern',
            nargs=1,
            help="""
            Testname filter pattern.
            """,
            type=str,
        )
        # Option to set a lower time interval limit
        parser.add_argument(
            '--start-time',
            help="""
            Minimum of the date/time interval to export [YYYY-mm-ddTHH:MM:SS].
            """,
            default=None,
            type=valid_time,
        )
        # Option to set a upper time interval limit
        parser.add_argument(
            '--end-time',
            help="""
            Maximum of the date/time interval to export [YYYY-mm-ddTHH:MM:SS].
            """,
            default=None,
            type=valid_time,
        )
        # Optional argument to only process "Terminated" Test
        parser.add_argument(
            '-T', '--terminated',
            help="""
            Only process terminated tests
            """,
            action='store_true',
        )
        # specify the IDB version to use
        parser.add_argument(
            '--idb-version',
            help='IDB version to use.',
            nargs=1,
            default=None,
        )
        # specify the IDB source to use (defaults to the plugin-wide IDB_SOURCE)
        parser.add_argument(
            '--idb-source',
            help='IDB source to use (MIB, SRDB or PALISADE).',
            nargs=1,
            default=[IDB_SOURCE],
        )
        # Provide output test name
        parser.add_argument(
            '--out-test-name',
            type=str,
            help='Output test name.',
        )
        # Provide output test description
        parser.add_argument(
            '--out-test-descr',
            type=str,
            help='Output test description.',
        )
        # Remove SCOS2000 header in the binary packet
        parser.add_argument(
            '--scos-header', nargs=1,
            type=int, default=[None],
            help='Remove the '
            'SCOS2000 header in the packet(s).'
            ' (Value for MOC DDS should be {0} bytes.)'.format(SCOS_HEADER_BYTES)
        )
        # no-spice keyword to force not use of SPICE kernels
        parser.add_argument(
            '--no-spice', action='store_true',
            default=False,
            help='If False, then use SPICE kernels to compute time (SPICE_KERNEL_PATH env. variable must be defined)'
        )
class MebDbToXmlCommand(Command):
    """
    Command to extract test log data from a MEB GSE database and convert it into XML files (one file per test).
    """
    __command__ = 'guest_mebdb_to_xml'
    __command_name__ = 'mebdb_to_xml'
    __parent__ = 'guest'
    __parent_arguments__ = ['base']
    __help__ = """
    Command to export test log data from a MEB GSE database (one XML file per test).
    """
    def setup_tasks(self, pipeline):
        """Build the looped MEB GSE database -> XML export workflow."""
        # starting point of the pipeline
        start = mebdb_to_test()
        # task starting the loop
        end = test_to_xml()
        # create the tasks workflow
        pipeline | start | end
        # define the start points of the pipeline
        pipeline.start = start
        pipeline.end = end
        # create a loop (one iteration per test yielded by start.generator)
        pipeline.loop(start, end, start.generator)
class XmlToMebDbCommand(Command):
    """Command to import a given test log XML file into a MEB GSE database."""
    __command__ = 'guest_xml_to_mebdb'
    __command_name__ = 'xml_to_mebdb'
    __parent__ = 'guest'
    __parent_arguments__ = ['base']
    __help__ = 'Command to import a given test log XML into a MEB GSE database.'
    def add_arguments(self, parser):
        """Declare the command-line arguments of the xml_to_mebdb command."""
        parser.add_argument(
            'test_log_xml',
            nargs=1,
            type=str,
            default=None,
            help='Input test log XML file to import'
        )
    def setup_tasks(self, pipeline):
        """Build the XML parsing -> MEB GSE database import workflow."""
        # Parse input file, then import data inside MEB GSE database
        start = xml_to_test()
        end = test_to_mebdb()
        # set the pipeline workflow
        pipeline | start | end
        # define the start points of the pipeline
        pipeline.start = start
class TxtToXmlCommand(Command):
    """
    Manage command to convert an input ADS GSE text format file
    into a MEB GSE XML test log file.
    """
    __command__ = 'guest_txt_to_xml'
    __command_name__ = 'txt_to_xml'
    __parent__ = 'guest'
    __parent_arguments__ = ['base']
    __help__ = """
    Manage command to convert an input ADS GSE text format file
    into a MEB GSE XML test log file.
    """
    def add_arguments(self, parser):
        """Declare the command-line arguments of the txt_to_xml command."""
        # add lstable argument
        # LSTableMixin.add_arguments(parser)
        # path to input ADS file (validated at parse time)
        parser.add_argument(
            'ads_gse_txt',
            help="""
            The input ADS GSE text format file to convert.
            """,
            type=valid_single_file,
        )
        # path to the output test log XML file
        parser.add_argument(
            '--output-file',
            help="""
            The output test log XML file.
            """,
            type=str,
            default=None,
        )
        # Remove header in bytes from the binary packets
        parser.add_argument(
            '--remove-header',
            help='Remove a header (in bytes) from the binary packet(s)',
            type=int,
            default=None,
        )
    def setup_tasks(self, pipeline):
        """Build the ADS text -> packet identification -> XML workflow."""
        # Import task (local import to avoid a hard roc.rpl dependency at module load)
        from roc.rpl.tasks import IdentifyPackets as identify_packets
        # Define start/end tasks
        start = txt_to_test()
        # Create topology
        pipeline | start | identify_packets() | test_to_xml()
        # define the start points of the pipeline
        pipeline.start = start
class XmlToDdsCommand(Command):
    """
    Command to convert an input MEB GSE test log XML format file
    into MOC DDS TmRaw and/or TcReport XML file(s).
    """
    __command__ = 'guest_xml_to_dds'
    __command_name__ = 'xml_to_dds'
    __parent__ = 'guest'
    __parent_arguments__ = ['base']
    __help__ = """
    Command to convert an input MEB GSE test log XML format file
    into MOC DDS TmRaw and/or TcReport XML file(s).
    """
    def add_arguments(self, parser):
        """Declare the command-line arguments of the xml_to_dds command."""
        # add lstable argument
        # LSTableMixin.add_arguments(parser)
        # path to the input test log XML file (validated at parse time)
        parser.add_argument(
            'test_log_xml',
            help="""
            The input MEB GSE Test log XML format file to convert.
            """,
            type=valid_single_file,
        )
        # Remove header in bytes from the binary packets
        parser.add_argument(
            '--scos-header-size',
            help='Bytes length of the dummy SCOS header to add in the DDS binary packet(s)',
            type=int,
            default=None,
        )
        # Name of the output DDS tmraw xml file
        parser.add_argument(
            '--output-tmraw-xml',
            help='Name of the output DDS tmraw xml file',
            type=str,
        )
        # Name of the output DDS tcreport xml file
        parser.add_argument(
            '--output-tcreport-xml',
            help='Name of the output DDS tcreport xml file',
            type=str,
        )
    def setup_tasks(self, pipeline):
        """Build the XML parsing -> TmRaw + TcReport export workflow."""
        # Import task (local import to avoid a hard roc.rpl dependency at module load)
        from roc.rpl.tasks import IdentifyPackets as identify_packets
        # Define start task
        start = xml_to_test()
        # Create topology
        pipeline | start | identify_packets() | raw_to_tmraw() | raw_to_tcreport()
        # define the start point of the pipeline
        pipeline.start = start
class XmlToTxtCommand(Command):
    """
    Convert a MEB GSE test log XML file into an ADS GSE text format file.
    """
    __command__ = 'guest_xml_to_txt'
    __command_name__ = 'xml_to_txt'
    __parent__ = 'guest'
    __parent_arguments__ = ['base']
    __help__ = """
    Command to convert an input MEB GSE text log XML file into
    a ADS GSE text format file.
    """
    def add_arguments(self, parser):
        """Declare the command-line arguments of the xml_to_txt command."""
        # Mandatory input: the MEB GSE test log XML file
        parser.add_argument(
            'test_log_xml',
            type=valid_single_file,
            help="""
            The input MEB GSE test log XML file to convert.
            """,
        )
        # Optional output path for the generated ADS text file
        parser.add_argument(
            '--output-file',
            default=None,
            type=str,
            help="""
            The output ADS text file.
            """,
        )
    def setup_tasks(self, pipeline):
        """Build the XML parsing -> ADS text export workflow."""
        parse_task = xml_to_test()
        export_task = test_to_txt()
        # Chain the two tasks into the pipeline topology
        pipeline | parse_task | export_task
        # The pipeline starts with the XML parsing task
        pipeline.start = parse_task
class ClearDbCommand(Command):
    """
    Manage command to clear a given test
    inside the ROC database,
    providing the test name and uuid.
    """
    __command__ = 'guest_clear'
    __command_name__ = 'clear'
    __parent__ = 'guest'
    __parent_arguments__ = ['guest']
    __help__ = """
    Command to clear a test inside the ROC database.
    """
    def add_arguments(self, parser):
        """Declare the command-line arguments of the clear command."""
        # the name of the test
        parser.add_argument(
            '--test-name',
            nargs=1,
            help="""
            The name of the test to remove.
            """,
            type=str,
            default=[None],
        )
        # the UUID
        parser.add_argument(
            '--test-uuid',
            nargs=1,
            help="""
            The UUID of the test to remove.
            """,
            type=str,
            default=[None],
        )
        # Clear all tests
        parser.add_argument(
            '--clear-all',
            help="""
            Clear all tests in the ROC database (USE WITH CAUTION!).
            """,
            action='store_true',
        )
    def setup_tasks(self, pipeline):
        """Build the single-task database clearing workflow."""
        # starting task
        start = clear_test()
        # Set test terminated status off
        pipeline.properties.test_terminated = False
        # Set input arguments (unwrap the 1-element lists produced by nargs=1)
        pipeline.properties.test_name = pipeline.properties.test_name[0]
        pipeline.properties.test_uuid = pipeline.properties.test_uuid[0]
        # create the tasks and their dependencies
        pipeline | start
        # define the start points of the pipeline
        pipeline.start = start
class ClearDbFromXmlCommand(Command):
    """
    Remove a given test from the ROC database, identified by its
    test log XML file.
    """
    __command__ = 'guest_clear_xml'
    __command_name__ = 'clear_from_xml'
    __parent__ = 'guest'
    __parent_arguments__ = ['base']
    __help__ = """
    Command to clear a test inside the ROC database from
    its test log XML file.
    """
    def add_arguments(self, parser):
        """Declare the command-line arguments of the clear_from_xml command."""
        # Mandatory input: the XML file of the test to delete
        parser.add_argument(
            'test_log_xml',
            type=str,
            help="""
            The XML file the test to remove.
            """,
        )
    def setup_tasks(self, pipeline):
        """Build the XML parsing -> database clearing workflow."""
        parse_task = xml_to_test()
        clear_task = clear_test()
        # Do not require the test to be terminated before clearing it
        pipeline.properties.test_terminated = False
        # Chain the two tasks into the pipeline topology
        pipeline | parse_task | clear_task
        # The pipeline starts with the XML parsing task
        pipeline.start = parse_task
class L0ToXmlCommand(Command):
    """Command to convert a RPW L0 hd5 file into a MEB GSE test log XML file."""
    __command__ = 'guest_l0_to_xml'
    __command_name__ = 'l0_to_xml'
    __parent__ = 'guest'
    __parent_arguments__ = ['base']
    __help__ = 'Command to convert a given RPW L0 hd5 file into a MEB GSE test log XML file'
    def add_arguments(self, parser):
        """Declare the command-line arguments of the l0_to_xml command."""
        parser.add_argument(
            'l0_file',
            type=str,
            nargs=1,
            help='Input RPW L0 hd5 file to convert'
        )
        parser.add_argument('--test-log-xml',
                            nargs=1,
                            default=None,
                            type=str,
                            help='Full path of the output test log XML file'
                            )
    def setup_tasks(self, pipeline):
        """Build the L0 parsing -> test log XML export workflow."""
        # Parse input file, then convert it into output test log XML
        start = L0ToTest()
        end = test_to_xml()
        # set the pipeline workflow
        pipeline | start | end
        # define the start points of the pipeline
        pipeline.start = start
class FromXmlCommand(Command):
    """Command to generate RPW L0 files from a MEB GSE test log XML file."""
    __command__ = 'guest_testlog_to_l0'
    __command_name__ = 'testlog_to_l0'
    __parent__ = 'guest'
    __parent_arguments__ = ['base']
    __help__ = 'Command to generate RPW L0 files from a given input MEB GSE test log XML file'
    def add_arguments(self, parser):
        """Declare the command-line arguments of the testlog_to_l0 command."""
        parser.add_argument(
            '--test-log-xml',
            required=True,
            type=str,
            nargs=1,
            help='Input RPW MEB GSE test log XML file to process'
        )
    def setup_tasks(self, pipeline):
        """Build the test log XML -> L0 production workflow."""
        # Parse input file, then convert it into output L0 file(s)
        start = xml_to_test()
        end = TestToL0()
        # set the pipeline workflow
        pipeline | start | ParseTestPackets() | end
        # define the start points of the pipeline
        pipeline.start = start
# Reference documentation: ROC-GEN-SYS-NTT-00038-LES
"""
Database model for packet_log table.
"""
from poppy.core.db.non_null_column import NonNullColumn
from poppy.core.db.base import Base
from sqlalchemy import String, ForeignKey, UniqueConstraint, Index
from sqlalchemy.orm import relationship, validates, backref
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.dialects.postgresql import (
BIGINT,
BOOLEAN,
DOUBLE_PRECISION,
ENUM,
INTEGER,
SMALLINT,
TIMESTAMP,
)
__all__ = [
'PacketLog',
]
class PacketLog(Base):
    """
    Class representation of the table for packet_log table in the ROC database.
    """
    # Surrogate primary key
    id_packet_log = NonNullColumn(BIGINT(), primary_key=True)
    length = NonNullColumn(INTEGER(),
                           descr='Packet length in bytes')
    type = NonNullColumn(String(8),
                         descr='Packet type (TC or TM)')
    category = NonNullColumn(String(512),
                             descr='Packet PALISADE category')
    apid = NonNullColumn(INTEGER(), nullable=True,
                         descr='Packet APID')
    sync_flag = NonNullColumn(BOOLEAN, nullable=True,
                              descr='TM packet time synchronization flag')
    utc_time = NonNullColumn(TIMESTAMP,
                             descr='Packet creation/execution UTC time')
    # Packet identification (SRDB / PALISADE / IDB)
    srdb_id = NonNullColumn(String(16),
                            descr='Packet name (SRDB ID)')
    palisade_id = NonNullColumn(String(256),
                                descr='Packet PALISADE ID')
    binary = NonNullColumn(String(),
                           descr='Packet raw binary data (in hexadecimal)')
    sha = NonNullColumn(String(), nullable=True,
                        descr='Packet sha (hexdigest)')
    idb_version = NonNullColumn(String(128), nullable=True,
                                descr='IDB version used to identify packet')
    idb_source = NonNullColumn(String(128), nullable=True,
                               descr='IDB source used to identify packet')
    creation_time = NonNullColumn(String(1024), nullable=True,
                                  descr='Packet creation time in CCSDS CUC format coarse:fine. For TM only.')
    # TC-only acknowledgment / sequence fields
    # (note the space added between 'acknowledgment' and 'execution':
    # the implicit string concatenation previously glued the two words)
    ack_exe_state = NonNullColumn(String(16), nullable=True,
                                  descr='Packet acknowledgment '
                                  'execution completion status. For TC only.')
    ack_acc_state = NonNullColumn(String(16), nullable=True,
                                  descr='TC packet acknowledgment acceptance status. For TC only.')
    sequence_name = NonNullColumn(String(16), nullable=True,
                                  descr='Sequence name. For TC only.')
    unique_id = NonNullColumn(String(256), nullable=True,
                              descr='Unique ID. For TC only.')
    insertion_time = NonNullColumn(TIMESTAMP,
                                   descr='Packet insertion local time.')
    __tablename__ = 'packet_log'
    # A packet is uniquely identified by its UTC time and SRDB ID
    __table_args__ = (
        UniqueConstraint('utc_time', 'srdb_id'),
        {
            'schema': 'gse',
        }
    )
class InvalidPacketLog(Base):
    """
    Class representation of the table for invalid_packet_log table in the ROC database.
    """
    # Surrogate primary key
    id_invalid_packet_log = NonNullColumn(BIGINT(), primary_key=True)
    idb_version = NonNullColumn(String(128), nullable=True,
                                descr='IDB version used to analyze packet')
    idb_source = NonNullColumn(String(128), nullable=True,
                               descr='IDB source used to analyze packet')
    apid = NonNullColumn(INTEGER(), nullable=True,
                         descr='Packet APID')
    binary = NonNullColumn(String(),
                           descr='Packet raw binary data (in hexadecimal)')
    comment = NonNullColumn(String(), nullable=True,
                            descr='Additional comment about why packet is invalid')
    insertion_time = NonNullColumn(TIMESTAMP,
                                   descr='Packet insertion local time.')
    # Identification fields are nullable here: an invalid packet may not
    # have been identified at all
    utc_time = NonNullColumn(TIMESTAMP,
                             descr='Packet creation/execution UTC time', nullable=True)
    srdb_id = NonNullColumn(String(16),
                            descr='Packet name (SRDB ID)', nullable=True)
    sha = NonNullColumn(String(), nullable=True,
                        descr='Packet sha (hexdigest)')
    __tablename__ = 'invalid_packet_log'
    # The raw binary content itself is the uniqueness criterion
    __table_args__ = (
        UniqueConstraint('binary'),
        {
            'schema': 'gse',
        }
    )
"""GUEST plugin tasks related to RPW L0 file handling."""
import uuid
from datetime import datetime
import sys
import os
from roc.guest.constants import DATA_VERSION, TIME_ISO_STRFORMAT
from roc.guest.tools import valid_data_version
from roc.guest.guest import Test
try:
from poppy.core.logger import logger
from poppy.core.task import Task
from poppy.core.target import FileTarget, PyObjectTarget
except Exception:
sys.exit('POPPy framework seems to not be installed properly!')
try:
from roc.film.tools.metadata import init_l0_meta, get_spice_kernels
from roc.film.tools.file_helpers import generate_filepath, get_output_dir
from roc.film.tools.l0 import L0
except Exception:
sys.exit('Dependencies are missing!')
__all__ = ['L0ToTest', 'TestToL0']
class L0ToTest(Task):
    """
    Parse input L0 and save content as a Test class instance.
    """
    plugin_name = 'roc.guest'
    name = 'l0_to_test'
    def add_targets(self):
        """Declare the task input (L0 file) and output (raw_data) targets."""
        self.add_input(target_class=FileTarget,
                       identifier='l0_file',
                       filepath=self.get_l0_file())
        self.add_output(target_class=PyObjectTarget,
                        identifier='raw_data')
    def get_l0_file(self):
        """Return the input L0 file path from the pipeline arguments (or None)."""
        try:
            return self.pipeline.get('l0_file', default=[None])[0]
        except Exception:
            # Best-effort lookup: implicitly returns None when the
            # pipeline argument cannot be read
            pass
    def run(self):
        """Load the input L0 file content into the 'raw_data' output target."""
        try:
            l0_file = self.inputs['l0_file'].filepath
        except Exception:
            logger.error('No input RPW L0 file found')
            return
        # Store input L0 data as a Test class instance into output target value
        self.outputs['raw_data'].value = Test.from_l0(l0_file)
class TestToL0(Task):
    """
    Save test class instance data into an output L0 file.
    """
    plugin_name = 'roc.guest'
    name = 'test_to_l0'
    def add_targets(self):
        """Declare the task input (raw_data) and output (L0 file) targets."""
        self.add_input(target_class=PyObjectTarget,
                       identifier='raw_data')
        self.add_output(target_class=FileTarget,
                        identifier='l0_file')
    def setup_input(self):
        """
        Prepare the task inputs.

        :return: True if the inputs are valid and the task can run,
            False otherwise
        """
        # Get/create list of well processed L0 files
        self.processed_files = self.pipeline.get(
            'processed_files', default=[], create=True)
        # Get/create list of failed DDS files
        self.failed_files = self.pipeline.get(
            'failed_files', default=[], create=True)
        # Get test data object
        self.test_data = self.inputs['raw_data'].value
        if not self.test_data:
            logger.warning('Stopping test_to_l0 task: No input raw_data provided')
            return False
        else:
            # Data version comes from the test data if set, otherwise from
            # the --data-version pipeline argument (falling back to DATA_VERSION)
            if self.test_data.version:
                self.data_version = self.test_data.version
            else:
                self.data_version = valid_data_version(
                    self.pipeline.get('data_version', default=[DATA_VERSION])[0])
        # If output directory not found, create it
        self.output_dir = get_output_dir(self.pipeline)
        if not os.path.isdir(self.output_dir):
            logger.debug(f'Making {self.output_dir}...')
            os.makedirs(self.output_dir)
        return True
    def run(self):
        """Produce the output RPW L0 HDF5 file from the input test data."""
        # Initialize inputs
        if not self.setup_input():
            return
        # Initialize L0 metadata
        extra_attrs = {'Data_version': self.data_version,
                       'TIME_MIN': self.test_data.date.strftime(TIME_ISO_STRFORMAT),
                       'TIME_MAX': self.test_data.terminated_date.strftime(TIME_ISO_STRFORMAT),
                       'Generation_date': datetime.utcnow().isoformat(),
                       'File_ID': str(uuid.uuid4()),
                       }
        # Attempt to add SPICE kernels in L0 metadata (best-effort)
        try:
            sclk_file = get_spice_kernels(time_instance=self.test_data.packet_parser.time,
                                          pattern='solo_ANC_soc-sclk')
        except Exception:
            sclk_file = None
        if sclk_file:
            # Keep only the last (most recent) matching SCLK kernel
            extra_attrs['SPICE_KERNELS'] = sclk_file[-1]
        else:
            logger.info('No SPICE SCLK kernel found!')
        # Initialize L0 metadata
        l0_metadata = init_l0_meta(self,
                                   extra_attrs=extra_attrs)
        # Generate L0 filepath
        l0_filepath = generate_filepath(self, l0_metadata, '.h5')
        # Write L0 file, keeping track of successes/failures in the pipeline
        try:
            L0().to_hdf5(l0_filepath,
                         packet_parser=self.test_data.packet_parser,
                         metadata=l0_metadata)
        except Exception:
            logger.exception(f'Producing {l0_filepath} has failed!')
            self.failed_files.append(l0_filepath)
        else:
            self.processed_files.append(l0_filepath)
            logger.info(f'{l0_filepath} saved')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.