code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
"""Placeholder docstring"""
from __future__ import absolute_import
import inspect
import sys
from typing import Callable
import six
from sagemaker_containers import _mapping
def matching_args(fn, dictionary):  # type: (Callable, _mapping.Mapping) -> dict
    """Return the subset of ``dictionary`` whose keys match ``fn``'s argument names.

    Example:

        def train(channel_dirs, model_dir): pass

        dictionary = {'channel_dirs': {}, 'model_dir': '/opt/ml/model', 'other_args': None}

        args = functions.matching_args(train, dictionary)  # {'channel_dirs': {},
                                                              'model_dir': '/opt/ml/model'}
        train(**args)

    Args:
        fn (function): a function
        dictionary (dict): the dictionary with the keys

    Returns:
        (dict) a dictionary with only matching arguments.
    """
    spec = getargspec(fn)

    # A ** parameter accepts any keyword, so the whole dictionary matches.
    if spec.keywords:
        return dictionary

    return _mapping.split_by_criteria(dictionary, spec.args).included
def getargspec(fn):  # type: (Callable) -> inspect.ArgSpec
    """Get the names and default values of a function's arguments.

    Args:
        fn (function): a function

    Returns:
        `inspect.ArgSpec`: A collections.namedtuple with the following attributes:

            * Args:
                args (list): a list of the argument names (it may contain nested lists).
                varargs (str): name of the * argument or None.
                keywords (str): names of the ** argument or None.
                defaults (tuple): an n-tuple of the default values of the last n arguments.
    """
    if sys.version_info[0] == 2:  # equivalent to six.PY2
        return inspect.getargspec(fn)  # pylint: disable=deprecated-method

    full_arg_spec = inspect.getfullargspec(fn)
    # inspect.ArgSpec was removed in Python 3.11; fall back to an equivalent namedtuple so
    # callers can keep accessing .args / .varargs / .keywords / .defaults.
    arg_spec_cls = getattr(inspect, "ArgSpec", None)
    if arg_spec_cls is None:
        import collections

        arg_spec_cls = collections.namedtuple("ArgSpec", "args varargs keywords defaults")
    return arg_spec_cls(
        full_arg_spec.args, full_arg_spec.varargs, full_arg_spec.varkw, full_arg_spec.defaults
    )
def error_wrapper(fn, error_class):  # type: (Callable or None, Exception) -> ...
    """Wrap ``fn`` so that any exception it raises is re-raised as ``error_class``.

    Args:
        fn (function): function to wrapped
        error_class (Exception): Error class to be re-raised

    Returns:
        (object): fn wrapped in a try catch.
    """

    def wrapper(*args, **kwargs):
        try:
            result = fn(*args, **kwargs)
        except Exception as e:  # pylint: disable=broad-except
            # Preserve the original traceback while surfacing the framework error type.
            six.reraise(error_class, error_class(e), sys.exc_info()[2])
        else:
            return result

    return wrapper
"""Placeholder docstring"""
from __future__ import absolute_import
import warnings
import flask
from six.moves import http_client
from sagemaker_containers import _content_types, _env, _logging, _mapping
# Module-level serving environment, used as the default by Worker and Request below.
env = _env.ServingEnv()


def default_healthcheck_fn():  # type: () -> Response
    """Ping is default health-check handler. Returns 200 with no content.

    During a new serving container startup, Amazon SageMaker starts sending periodic GET requests
    to the /ping endpoint to ensure that the container is ready for predictions.

    The simplest requirement on the container is to respond with an HTTP 200 status code and an
    empty body. This indicates to Amazon SageMaker that the container is ready to accept inference
    requests at the /invocations endpoint.

    If the container does not begin to consistently respond with 200s during the first 30 seconds
    after startup, the CreateEndPoint and UpdateEndpoint APIs will fail.

    While the minimum bar is for the container to return a static 200, a container developer can use
    this functionality to perform deeper checks. The request timeout on /ping attempts is 2 seconds.

    More information on how health-check works can be found here:
    https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html#your-algorithms-inference-algo-ping-requests

    Returns:
        (flask.Response): with status code 200
    """
    # An empty 200 response is all SageMaker requires from /ping.
    return Response(status=http_client.OK)
class Worker(flask.Flask):
    """Flask application that receives predictions from a Transformer ready for inferences."""

    def __init__(
        self,
        transform_fn,
        initialize_fn=None,
        module_name=None,
        healthcheck_fn=None,
        execution_parameters_fn=None,
    ):
        """Creates a Flask application from a transformer.

        Args:
            transform_fn (function): responsible to make predictions against the model.
                Follows the signature:

                * Returns:
                    `sagemaker_containers.transformers.TransformSpec`: named tuple with prediction
                    data.

            initialize_fn (function, optional): this function is called when the Flask application
                starts. It does not have return type or arguments.

            healthcheck_fn (function, optional): function that will be used for healthcheck calls
                when the container starts; if not specified, it will use ping as the default
                healthcheck call.

            execution_parameters_fn (function, optional): function that will be used for responding
                to execution_parameters calls. If not specified, the execution-parameters endpoint
                will not be supported.

                * Returns:
                    `flask.app.Response`: response object with new healthcheck response.

            module_name (str): the module name which implements the worker. If not specified, it
                will use sagemaker_containers.ServingEnv().module_name as the default module name.
        """
        super(Worker, self).__init__(module_name or env.module_name)

        # the logger is configured after importing the framework library, allowing the framework to
        # configure logging at import time.
        _logging.configure_logger(env.log_level)

        if initialize_fn:
            # NOTE(review): before_first_request was deprecated in Flask 2.2 and removed in 2.3 —
            # confirm the pinned Flask version still provides it.
            self.before_first_request(initialize_fn)

        self.add_url_rule(
            rule="/invocations", endpoint="invocations", view_func=transform_fn, methods=["POST"]
        )
        self.add_url_rule(
            rule="/ping", endpoint="ping", view_func=healthcheck_fn or default_healthcheck_fn
        )

        if execution_parameters_fn:
            self.add_url_rule(
                rule="/execution-parameters",
                endpoint="execution-parameters",
                view_func=execution_parameters_fn,
                methods=["GET"],
            )

        # Route request parsing through the JSON-aware Request class defined below.
        self.request_class = Request
class Response(flask.Response):
    """Flask response that defaults its MIME type to application/json."""

    default_mimetype = _content_types.JSON

    def __init__(
        self,
        response=None,
        accept=None,
        status=http_client.OK,
        headers=None,
        mimetype=None,
        direct_passthrough=False,
    ):
        """Create a response, warning when the deprecated ``accept`` argument is supplied."""
        if accept:
            warnings.warn(
                'ignoring deprecated "accept" argument to Response.__init__', DeprecationWarning
            )
        # Positional args map onto flask.Response(response, status, headers, mimetype,
        # content_type, direct_passthrough); content_type is intentionally left None.
        super(Response, self).__init__(
            response, status, headers, mimetype, None, direct_passthrough
        )
class Request(flask.Request, _mapping.MappingMixin):
    """The Request object used to read request data.

    Example:

        POST /invocations
        Content-Type: 'application/json'.
        Accept: 'application/json'.

        42

        >>> from sagemaker_containers import _env
        >>> request = Request()

        >>> data = request.data

        >>> print(str(request))
        {'content_length': '2', 'content_type': 'application/json', 'data': '42',
        'accept': 'application/json', ... }
    """

    default_mimetype = _content_types.JSON

    def __init__(self, environ=None, serving_env=None):  # type: (dict, _env.ServingEnv) -> None
        """Create a request, defaulting to the active Flask request environ."""
        super(Request, self).__init__(environ=environ or flask.request.environ)

        serving_env = serving_env or env
        self._default_accept = serving_env.default_accept

    @property
    def content_type(self):  # type: () -> str
        """The request's content-type.

        Returns:
            (str): The value, if any, of the header 'ContentType' (used by some AWS services) and
                'Content-Type'. Otherwise, returns 'application/json' as default.
        """
        return (
            self.headers.get("ContentType")
            or self.headers.get("Content-Type")
            or _content_types.JSON
        )

    @property
    def accept(self):  # type: () -> str
        """The content-type for the response to the client.

        Returns:
            (str): The value of the header 'Accept' or the user-supplied
                SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT environment variable.
        """
        accept = self.headers.get("Accept")
        # Treat a missing or wildcard Accept header as "use the configured default".
        if not accept or accept == _content_types.ANY:
            return self._default_accept
        return accept

    @property
    def content(self):  # type: () -> object
        """The request incoming data.

        It automatically decodes from utf-8 for text content types.

        Returns:
            (obj): incoming data
        """
        as_text = self.content_type in _content_types.UTF8_TYPES
        return self.get_data(as_text=as_text)
"""This module contains utility functions used to generate recordio-protobuf format."""
import struct
import sys
import numpy as np
from scipy.sparse import issparse
from sagemaker_containers.record_pb2 import Record
def _resolve_type(dtype):
    """Return the record type string corresponding to ``dtype``.

    Args:
        dtype (numpy.dtype or str): numpy.dtype object

    Returns:
        (str): string corresponding to the dtype
    """
    # Equality (not dict hashing) is used on purpose: np.dtype.__eq__ also accepts
    # dtype-like strings such as "float32".
    for candidate, resolved in (
        (np.dtype(int), "Int32"),
        (np.dtype(float), "Float64"),
        (np.dtype("float32"), "Float32"),
    ):
        if dtype == candidate:
            return resolved
    raise ValueError("Unsupported dtype {} on array".format(dtype))
def _write_feature_tensor(resolved_type, record, vector):
    """Write ``vector`` into the typed feature tensor of ``record``.

    Args:
        resolved_type (str): String representing the feature type
        record (Record object): Record object to write to
        vector (np.array or csr_matrix): Represents the row (1D Array)
    """
    values = record.features["values"]
    if resolved_type == "Int32":
        target = values.int32_tensor
    elif resolved_type == "Float64":
        target = values.float64_tensor
    elif resolved_type == "Float32":
        target = values.float32_tensor
    else:
        # Unknown types are silently ignored, matching the original behavior.
        return
    target.values.extend(vector)
def _write_label_tensor(resolved_type, record, scalar):
    """Write the label value into the typed label tensor of ``record``.

    Args:
        resolved_type (str): String representing the feature type
        record (Record object): Record object
        scalar (int or float32 or float64): label value
    """
    values = record.label["values"]
    if resolved_type == "Int32":
        target = values.int32_tensor
    elif resolved_type == "Float64":
        target = values.float64_tensor
    elif resolved_type == "Float32":
        target = values.float32_tensor
    else:
        # Unknown types are silently ignored, matching the original behavior.
        return
    target.values.extend([scalar])
def _write_keys_tensor(resolved_type, record, vector):
    """Write sparse-tensor key indices into ``record``.

    Args:
        resolved_type (str): Representing the type of key entry
        record (Record object): Record to which the key would be added
        vector (array): Array of keys to be added
    """
    values = record.features["values"]
    if resolved_type == "Int32":
        target = values.int32_tensor
    elif resolved_type == "Float64":
        target = values.float64_tensor
    elif resolved_type == "Float32":
        target = values.float32_tensor
    else:
        # Unknown types are silently ignored, matching the original behavior.
        return
    target.keys.extend(vector)
def _write_shape(resolved_type, record, scalar):
    """Write the shape entry into ``record``'s typed feature tensor.

    Args:
        resolved_type (str): Representing the type of key entry
        record (Record object): Record to which the key would be added
        scalar (int or float32 or float64): the shape to be added to the record
    """
    values = record.features["values"]
    if resolved_type == "Int32":
        target = values.int32_tensor
    elif resolved_type == "Float64":
        target = values.float64_tensor
    elif resolved_type == "Float32":
        target = values.float32_tensor
    else:
        # Unknown types are silently ignored, matching the original behavior.
        return
    target.shape.extend([scalar])
def _write_numpy_to_dense_tensor(file, array, labels=None):
    """Write each row of a numpy matrix as a dense Record to ``file``.

    Args:
        file (file-like object): file-like object where the
                                records will be written
        array (numpy array): numpy array containing the features
        labels (numpy array): numpy array containing the labels
    """
    # Validate the shapes of array and labels, and resolve their element types.
    if len(array.shape) != 2:
        raise ValueError("Array must be a Matrix")

    resolved_label_type = None
    if labels is not None:
        if len(labels.shape) != 1:
            raise ValueError("Labels must be a Vector")
        if labels.shape[0] not in array.shape:
            raise ValueError(
                "Label shape {} not compatible with array shape {}".format(
                    labels.shape, array.shape
                )
            )
        resolved_label_type = _resolve_type(labels.dtype)
    resolved_type = _resolve_type(array.dtype)

    # Reuse one Record object, serializing each row into the file.
    record = Record()
    for index, vector in enumerate(array):
        record.Clear()
        _write_feature_tensor(resolved_type, record, vector)
        if labels is not None:
            _write_label_tensor(resolved_label_type, record, labels[index])
        _write_recordio(file, record.SerializeToString())
def _write_spmatrix_to_sparse_tensor(file, array, labels=None):
    """Write each row of a scipy sparse matrix as a sparse Record to ``file``.

    Args:
        file (file-like object): file-like object where the
                                records will be written
        array (array-like): a sparse matrix containing features
        labels (numpy array): numpy array containing the labels
    """
    if not issparse(array):
        raise TypeError("Array must be sparse")

    # Validate the shapes of array and labels, and resolve their element types.
    if len(array.shape) != 2:
        raise ValueError("Array must be a Matrix")

    resolved_label_type = None
    if labels is not None:
        if len(labels.shape) != 1:
            raise ValueError("Labels must be a Vector")
        if labels.shape[0] not in array.shape:
            raise ValueError(
                "Label shape {} not compatible with array shape {}".format(
                    labels.shape, array.shape
                )
            )
        resolved_label_type = _resolve_type(labels.dtype)
    resolved_type = _resolve_type(array.dtype)

    # CSR gives cheap per-row access to data and column indices.
    csr_array = array.tocsr()
    n_rows, n_cols = csr_array.shape

    record = Record()
    for row_idx in range(n_rows):
        record.Clear()
        row = csr_array.getrow(row_idx)
        # Values, then their column indices (keys), then optional label, then shape.
        _write_feature_tensor(resolved_type, record, row.data)
        _write_keys_tensor(resolved_type, record, row.indices.astype(np.uint64))
        if labels is not None:
            _write_label_tensor(resolved_label_type, record, labels[row_idx])
        _write_shape(resolved_type, record, n_cols)
        _write_recordio(file, record.SerializeToString())
# MXNet requires recordio records to have a length in bytes that's a multiple of 4.
# Pre-compute the zero padding needed for each possible remainder (0-3 bytes).
if sys.version_info >= (3,):
    padding = {amount: bytes(bytearray(amount)) for amount in range(4)}
else:
    padding = {amount: bytearray(amount) for amount in range(4)}

# Magic number that marks the start of every RecordIO record.
_kmagic = 0xCED7230A
def _write_recordio(f, data):
    """Write ``data`` to ``f`` framed with the RecordIO magic, length and padding.

    Args:
        f (file-like object): The file-like object to which the data point will be written
        data (bytes): serialized record payload
    """
    length = len(data)
    # Header: magic marker followed by the payload length.
    f.write(struct.pack("I", _kmagic))
    f.write(struct.pack("I", length))
    f.write(data)
    # Pad up to the next multiple of 4 bytes, as required by MXNet RecordIO.
    f.write(padding[(((length + 3) >> 2) << 2) - length])
def _read_recordio(f):
    """Yield the payload of each RecordIO record in ``f``.

    Args:
        f: file like object
    """
    while True:
        header = f.read(4)
        try:
            read_kmagic, = struct.unpack("I", header)
        except struct.error:
            # Short read at end of stream: stop iterating.
            return
        assert read_kmagic == _kmagic
        len_record, = struct.unpack("I", f.read(4))
        yield f.read(len_record)
        # Skip the zero padding that aligns records to 4 bytes.
        pad = (((len_record + 3) >> 2) << 2) - len_record
        if pad:
            f.read(pad)
"""Placeholder docstring"""
from __future__ import absolute_import
import importlib
import os
import shlex
import subprocess # pylint: disable=unused-import
import sys
import textwrap
import warnings
import six
from sagemaker_containers import _env, _errors, _files, _logging, _process
# Module-level logger shared by the helpers below.
logger = _logging.get_logger()

# Fallback module name used when the caller does not supply one.
DEFAULT_MODULE_NAME = "default_user_module_name"
def exists(name):  # type: (str) -> bool
    """Return True if the module exists, False otherwise.

    Args:
        name (str): module name.

    Returns:
        (bool): boolean indicating if the module exists or not.
    """
    try:
        importlib.import_module(name)
        return True
    except ImportError:
        return False
def has_requirements(path):  # type: (str) -> bool
    """Return whether the directory ``path`` contains a requirements.txt file."""
    return os.path.exists(os.path.join(path, "requirements.txt"))
def prepare(path, name):  # type: (str, str) -> None
    """Prepare a Python script (or module) to be imported as a module.

    If the script does not contain a setup.py file, it creates a minimal setup,
    together with a setup.cfg and a MANIFEST.in.

    Args:
        path (str): path to directory with the script or module.
        name (str): name of the script or module.
    """
    setup_path = os.path.join(path, "setup.py")
    if not os.path.exists(setup_path):
        # Generate a minimal setup.py that packages everything in the directory.
        data = textwrap.dedent(
            """
        from setuptools import setup
        setup(packages=[''],
              name="%s",
              version='1.0.0',
              include_package_data=True)
        """
            % name
        )

        logger.info("Module %s does not provide a setup.py. \nGenerating setup.py" % name)

        _files.write_file(setup_path, data)

        # Mark the generated wheel as universal (py2/py3).
        data = textwrap.dedent(
            """
        [wheel]
        universal = 1
        """
        )

        logger.info("Generating setup.cfg")

        _files.write_file(os.path.join(path, "setup.cfg"), data)

        # Include every file in the sdist except Python bytecode artifacts.
        data = textwrap.dedent(
            """
        recursive-include . *
        recursive-exclude . __pycache__*
        recursive-exclude . *.pyc
        recursive-exclude . *.pyo
        """
        )

        logger.info("Generating MANIFEST.in")

        _files.write_file(os.path.join(path, "MANIFEST.in"), data)
def install(path, capture_error=False):  # type: (str, bool) -> None
    """Install a Python module in the executing Python environment.

    Args:
        path (str): Real path location of the Python module.
        capture_error (bool): Default false. If True, the running process captures the
            stderr, and appends it to the returned Exception message in case of errors.
    """
    command = "%s -m pip install . " % _process.python_executable()

    # Install declared dependencies in the same pip invocation, if any.
    if has_requirements(path):
        command = command + "-r requirements.txt"

    logger.info("Installing module with the following command:\n%s", command)

    _process.check_error(
        shlex.split(command), _errors.InstallModuleError, cwd=path, capture_error=capture_error
    )
def s3_download(url, dst):  # type: (str, str) -> None
    """Download a file from S3.

    This method acts as an alias for :meth:`~sagemaker_containers.beta.framework.files.s3_download`
    for backward-compatibility purposes.

    Args:
        url (str): the S3 URL of the file.
        dst (str): the destination where the file will be saved.
    """
    # Thin alias kept for backward compatibility; the implementation lives in _files.
    _files.s3_download(url, dst)
def download_and_install(uri, name=DEFAULT_MODULE_NAME, cache=True):
    # type: (str, str, bool) -> None
    """Download, prepare and install a compressed tar file from S3 or local directory as a module.

    The SageMaker Python SDK saves the user provided scripts as compressed tar files in S3.
    This function downloads this compressed file and, if provided, transforms it
    into a module before installing it.

    This method is the predecessor of
    :meth:`~sagemaker_containers.beta.framework.files.download_and_extract`
    and has been kept for backward-compatibility purposes.

    Args:
        name (str): name of the script or module.
        uri (str): the location of the module.
        cache (bool): defaults to True. It will not download and install the module again if it is
            already installed.
    """
    # Guard clause: nothing to do when caching is enabled and the module is importable.
    if cache and exists(name):
        return

    with _files.tmpdir() as tmpdir:
        module_path = os.path.join(tmpdir, "module_dir")
        _files.download_and_extract(uri, module_path)
        prepare(module_path, name)
        install(module_path)
def run(module_name, args=None, env_vars=None, wait=True, capture_error=False):
    # type: (str, list, dict, bool, bool) -> subprocess.Popen
    """Run a Python module as a script, i.e. ``python -m <module_name> <args>``.

    Searches sys.path for the named module and executes its contents as the __main__ module,
    mirroring the behaviour of the interpreter's ``-m`` flag
    (https://docs.python.org/3/using/cmdline.html#cmdoption-m).

    Example:

        >>>import sagemaker_containers
        >>>from sagemaker_containers.beta.framework import mapping, modules

        >>>env = sagemaker_containers.training_env()
        >>>hyperparameters = env.hyperparameters
        >>>args = mapping.to_cmd_args(hyperparameters)
        >>>env_vars = mapping.to_env_vars()
        >>>modules.run('user_script', args, env_vars)

    Args:
        module_name (str): module name in the same format required by python -m <module-name>
            cli command.
        args (list): A list of program arguments.
        env_vars (dict): A map containing the environment variables to be written.
        wait (bool): If True, block until the module exits and check its exit code; otherwise
            return the launched subprocess.
        capture_error (bool): Default false. If True, the running process captures the
            stderr, and appends it to the returned Exception message in case of errors.
    """
    cmd = [_process.python_executable(), "-m", module_name] + list(args or [])

    _logging.log_script_invocation(cmd, env_vars or {})

    # check_error waits and validates the exit code; create just launches the process.
    launcher = _process.check_error if wait else _process.create
    return launcher(cmd, _errors.ExecuteUserScriptError, capture_error=capture_error)
def import_module(uri, name=DEFAULT_MODULE_NAME, cache=None):  # type: (str, str, bool) -> module
    """Download, prepare, install and import a compressed tar file as a module.

    SageMaker Python SDK saves the user provided scripts as compressed tar files in S3
    https://github.com/aws/sagemaker-python-sdk.

    Args:
        name (str): name of the script or module.
        uri (str): the location of the module.
        cache (bool): deprecated and ignored; a DeprecationWarning is emitted when supplied.

    Returns:
        (module): the imported module
    """
    _warning_cache_deprecation(cache)

    _files.download_and_extract(uri, _env.code_dir)
    prepare(_env.code_dir, name)
    install(_env.code_dir)

    try:
        user_module = importlib.import_module(name)
        # Reload in case the module was already imported before this install.
        six.moves.reload_module(user_module)  # pylint: disable=too-many-function-args
    except Exception as e:  # pylint: disable=broad-except
        six.reraise(_errors.ImportModuleError, _errors.ImportModuleError(e), sys.exc_info()[2])
    return user_module
def run_module(
    uri, args, env_vars=None, name=DEFAULT_MODULE_NAME, cache=None, wait=True, capture_error=False
):
    # type: (str, list, dict, str, bool, bool, bool) -> subprocess.Popen
    """Download, prepare and execute a compressed tar file from S3 or a directory as a module.

    SageMaker Python SDK saves the user provided scripts as compressed tar files in S3
    https://github.com/aws/sagemaker-python-sdk.

    Args:
        uri (str): the location of the module.
        args (list): A list of program arguments.
        env_vars (dict): A map containing the environment variables to be written.
        name (str): name of the script or module.
        cache (bool): deprecated and ignored; a DeprecationWarning is emitted when supplied.
        wait (bool): If True run_module will wait for the user module to exit and check the exit
            code, otherwise it will launch the user module with subprocess and return
            the process object.
        capture_error (bool): Default false. If True, the running process captures the
            stderr, and appends it to the returned Exception message in case of errors.
    """
    _warning_cache_deprecation(cache)

    # Copy so the caller's mapping is never mutated downstream.
    environment = dict(env_vars or {})

    _files.download_and_extract(uri, _env.code_dir)
    prepare(_env.code_dir, name)
    install(_env.code_dir)

    _env.write_env_vars(environment)
    return run(name, args, environment, wait, capture_error)
def _warning_cache_deprecation(cache):
    """Emit a DeprecationWarning when the deprecated ``cache`` argument is supplied."""
    if cache is None:
        return
    warnings.warn(
        "the cache parameter is unnecessary anymore. Cache is always set to True",
        DeprecationWarning,
    )
"""Placeholder docstring"""
from __future__ import absolute_import
import json
import textwrap
import traceback
from six.moves import http_client
from sagemaker_containers import _content_types, _encoders, _env, _errors, _functions, _worker
def default_model_fn(model_dir):
    """Default model loader: always fails, asking the user for an implementation.

    For more information about model loading
    https://github.com/aws/sagemaker-python-sdk#model-loading.

    Args:
        model_dir (str): The directory where model files are stored.

    Returns:
        (obj) the loaded model.
    """
    message = textwrap.dedent(
        """
    Please provide a model_fn implementation.
    See documentation for model_fn at https://github.com/aws/sagemaker-python-sdk
    """
    )
    raise NotImplementedError(message)
def default_input_fn(input_data, content_type):
    """Takes request data and de-serializes the data into an object for prediction.

    When an InvokeEndpoint operation is made against an Endpoint running SageMaker model server,
    the model server receives two pieces of information:

        - The request Content-Type, for example "application/json"
        - The request data, which is at most 5 MB (5 * 1024 * 1024 bytes) in size.

    The input_fn is responsible to take the request data and pre-process it before prediction.

    Args:
        input_data (obj): the request data.
        content_type (str): the request Content-Type.

    Returns:
        (obj): data ready for prediction.
    """
    # Delegates deserialization to the shared content-type aware decoder.
    return _encoders.decode(input_data, content_type)
def default_predict_fn(data, model):
    """Default predictor: always fails, asking the user for an implementation.

    Args:
        model (obj): model loaded by model_fn
        data: de-serialized data returned by input_fn

    Returns:
        (obj): prediction result.
    """
    message = textwrap.dedent(
        """
    Please provide a predict_fn implementation.
    See documentation for predict_fn at https://github.com/aws/sagemaker-python-sdk
    """
    )
    raise NotImplementedError(message)
def default_output_fn(prediction, accept):
    """Function responsible to serialize the prediction for the response.

    Args:
        prediction (obj): prediction returned by predict_fn .
        accept (str): accept content-type expected by the client.

    Returns:
        (worker.Response): a Flask response object with the following args:

            * Args:
                response: the serialized data to return
                accept: the content-type that the data was transformed to.
    """
    # Encode with the shared content-type aware encoder and echo the accept type back.
    return _worker.Response(response=_encoders.encode(prediction, accept), mimetype=accept)
class Transformer(object):
    """The Transformer is a proxy between the worker and the framework transformation functions.

    It implements the default framework functions for serving.

    Examples:
        >>>import os
        >>>from sagemaker_containers import _env, _modules, _transformer
        >>>import Keras
        >>>ServingEnv = _env.ServingEnv()
        >>>
        >>>def predict_fn(model, data):
        >>>    return model.predict(data)
        >>>
        >>>def model_fn(model_dir):
        >>>    return Keras.models.load_model(os.path.join(model_dir, 'minimlmodel'))
        >>>
        >>>transformer = _transformer.Transformer(predict_fn=predict_fn, model_fn=model_fn)
        >>>
        >>>mod = _modules.download_and_import(ServingEnv.module_dir, ServingEnv.module_name)
        >>>transformer.load_user_fns(mod)
    """

    def __init__(
        self,
        model_fn=None,
        input_fn=None,
        predict_fn=None,
        output_fn=None,
        transform_fn=None,
        error_class=_errors.ClientError,
    ):
        """Default constructor. Wraps any non-default framework function in an error class to
        isolate framework from user errors.

        Args:
            model_fn (fn): Function responsible to load the model.
            input_fn (fn): Takes request data and de-serializes the data into an object for
                prediction.
            predict_fn (fn): Function responsible for model predictions.
            output_fn (fn): Function responsible to serialize the prediction for the response.
            transform_fn (fn): Function responsible for taking input data and returning a
                prediction as a serialized response. This function takes the place of
                ``input_fn``, ``predict_fn``, and ``output_fn``.
            error_class (Exception): Error class used to separate framework and user errors.
        """
        self._model = None
        self._model_fn = (
            _functions.error_wrapper(model_fn, error_class) if model_fn else default_model_fn
        )

        # transform_fn replaces the whole input/predict/output pipeline; mixing them is an error.
        if transform_fn and (input_fn or predict_fn or output_fn):
            raise ValueError(
                "Cannot use transform_fn implementation with input_fn, predict_fn, and/or output_fn"
            )

        if transform_fn is not None:
            self._transform_fn = _functions.error_wrapper(transform_fn, error_class)
        else:
            self._transform_fn = self._default_transform_fn

        self._input_fn = (
            _functions.error_wrapper(input_fn, error_class) if input_fn else default_input_fn
        )
        self._predict_fn = (
            _functions.error_wrapper(predict_fn, error_class) if predict_fn else default_predict_fn
        )
        self._output_fn = (
            _functions.error_wrapper(output_fn, error_class) if output_fn else default_output_fn
        )
        self._error_class = error_class

    def initialize(self):  # type: () -> None
        """Execute any initialization necessary to start making predictions with the Transformer.

        The default implementation is used to load the model.

        This function is called by sagemaker_containers.beta.framework.worker.Worker,
        before starting the Flask application.

        The gunicorn server forks multiple workers, executing multiple Flask applications in
        parallel. This function will be called once per each worker.

        It does not have return type or arguments.
        """
        self._model = self._model_fn(_env.model_dir)

    def transform(self):  # type: () -> _worker.Response
        """Take a request with input data, deserialize it, make a prediction, and return a
        serialized response.

        Returns:
            sagemaker_containers.beta.framework.worker.Response: a Flask response object with
                the following args:

                * response: the serialized data to return
                * accept: the content type that the data was serialized into
        """
        request = _worker.Request()
        result = self._transform_fn(
            self._model, request.content, request.content_type, request.accept
        )

        if isinstance(result, tuple):
            # transforms tuple in Response for backwards compatibility
            return _worker.Response(response=result[0], mimetype=result[1])

        return result

    def _default_transform_fn(self, model, content, content_type, accept):
        """Make predictions against the model and return a serialized response.

        This serves as the default implementation of transform_fn, used when the user has not
        implemented one themselves.

        Args:
            model (obj): model loaded by model_fn.
            content: request content.
            content_type (str): the request Content-Type.
            accept (str): accept content-type expected by the client.

        Returns:
            sagemaker_containers.beta.framework.worker.Response or tuple:
                the serialized response data and its content type, either as a Response object or
                a tuple of the form (response_data, content_type)
        """
        try:
            data = self._input_fn(content, content_type)
        except _errors.UnsupportedFormatError as e:
            return self._error_response(e, http_client.UNSUPPORTED_MEDIA_TYPE)

        prediction = self._predict_fn(data, model)

        try:
            result = self._output_fn(prediction, accept)
        except _errors.UnsupportedFormatError as e:
            return self._error_response(e, http_client.NOT_ACCEPTABLE)

        return result

    def _error_response(self, error, status_code):  # pylint: disable=no-self-use
        """Serialize ``error`` into a JSON error body and wrap it in a Response."""
        body = json.dumps(
            {
                "error": error.__class__.__name__,
                "error-message": str(error),
                "stack-trace": traceback.format_exc(),
            }
        )
        return _worker.Response(response=body, status=status_code, mimetype=_content_types.JSON)
"""Placeholder docstring"""
from __future__ import absolute_import
import collections
import itertools
import json
import six
# Result pair for split_by_criteria: `included` matched the criteria, `excluded` did not.
SplitResultSpec = collections.namedtuple("SplitResultSpec", "included excluded")
def to_env_vars(mapping):  # type: (dict) -> dict
    """Transform a dictionary into a dictionary of environment variables.

    Keys are upper-cased and prefixed with ``SM_``; values are passed through if they
    are already strings, bytes are decoded as latin1, None becomes "", and anything
    else is JSON-encoded.

    Example:
        >>>env_vars = mapping.to_env_vars({'model_dir': '/opt/ml/model', 'batch_size': 25})
        >>>
        >>>print(env_vars)
        {'SM_MODEL_DIR': '/opt/ml/model', 'SM_BATCH_SIZE': '25'}

    Args:
        mapping (dict[str, object]): A Python mapping.

    Returns:
        (dict): Dictionary of env vars
    """

    def format_key(key):
        """Add an SM_ prefix to the key and upper-case it; empty/falsy keys map to ""."""
        if key:
            decoded_name = "SM_%s" % str(key).upper()
            return decoded_name
        else:
            return ""

    def format_value(_mapping):
        """Serialize a single value into a string usable as an env var value."""
        if six.PY3 and isinstance(_mapping, six.binary_type):
            # transforms a byte string (b'') in unicode
            return _mapping.decode("latin1")
        elif _mapping is None:
            return ""
        elif isinstance(_mapping, six.string_types):
            return str(_mapping)
        else:
            # deterministic, compact JSON for lists/dicts/numbers/booleans
            return json.dumps(_mapping, sort_keys=True, separators=(",", ":"), ensure_ascii=True)

    return {format_key(k): format_value(v) for k, v in mapping.items()}
def to_cmd_args(mapping):  # type: (dict) -> list
    """Transform a dictionary into a flat, alphabetically-sorted list of CLI arguments.

    Example:
        >>>args = mapping.to_cmd_args({'model_dir': '/opt/ml/model', 'batch_size': 25})
        >>>
        >>>print(args)
        ['--model_dir', '/opt/ml/model', '--batch_size', 25]

    Args:
        mapping (dict[str, object]): A Python mapping.

    Returns:
        (list): List of cmd arguments
    """

    def _name_of(key):
        """Render a key as a CLI flag: `--name` for long names, `-n` for single chars."""
        decoded = _decode(key)
        if not decoded:
            return u""
        return (u"--" if len(decoded) > 1 else u"-") + decoded

    def _value_of(raw):
        """Render a value; dict-like values become a comma-joined `k=v` list."""
        if hasattr(raw, "items"):
            return ",".join(["%s=%s" % (k, v) for k, v in sorted(raw.items())])
        return _decode(raw)

    args = []
    for key in sorted(mapping.keys()):
        args.append(_name_of(key))
        args.append(_value_of(mapping[key]))
    return args
def _decode(obj):  # type: (bytes or str or unicode or object) -> unicode  # noqa ignore=F821
    """Decode an arbitrary object to a unicode string.

    Args:
        obj (bytes or str or unicode or anything serializable): object to be decoded

    Returns:
        object decoded in unicode.
    """
    if obj is None:
        return u""
    if six.PY3:
        if isinstance(obj, six.binary_type):
            # transforms a byte string (b'') in unicode
            return obj.decode("latin1")
        # PY3 strings are unicode.
        return str(obj)
    if isinstance(obj, six.text_type):
        # returns itself if it is unicode
        return obj
    # decodes PY2 string to unicode
    return str(obj).decode("utf-8")
def split_by_criteria(
    dictionary, keys=None, prefix=None
):  # type: (dict, set or list or tuple) -> SplitResultSpec
    """Partition a dictionary into entries matching the criteria and the remainder.

    An entry matches when its key is in `keys` or starts with `prefix`.

    Args:
        dictionary (dict[str, object]): A Python dictionary
        keys (sequence [str]): A sequence of keys which will be added the split criteria
        prefix (str): A prefix which will be added the split criteria

    Returns:
        `SplitResultSpec` : A collections.namedtuple with the following attributes:

            * Args:
                included (dict[str, object]: A dictionary with the keys included in the criteria.
                excluded (dict[str, object]: A dictionary with the keys not included in the
                    criteria.
    """
    criteria = set(keys or [])

    def _matches(key):
        return key in criteria or bool(prefix and key.startswith(prefix))

    included = {k: v for k, v in dictionary.items() if _matches(k)}
    excluded = {k: v for k, v in dictionary.items() if not _matches(k)}
    return SplitResultSpec(included=included, excluded=excluded)
try:  # `Mapping` moved to collections.abc in Python 3.3 and was removed from the
    # collections top level in Python 3.10, so import it with a Python 2 fallback.
    from collections.abc import Mapping as _Mapping
except ImportError:  # Python 2
    from collections import Mapping as _Mapping


class MappingMixin(_Mapping):
    """Expose an object's public properties through the read-only ``Mapping``
    interface: ``obj["prop"]``, ``len(obj)``, iteration and ``dict(obj)``.
    """

    def properties(self):  # type: () -> list
        """Return the names of all attributes defined as properties on this type.

        Returns:
            (list[str]) List of public properties
        """
        _type = type(self)
        return [_property for _property in dir(_type) if self._is_property(_property)]

    def _is_property(self, _property):
        """Return True if class attribute `_property` is a `property` descriptor."""
        return isinstance(getattr(type(self), _property), property)

    def __getitem__(self, k):
        """Look up property `k`; raise KeyError for attributes that are not properties."""
        if not self._is_property(k):
            raise KeyError("Trying to access non property %s" % k)
        return getattr(self, k)

    def __len__(self):
        """Number of public properties."""
        return len(self.properties())

    def __iter__(self):
        """Iterate over the property names."""
        items = {_property: getattr(self, _property) for _property in self.properties()}
        return iter(items)

    def __str__(self):
        """Render as a plain dict of property name -> value."""
        return str(dict(self))
"""Placeholder docstring"""
from __future__ import absolute_import
import contextlib
import json
import os
import shutil
import tarfile
import tempfile
import boto3
from six.moves.urllib import parse
from sagemaker_containers import _env, _params
def write_success_file():  # type: () -> None
    """Write an empty 'success' marker file to the output directory.

    SageMaker treats the presence of this file as the signal that training finished
    successfully; its content is irrelevant.

    See: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html
    """
    write_file(os.path.join(_env.output_dir, "success"), "")
def write_failure_file(failure_msg):  # type: (str) -> None
    """Write `failure_msg` to the 'failure' file in the output directory.

    If training fails after all algorithm output (for example, logging) completes,
    the failure description should be written to this file. In a DescribeTrainingJob
    response, Amazon SageMaker returns the first 1024 characters from this file as
    FailureReason.

    See: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html

    Args:
        failure_msg: The description of failure
    """
    write_file(os.path.join(_env.output_dir, "failure"), failure_msg)
@contextlib.contextmanager
def tmpdir(suffix="", prefix="tmp", directory=None):  # type: (str, str, str) -> None
    """Create a temporary directory with a context manager. The directory is deleted
    when the context exits, even if the body raises.

    The prefix, suffix, and dir arguments are the same as for mkstemp().

    Args:
        suffix (str): If suffix is specified, the file name will end with that suffix,
            otherwise there will be no suffix.
        prefix (str): If prefix is specified, the file name will begin with that prefix;
            otherwise, a default prefix is used.
        directory (str): If directory is specified, the file will be created in that directory;
            otherwise, a default directory is used.

    Returns:
        str: path to the directory
    """
    tmp = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=directory)
    try:
        yield tmp
    finally:
        # Clean up even when the `with` body raises; without the try/finally the
        # temporary directory would leak on errors.
        shutil.rmtree(tmp)
def write_file(path, data, mode="w"): # type: (str, str, str) -> None
"""Write data to a file.
Args:
path (str): path to the file.
data (str): data to be written to the file.
mode (str): mode which the file will be open.
"""
with open(path, mode) as f:
f.write(data)
def read_file(path, mode="r"):
    """Read and return the entire contents of a file.

    Args:
        path (str): path to the file.
        mode (str): file open mode; defaults to "r" (text).

    Returns:
        the file contents.
    """
    with open(path, mode) as stream:
        return stream.read()
def read_json(path):  # type: (str) -> dict
    """Parse a JSON file and return its content.

    Args:
        path (str): Path to the file.

    Returns:
        (dict[object, object]): A dictionary representation of the JSON file.
    """
    with open(path, "r") as stream:
        return json.load(stream)
def download_and_extract(uri, path):  # type: (str, str) -> None
    """Download, prepare and install a compressed tar file from S3 or local directory as
    an entry point.

    SageMaker Python SDK saves the user provided entry points as compressed tar files in S3.

    Args:
        uri (str): the location of the entry point: an ``s3://`` url, a local directory,
            a local ``tar.gz`` archive, or a plain local file.
        path (str): The path where the script will be installed. It will not download and
            install the entry point if the path already contains one.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    # Only populate the destination when it is empty, so an already-installed
    # entry point is left untouched.
    if not os.listdir(path):
        with tmpdir() as tmp:
            if uri.startswith("s3://"):
                # Fetch the archive from S3 into a scratch directory, then unpack it.
                dst = os.path.join(tmp, "tar_file")
                s3_download(uri, dst)
                # NOTE(review): extractall() does not sanitize member paths; this assumes
                # the archive comes from a trusted source -- confirm.
                with tarfile.open(name=dst, mode="r:gz") as t:
                    t.extractall(path=path)
            elif os.path.isdir(uri):
                # Local directory: replace `path` with a full copy of it.
                if uri == path:
                    return
                if os.path.exists(path):
                    shutil.rmtree(path)
                shutil.copytree(uri, path)
            elif tarfile.is_tarfile(uri):
                # Local gzipped tarball: unpack it in place.
                with tarfile.open(name=uri, mode="r:gz") as t:
                    t.extractall(path=path)
            else:
                # Single local file (e.g. a script): just copy it in.
                shutil.copy2(uri, path)
def s3_download(url, dst):  # type: (str, str) -> None
    """Download a single object from S3 to a local file.

    Args:
        url (str): the s3 url of the file.
        dst (str): the destination where the file will be saved.
    """
    parsed = parse.urlparse(url)
    if parsed.scheme != "s3":
        raise ValueError("Expecting 's3' scheme, got: %s in %s" % (parsed.scheme, parsed))

    bucket = parsed.netloc
    key = parsed.path.lstrip("/")

    # Region/endpoint come from the environment; AWS_REGION takes precedence.
    region = os.environ.get("AWS_REGION", os.environ.get(_params.REGION_NAME_ENV))
    endpoint_url = os.environ.get(_params.S3_ENDPOINT_URL, None)

    s3 = boto3.resource("s3", region_name=region, endpoint_url=endpoint_url)
    s3.Bucket(bucket).download_file(key, dst)
import logging
import pandas as pd
import scipy.stats as sp
from sagemaker_data_insights import PEARSON, ALLOWED_CROSS_COL_INSIGHTS
from sagemaker_data_insights import FEATURE_TYPES, FEATURE_DATA
from sagemaker_data_insights import FeatureType as ft
from sagemaker_data_insights.model_utils import _encode_features
def cross_column_stats(
    left_feature_set: dict, right_feature_set: dict, requested_insights: list = None, n_jobs: int = 1
):
    """
    Computes various pairwise statistics between two sets of feature vectors.

    The following correlations are calculated:
    {PEARSON}: Pearson Correlation is a linear correlation measure between two features. It is in the range [-1,1],
        where 0 indicates that the features are independent and 1 (or -1) indicates that they are completely
        linearly dependent. It is to be noted that pearson correlation only captures only linear relationships.

    Parameters
    ----------
    left_feature_set: dict
        Represents the first set of feature vectors as
        {
            {FEATURE_DATA}: pandas.DataFrame
            {FEATURE_TYPES}: dict(str:str) - maps column names to column types
        }
        - Only {NUMERIC}, {BINARY} and {CATEGORICAL} types are supported.
        - Type Error is raised if the given feature type is not supported.
    right_feature_set: dict
        The second set of feature vectors, representation is same as left_feature_set.
    requested_insights: list(str)
        A list of the requested metrics to be returned.
        - If not provided, all the insights in {ALLOWED_CROSS_COL_INSIGHTS} is returned.
    n_jobs : int
        number of cores to use in feature processing

    Returns
    ----------
    dict: cross column insights
        The python dictionary maps each data insight key requested to the calculated data insights.
        - Each data insight is a matrix of correlation values between two feature pairs. This is represented as the
          following structure (returned by the pd.DataFrame.to_dict() function, with "split" orient):
          {
            "columns": List of feature names from [left_feature_set], representing the columns of the matrix
            "index": List feature names from [right_feature_set], representing the row index of the matrix
            "data": 2-D list of the correlation values, representing the correlation matrix
          }
          Example:
            Consider a correlation matrix calculated for a particular insight, between [left_1, left_2] columns in
            [left_feature_set] and [right_1, right_2] columns in [right_feature_set] shown below.
                        left_1  left_2
                right_1  1.0     2.9
                right_2  3.9     1.2
            The correlation matrix would be represented as following in the response structure:
            {
                "columns": ["left_1", "left_2"]
                "index": ["right_1", "right_2"]
                "data": [[1.0, 2.9], [3.9, 1.2]]
            }
    """
    cross_column_insights = {}
    # Default to every supported cross-column insight when none are requested.
    if requested_insights is None:
        requested_insights = ALLOWED_CROSS_COL_INSIGHTS
    # Encoding feature columns
    left_encoded = pd.DataFrame(
        _encode_features(
            left_feature_set[FEATURE_TYPES],
            left_feature_set[FEATURE_DATA],
            [ft.NUMERIC, ft.BINARY, ft.CATEGORICAL],
            False,
            n_jobs=n_jobs,
        )["transformed_data"],
        columns=left_feature_set[FEATURE_DATA].columns,
    )
    # Pearson is only defined for numeric/binary columns; collect their names.
    left_numeric_binary = [
        name for name, ftype in left_feature_set[FEATURE_TYPES].items() if ftype in [ft.NUMERIC, ft.BINARY]
    ]
    right_encoded = pd.DataFrame(
        _encode_features(
            right_feature_set[FEATURE_TYPES],
            right_feature_set[FEATURE_DATA],
            [ft.NUMERIC, ft.BINARY, ft.CATEGORICAL],
            False,
            n_jobs=n_jobs,
        )["transformed_data"],
        columns=right_feature_set[FEATURE_DATA].columns,
    )
    right_numeric_binary = [
        name for name, ftype in right_feature_set[FEATURE_TYPES].items() if ftype in [ft.NUMERIC, ft.BINARY]
    ]
    # Calculating correlation between numeric & binary features in the left feature array to the numeric & binary
    # features in the right feature array
    # NOTE(review): scipy.stats.pearsonr requires equal-length inputs, so this assumes both
    # feature sets have the same number of rows -- confirm upstream.
    if PEARSON in requested_insights:
        if left_numeric_binary and right_numeric_binary:
            correlations_df = pd.DataFrame(columns=left_numeric_binary, index=right_numeric_binary)
            for col in left_numeric_binary:
                for index in right_numeric_binary:
                    correlations_df.at[index, col] = sp.pearsonr(left_encoded[col], right_encoded[index])[0]
            cross_column_insights[PEARSON] = correlations_df.to_dict("split")
        elif right_numeric_binary:
            logging.warning(
                "WARNING: %s correlation for numeric and binary features can not be calculated because the left "
                "feature set has no numeric or binary features.",
                PEARSON,
            )
        elif left_numeric_binary:
            logging.warning(
                "WARNING: %s correlation for numeric and binary features can not be calculated because the right "
                "feature set has no numeric or binary features.",
                PEARSON,
            )
        else:
            logging.warning(
                "WARNING: %s correlation for numeric and binary features can not be calculated because neither of the "
                "feature sets have numeric or binary features.",
                PEARSON,
            )
    return cross_column_insights
import logging
import pandas as pd
import numpy as np
import scipy
import sagemaker_data_insights.const as cs
from sagemaker_data_insights.const import TaskType as tt
from sagemaker_data_insights.histogram_functions import (
_verify_y,
calc_robust_histogram,
robust_histogram_num_outliers,
_unique_without_whitespaces,
)
from sagemaker_data_insights.insights import Insights
from sagemaker_data_insights.model_utils import _get_label_encoder
# Metric keys (precomputed over the full data set) that every target analyzer
# requires before it can run.
REQUIRED_TARGET_METRICS = {
    "labels",
    "label_counts",
    "cardinality",
    "max",
    "min",
    "numeric_finite_count",
    "nrows",
    "null_like_count",
    "empty_count",
    "whitespace_count",
}


def _check_required_target_metrics_provided(metrics: dict):
    """Raise a ValueError naming any required target metric absent from `metrics`."""
    missing_metrics = REQUIRED_TARGET_METRICS.difference(metrics)
    if missing_metrics:
        raise ValueError(f"Missing following target metrics: {missing_metrics}")
def analyze_target_regression(
    y: pd.Series, metrics: dict, num_bins: int = 20, max_num_common_labels: int = 10, max_num_outliers: int = 5,
):
    """
    Target column analyzer for regression task

    Parameters
    ----------
    y : pandas.Series
        target column (could be raw. Doesn't have to be encoded)
    metrics : dict
        dictionary that must include all the keys in REQUIRED_TARGET_METRICS. While `analyze_target_regression`
        is usually applied on a sample of the data, the metrics should be calculated on the whole data or on a larger
        sample
    num_bins : int >= 3
        number of bins in histograms
    max_num_common_labels : int >= 1
        max number of most common labels to return in `labels` and `label_counts` fields
    max_num_outliers : int >= 0
        max number of outliers to return in `low_outlier_idxs` and `high_outlier_idxs` fields

    Returns
    -------
    dict: data insights metrics
        labels: list of all labels in the target column sorted by descending count order
        label_counts: list of label counts sorted by descending count order
        valid_ratio: ratio of the number of numeric finite values to the number of samples
        missing_ratio: ratio of null-like, empty and whitespace-only values to the number of samples
        name: name of target column
        outliers_ratio: ratio between number of outliers to number of samples
        mean: mean of numeric values (outliers included)
        median: median of numeric values (outliers included)
        skew: skew of numeric values (outliers included). Calculated using scipy.stats.skew
        kurtosis: kurtosis of numeric values (outliers included). Calculated using scipy.stats.kurtosis
        histogram: histogram of numeric values (outliers included). Calculated using numpy.histogram
        robust_histogram: robust_histogram of numeric values (outliers included). Calculated using calc_robust_histogram
        metrics: metrics provided in input
        {cs.INSIGHTS}: a list of insights. Can include the following insights: SKEWED_TARGET, HEAVY_TAILED_TARGET,
            TARGET_OUTLIERS, REGRESSION_FREQUENT_LABEL, REGRESSION_NONNUMERIC and REGRESSION_MANY_NONNUMERIC. The
            insights are documented in `insights.py`
    dict: auxiliary dict including the following:
        label_encoder: `LabelEncoder` transform
        invalid_row_idxs: (np.ndarray) indexes of rows whose target is not a finite number
        low_outlier_idxs: (list) indexes of low value outliers for regression
        high_outlier_idxs: (list) indexes of high value outliers for regression
    """
    _check_required_target_metrics_provided(metrics)
    label_encoder, labels, label_counts, y_encoded, _ = _analyze_target(y, tt.REGRESSION, metrics)
    # Rows whose encoded target is NaN/inf are excluded from all numeric statistics.
    valid_rows = np.isfinite(y_encoded).ravel()
    valid_encoded = y_encoded[valid_rows]
    count, bin_edges = np.histogram(valid_encoded, bins=num_bins)
    histogram = {
        "hist_count": count.astype(int).tolist(),
        "hist_edges": bin_edges.astype(float).tolist(),
        "lower_bin_is_outlier": False,
        "upper_bin_is_outlier": False,
    }
    robust_histogram = calc_robust_histogram(valid_encoded, num_bins=num_bins)
    # count outliers to calculate `outliers_ratio`
    num_outliers = robust_histogram_num_outliers(robust_histogram)
    # Map positions within `valid_encoded` back to row indexes in the original series.
    valid_row_idxs = np.nonzero(valid_rows)[0]
    # get idxs of lowest outliers to be output as `low_outlier_idxs`
    low_outlier_idxs = []
    if robust_histogram["lower_bin_is_outlier"]:
        for idx in np.argsort(valid_encoded.ravel())[:max_num_outliers]:
            value = valid_encoded[idx]
            # Values below the first interior edge fall in the dedicated outlier bin.
            if value < robust_histogram["hist_edges"][1]:
                low_outlier_idxs.append(valid_row_idxs[idx])
    # get idxs of highest outliers to be output as `high_outlier_idxs`
    high_outlier_idxs = []
    if robust_histogram["upper_bin_is_outlier"]:
        for idx in reversed(np.argsort(valid_encoded.ravel())[-max_num_outliers:]):
            value = valid_encoded[idx]
            if value > robust_histogram["hist_edges"][-2]:
                high_outlier_idxs.append(valid_row_idxs[idx])
    outliers_ratio = float(num_outliers / valid_encoded.shape[0])
    skew = float(scipy.stats.skew(valid_encoded.ravel()))
    kurtosis = float(scipy.stats.kurtosis(valid_encoded.ravel()))
    # Check for target insights
    insights = _regression_insights(outliers_ratio, skew, kurtosis, labels, label_counts, metrics)
    return (
        {
            "labels": labels[:max_num_common_labels],
            "label_counts": label_counts[:max_num_common_labels],
            "valid_ratio": float(metrics["numeric_finite_count"] / metrics["nrows"]),
            "missing_ratio": float(
                (metrics["null_like_count"] + metrics["empty_count"] + metrics["whitespace_count"]) / metrics["nrows"]
            ),
            "name": y.name,
            "outliers_ratio": outliers_ratio,
            "mean": float(np.nanmean(valid_encoded)),
            "median": float(np.nanmedian(valid_encoded)),
            "skew": skew,
            "kurtosis": kurtosis,
            "histogram": histogram,
            "robust_histogram": robust_histogram,
            cs.INSIGHTS: insights,
            "metrics": metrics,
        },
        {
            "label_encoder": label_encoder,
            "invalid_row_idxs": np.nonzero(~valid_rows)[0],
            "low_outlier_idxs": low_outlier_idxs,
            "high_outlier_idxs": high_outlier_idxs,
        },
    )
def analyze_target_classification(
    y: pd.Series, metrics: dict, max_num_common_labels: int = 10,
):
    """
    Target column analyzer for classification task

    Parameters
    ----------
    y : pandas.Series
        target column (not encoded)
    metrics : dictionary that must include all the keys in REQUIRED_TARGET_METRICS.
        While `analyze_target_classification` is usually applied on a sample of the data,
        the metrics should be calculated on the whole data or on a larger sample. See const.py
    max_num_common_labels : int >= 1
        max number of most common labels to return in `labels` and `label_counts` fields

    Returns
    -------
    dict: data insights metrics
        labels: list of all labels in the target column sorted by descending count order
        label_counts: list of label counts sorted by descending count order
        missing_ratio: ratio of null-like, empty and whitespace-only values to the number of samples
        valid_ratio: ratio of the number of not null like values to the number of samples
        name: name of target column
        frequent_elements: calculated based on `labels` and `label_counts` provided in metrics
        metrics: metrics provided in input
        insights: a list of insights. Can include the following insights: VERY_SMALL_MINORITY, HIGH_TARGET_CARDINALITY,
            RARE_TARGET_LABEL and SKEWED_LABEL_FREQUENCY. The insights are documented in `insights.py`
    dict: auxiliary dict including the following:
        label_encoder: `LabelEncoder` transform
        y_map: dict. label_encoder mapping e.g. {0: 'dog', 1: 'cat', 2: 'mouse'}
        task: str either BINARY_CLASSIFICATION or MULTICLASS_CLASSIFICATION
        invalid_row_idxs: (np.ndarray) indexes of rows whose target value is missing
    """
    _check_required_target_metrics_provided(metrics)
    try:
        # When the data type of y is string: Null, empty and cells of only whitespace are considered missing
        valid_rows = (~pd.isnull(y)) & (y.str.strip() != "")
    except AttributeError:
        # When the data type of y is not string: only Nulls are considered missing
        valid_rows = ~pd.isnull(y)
    y = y[valid_rows]
    # Exactly two distinct (stringified) values -> binary; anything else -> multiclass.
    task = tt.BINARY_CLASSIFICATION if len(np.unique(y.to_numpy().astype(str))) == 2 else tt.MULTICLASS_CLASSIFICATION
    label_encoder, labels, label_counts, _, sample_size = _analyze_target(y, task, metrics)
    # Reverse mapping from encoded class index back to the original label.
    y_map = {label_encoder.transform([label])[0]: label for label in labels}
    # Check for target insights
    insights = _classification_insights(task, labels, label_counts, sample_size)
    sum_label_counts = np.sum(label_counts)
    return (
        {
            "labels": labels[:max_num_common_labels],
            "label_counts": label_counts[:max_num_common_labels],
            "missing_ratio": float(
                (metrics["null_like_count"] + metrics["empty_count"] + metrics["whitespace_count"]) / metrics["nrows"]
            ),
            "valid_ratio": float(
                (metrics["nrows"] - metrics["null_like_count"] - metrics["empty_count"] - metrics["whitespace_count"])
                / metrics["nrows"]
            ),
            "name": y.name,
            "frequent_elements": {
                "value": labels[:max_num_common_labels],
                "frequency": [float(lc / sum_label_counts) for lc in label_counts[:max_num_common_labels]],
            },
            "insights": insights,
            "metrics": metrics,
        },
        {
            "label_encoder": label_encoder,
            "y_map": y_map,
            "task": task,
            "invalid_row_idxs": np.nonzero(~np.array(valid_rows))[0],
        },
    )
def _analyze_target(y: pd.Series, task: str, metrics: dict):
    """Shared target analysis used by both the regression and classification analyzers.

    Fits a label encoder, derives the (descending-count-ordered) labels and counts --
    from `metrics` when present there, otherwise from the sample -- and encodes `y`.

    Returns:
        tuple: (label_encoder, labels, label_counts, y_encoded, sample_size)
    """
    # This function includes code that is shared between analyze_target_regression and analyze_target_classification
    y_numpy = y.dropna().to_numpy()
    _verify_y(y_numpy, task)
    y_numpy = y_numpy.astype(str)
    label_encoder = _get_label_encoder(task, y_numpy)
    # Prefer precomputed labels/counts from `metrics`; fall back to counting the sample.
    if not isinstance(metrics["labels"], list) or not isinstance(metrics["label_counts"], list):
        unique, counts = _unique_without_whitespaces(y_numpy)
        labels = unique.tolist()
        label_counts = counts.tolist()
        sample_size = len(y_numpy)
    else:
        labels = metrics["labels"]
        label_counts = metrics["label_counts"]
        sample_size = metrics["nrows"]
    # Re-order labels and counts together by descending count.
    most_common_label_indexes = np.argsort(-np.array(label_counts))
    labels = np.array(labels)[most_common_label_indexes].astype(str).tolist()
    label_counts = np.array(label_counts)[most_common_label_indexes].astype(int).tolist()
    # Encode the full (non-dropped) series; missing entries become NaN.
    y_encoded = label_encoder.transform(y.to_numpy().astype(str)).ravel().astype(float)
    y_encoded[pd.isnull(y).to_numpy()] = np.nan
    return label_encoder, labels, label_counts, y_encoded, sample_size
def _regression_insights(outliers_ratio, skew, kurtosis, labels, label_counts, metrics):
    """Collect regression target insights: distribution-shape warnings, an
    over-frequent label warning, and non-numeric label warnings.

    Returns a list of insight dicts produced by `Insights.generate`.
    """
    insights = []
    # Distribution-shape warnings are only raised when outliers were actually detected;
    # at most one of the three (most severe first) is emitted.
    if outliers_ratio > 0:
        if abs(skew) > Insights.SKEWED_TARGET_THRESHOLD:
            insights.append(Insights.generate(Insights.SKEWED_TARGET, Insights.HIGH))
        elif kurtosis > Insights.HEAVY_TAILED_TARGET_THRESHOLD:
            insights.append(Insights.generate(Insights.HEAVY_TAILED_TARGET, Insights.HIGH))
        elif kurtosis > Insights.TARGET_OUTLIERS_THRESHOLD:
            insights.append(Insights.generate(Insights.TARGET_OUTLIERS, Insights.MEDIUM))
    # `labels`/`label_counts` are sorted by descending count, so index 0 is the mode.
    majority_label_frequency = label_counts[0] / metrics["nrows"]
    allowed_frequency = Insights.ALLOWED_FREQUENCY_FACTOR / metrics["cardinality"]
    if majority_label_frequency > max(Insights.ALLOWED_FREQUENCY, allowed_frequency):
        insights.append(
            Insights.generate(
                Insights.REGRESSION_FREQUENT_LABEL,
                Insights.MEDIUM,
                {"label": labels[0], "frequency": majority_label_frequency},
            )
        )
    # Any row that is not a finite number counts as non-numeric for regression.
    non_numeric_count = metrics["nrows"] - metrics["numeric_finite_count"]
    non_numeric_frequency = non_numeric_count / metrics["nrows"]
    if non_numeric_frequency > 0:
        info = {
            "frequency": non_numeric_frequency,
            # Report a few example labels that do not parse to a finite number.
            "values": list(filter(lambda x: not np.isfinite(pd.to_numeric(x, errors="coerce")), labels))[
                : Insights.NUM_NONUMERIC_LABELS
            ],
        }
        if non_numeric_frequency > Insights.REGRESSION_MANY_NONNUMERIC_THRESHOLD:
            insights.append(Insights.generate(Insights.REGRESSION_MANY_NONNUMERIC, Insights.HIGH, info))
        else:
            insights.append(Insights.generate(Insights.REGRESSION_NONNUMERIC, Insights.MEDIUM, info))
    return insights
def _classification_insights(task, labels, label_counts, sample_size):
    """Collect classification target insights: tiny minority classes (binary),
    excessive cardinality, rare labels and skewed label frequencies (multiclass).

    `labels`/`label_counts` are expected in descending count order, so index 0
    is the most frequent label. Returns a list of `Insights.generate` dicts.
    """
    found = []
    if task == tt.BINARY_CLASSIFICATION:
        for label, count in zip(labels, label_counts):
            if count < Insights.VERY_SMALL_MINORITY_THRESHOLD:
                info = {
                    "label": label,
                    "count": count,
                    "sample_size": sample_size,
                    "ratio": count / sample_size,
                }
                found.append(Insights.generate(Insights.VERY_SMALL_MINORITY, Insights.HIGH, info))
        return found
    if task != tt.MULTICLASS_CLASSIFICATION:
        return found
    if len(labels) > Insights.HIGH_TARGET_CARDINALITY_THRESHOLD:
        found.append(
            Insights.generate(Insights.HIGH_TARGET_CARDINALITY, Insights.MEDIUM, {"cardinality": len(labels)})
        )
        return found
    majority_label, majority_count = labels[0], label_counts[0]
    for label, count in zip(labels, label_counts):
        if count <= Insights.RARE_TARGET_LABEL_THRESHOLD:
            found.append(
                Insights.generate(Insights.RARE_TARGET_LABEL, Insights.HIGH, {"label": label, "count": count})
            )
        elif count < majority_count * Insights.SKEWED_LABEL_FREQUENCY_RATIO:
            found.append(
                Insights.generate(
                    Insights.SKEWED_LABEL_FREQUENCY,
                    Insights.MEDIUM,
                    {
                        "label": label,
                        "count": count,
                        "most_frequent_label": majority_label,
                        "most_frequent_label_count": majority_count,
                    },
                )
            )
    return found
# --- Heuristic thresholds used by `determine_task` to infer the problem type ---
# The maximum number of unique labels in a numeric target column to treat the problem as classification.
TASK_TYPE_MAX_NUM_UNIQUES_FOR_NUMERIC_MULTICLASS = 100
# The maximum number of unique labels in a numeric target column under which we always treat the problem as
# regression.
TASK_TYPE_MAX_NUM_UNIQUES_FOR_OBVIOUS_MULTICLASS = 5
# By how many times the target column's maximum should exceed the number of labels to treat the column as ordinal.
TASK_TYPE_MAX_NUM_UNIQUES_MULTIPLE_FOR_ORDINAL = 10
# The minimum fraction of values which should be numeric for the target to be treated as numeric.
TASK_TYPE_MIN_FRACTION_FOR_NUMERIC = 0.5
# The minimum value that #uniques / #rows should be to allow regression when the labels are mostly integers.
TASK_TYPE_MIN_FRACTION_INTEGER_UNIQUES_FOR_REGRESSION = 0.015
# The minimum fraction of values which should be non-integer floats in order to treat the problem as regression.
TASK_TYPE_MIN_FRACTION_NONINTEGER_FLOATS_FOR_REGRESSION = 0.01
# Maximum number of supported classes for multiclass classification problems.
TASK_TYPE_MAX_NUM_SUPPORTED_CLASSES = 2000
def determine_task(metrics: dict):
    """Determines the problem type based on basic statistics about the target column.

    - The logic is copied from `determine_task` of AIAlgorithmsPipelineRecommender

    Parameters
    ----------
    metrics : dict
        must include all the following keys calculated on the target column: `cardinality`, `integer_count`, `max`,
        `min`, `numeric_finite_count` and `nrows`. Keys definitions can be found in const.py.

    Returns
    -------
    str: The identified problem type in [tt.REGRESSION, tt.BINARY_CLASSIFICATION, tt.MULTICLASS_CLASSIFICATION]

    Raises
    ------
    ValueError: if the metrics are unusable (no rows, fewer than two unique values) or the
        problem type is ambiguous / unsupported.
    """
    cardinality = metrics["cardinality"]
    num_integers = metrics["integer_count"]
    num_numeric_finite = metrics["numeric_finite_count"]
    num_rows = metrics["nrows"]
    target_max = metrics["max"]
    target_min = metrics["min"]

    # Validate preconditions explicitly instead of with `assert`, which is silently
    # stripped when Python runs with -O and would let bad inputs through.
    if num_rows <= 0:
        raise ValueError("Cannot determine problem type with no rows sampled.")
    if cardinality < 2:
        raise ValueError(f"Cannot determine problem type from target column with {cardinality} unique values.")

    if cardinality == 2:
        logging.info("determine_task, task = %s", tt.BINARY_CLASSIFICATION)
        return tt.BINARY_CLASSIFICATION

    if num_numeric_finite > TASK_TYPE_MIN_FRACTION_FOR_NUMERIC * num_rows:
        # Target column is mostly numeric.
        if cardinality <= TASK_TYPE_MAX_NUM_UNIQUES_FOR_OBVIOUS_MULTICLASS:
            # When there are not many labels, use multiclass classification even if the labels are non-integer floats.
            logging.info("determine_task, task = %s", tt.MULTICLASS_CLASSIFICATION)
            return tt.MULTICLASS_CLASSIFICATION
        fraction_noninteger_floats = 1 - (num_integers / num_numeric_finite)
        if fraction_noninteger_floats >= TASK_TYPE_MIN_FRACTION_NONINTEGER_FLOATS_FOR_REGRESSION:
            # Most of the values are non-integer floats.
            logging.info("determine_task, task = %s", tt.REGRESSION)
            return tt.REGRESSION
        # A small non-negative integer range suggests ordinal-encoded classes.
        ordinal_encoded = target_min >= 0 and target_max <= cardinality * TASK_TYPE_MAX_NUM_UNIQUES_MULTIPLE_FOR_ORDINAL
        if not ordinal_encoded and (cardinality / num_rows) >= TASK_TYPE_MIN_FRACTION_INTEGER_UNIQUES_FOR_REGRESSION:
            # The spread of labels is very wide, so treat the problem as regression despite mostly integer labels.
            logging.info("determine_task, task = %s", tt.REGRESSION)
            return tt.REGRESSION
        if cardinality <= TASK_TYPE_MAX_NUM_UNIQUES_FOR_NUMERIC_MULTICLASS:
            # Values are mostly integers, and there are not too many labels.
            logging.info("determine_task, task = %s", tt.MULTICLASS_CLASSIFICATION)
            return tt.MULTICLASS_CLASSIFICATION
        raise ValueError(
            f"It is unclear whether the problem type should be {tt.MULTICLASS_CLASSIFICATION} or {tt.REGRESSION}."
            f" Please specify the problem type manually and retry."
        )

    # Target is mostly non-numeric.
    if cardinality <= TASK_TYPE_MAX_NUM_SUPPORTED_CLASSES:
        # Less than half of the labels are numeric, and there are not "too many" distinct values.
        logging.info("determine_task, task = %s", tt.MULTICLASS_CLASSIFICATION)
        return tt.MULTICLASS_CLASSIFICATION

    # There are too many distinct values for multiclass-classification.
    raise ValueError(
        f"The number of unique labels {cardinality} is larger than the maximum number of supported classes of"
        f" {TASK_TYPE_MAX_NUM_SUPPORTED_CLASSES}. Please verify that the target column is set correctly and retry."
    )
import numpy as np
import pandas as pd
from sagemaker_data_insights.const import TaskType as tt
def calc_robust_histogram( # noqa: C901
    x: np.ndarray,
    y: np.ndarray = None,
    task=None,
    num_bins=20,
    stds=5,
    robust_std_percentile=5,
    robust_histogram_eps=1e-10,
):
    """
    Calculates robust histogram for a vector
    Parameters
    ----------
    x : np.ndarray
        feature data. A column numpy array of size (height,). All values must be valid floats
    y : np.ndarray or None
        target column. A column numpy array of size (height,). When the task is classification, y cannot contain NaN or
        None values
    task : str in [REGRESSION, BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION]
        When y is not None task must be provided
    num_bins : int >= 3
        number of bins in histogram
    stds : float > 0
        Values which are farther away than `stds` robust standard deviations from the robust mean are considered
        outliers
    robust_std_percentile : int in [1, 99]
        Robust standard deviation used for outlier detection is calculated on all data between percentile
        robust_std_percentile to 100 - robust_std_percentile
    robust_histogram_eps: float
        Small value used to pad some internal values. All values in [lower_bound, upper_bound) are valid. In order to
        avoid having np.max(x) always becoming an outlier we add a small float to it
    Returns
    -------
    dict : data insights histogram
        hist_count: list(int)
            Number of items in each histogram bar
        hist_edges: list(float)
            Histogram edges
        lower_bin_is_outlier: boolean
            Indicator whether the left most bin is an outliers bin
        upper_bin_is_outlier: boolean
            Indicator whether the right most bin is an outliers bin
        target_avg: list(float)
            The average of the target column for each histogram bar. This field exists only when y is provided and the
            task is regression
        target_std: list(float)
            The standard deviation of the target column for each histogram bar. This field exists only when y is
            provided and the task is regression
        target_labels: dict
            This field exists only when y is provided and the task is classification
            The dict keys are the target labels. The value for each key is a list(float) indicating the percentage of
            labels for each histogram bar that equal the key
    """
    # The binning math below assumes finite floats only; fail fast otherwise.
    if np.sum(~np.isfinite(x)) > 0:
        raise ValueError("Error: x contains NaN or infinite values")
    if num_bins < 3:
        raise ValueError("Error: num_bins < 3")
    # Robust spread estimate: clip to the central percentile range so extreme
    # values cannot inflate the standard deviation used for outlier detection.
    ps = np.percentile(x, [robust_std_percentile, 100 - robust_std_percentile])
    std = np.std(np.clip(x, ps[0], ps[1]))
    if std <= robust_histogram_eps:
        # The clipped data is (nearly) constant; fall back to the plain std.
        std = np.std(x)
    med = np.median(x)
    max_x = np.max(x)
    min_x = np.min(x)
    # All values in [lower_bound, upper_bound) are valid
    upper_bound = min(max_x + robust_histogram_eps, med + stds * std)
    lower_bound = max(min_x, med - stds * std)
    # Whether lower and upper outliers exists
    has_upper_outliers = sum(x > upper_bound) > 0
    has_lower_outliers = sum(x < lower_bound) > 0
    if has_lower_outliers or has_upper_outliers:
        # Reserve one bin per outlier side; the remaining bins evenly cover
        # [lower_bound, upper_bound] and we pass explicit edges to np.histogram.
        num_bins -= int(has_lower_outliers) + int(has_upper_outliers)
        bin_width = (upper_bound - lower_bound) / num_bins
        bins = [lower_bound + bin_width * i for i in range(num_bins + 1)]
        if has_lower_outliers:
            # Add bin for lower outliers
            bins = [min_x] + bins
        if has_upper_outliers:
            # Add bin for upper outliers
            bins = bins + [max_x]
    else:
        # No outliers: let np.histogram choose equal-width edges over the data.
        bins = num_bins
    count, bin_edges = np.histogram(x, bins=bins)
    h = {
        "hist_count": count.astype(int).tolist(),
        "hist_edges": bin_edges.astype(float).tolist(),
        "lower_bin_is_outlier": bool(has_lower_outliers),
        "upper_bin_is_outlier": bool(has_upper_outliers),
    }
    if y is not None:
        _verify_y(y, task)
        if task == tt.REGRESSION:
            h["target_avg"] = []
            h["target_std"] = []
        else:
            h["target_labels"] = {}
            all_target_labels = np.unique(y).tolist()
            for y_label in all_target_labels:
                h["target_labels"][y_label] = []
        for idx in range(count.shape[0]):
            # y_ is the part of y which belongs to the bin of index "idx". It is used to calculate statistics for the
            # target column for this bin. First/last bins use one-sided masks so
            # every sample (including the exact min/max) lands in some bin.
            if idx == 0:
                y_ = y[x < bin_edges[idx + 1]]
            elif idx == count.shape[0] - 1:
                y_ = y[x >= bin_edges[idx]]
            else:
                y_ = y[(x >= bin_edges[idx]) & (x < bin_edges[idx + 1])]
            if task == tt.REGRESSION:
                # Add target statistics for bin "idx" when the task is regression using the vector y_
                _regression_append(y_, h["target_avg"], h["target_std"])
            else:
                # Add target statistics for bin "idx" when the task is classification using the vector y_
                _classification_append(y_, h["target_labels"], all_target_labels)
    return h
def robust_histogram_num_outliers(robust_histogram):
    """Return the total sample count that fell into outlier bins.

    A robust histogram (see `calc_robust_histogram`) may flag its first and/or
    last bin as an outlier bin; this sums the counts of whichever are flagged.
    """
    counts = robust_histogram["hist_count"]
    flagged_bins = [("lower_bin_is_outlier", 0), ("upper_bin_is_outlier", -1)]
    return sum(counts[idx] for flag, idx in flagged_bins if robust_histogram[flag])
def _frequent_element_helper(x: np.ndarray, y: np.ndarray, max_num_elements: int):
    """
    Computes the building blocks needed for frequent-element statistics: drops
    null entries from x (and the matching rows of y), counts the unique
    non-whitespace values, and selects the indices of the `max_num_elements`
    most frequent values. Ties in counts are broken alphabetically (ascending).
    """
    keep = ~pd.isnull(x)
    x = x[keep]
    y = y[keep] if y is not None else None
    unique, counts = _unique_without_whitespaces(x)
    num_values = np.sum(counts)
    # Pre-sort alphabetically so the stable sort by count below breaks ties
    # in ascending alphabetical order.
    alpha_order = np.argsort(unique)
    unique = unique[alpha_order]
    counts = counts[alpha_order]
    top = np.argsort(-counts, kind="stable")[:max_num_elements]
    return x, y, unique, counts, num_values, top
def calc_frequent_elements(x: np.ndarray, y: np.ndarray = None, task=None, max_num_elements=10, sort_type="frequency"):
    """
    Gather statistics about the frequent elements for a vector
    Parameters
    ----------
    x : np.ndarray
        feature data. A column numpy array of size (height,). Data type must be sortable (by numpy)
    y : np.ndarray or None
        target column. A column numpy array of size (height,). When the task is classification, y cannot contain NaN or
        None values
    task : str in [REGRESSION, BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION]
        When y is not None task must be provided
    max_num_elements : int > 0
        maximum number of elements to include in the response. The top max_num_elements most frequent elements are
        returned. Ties are broken by ascending alphabetical order
    sort_type : str in ['frequency', 'value']
        whether to return the result sorted by the frequency or ascending alphabetically by value
    Returns
    -------
    dict : data insights frequent elements stats
        value: list(str)
            Most frequent items in the data. Converted to strings
        frequency: list(float)
            Frequency of each element
        target_avg: list(float)
            The average of the target column for each feature value. This field exists only when y is provided and the
            task is regression
        target_std: list(float)
            The standard deviation of the target column for each feature value. This field exists only when y is
            provided and the task is regression
        target_labels: dict
            This field exists only when y is provided and the task is classification
            The dict keys are the target labels. The value for each key is a list(float) indicating the percentage of
            labels for each feature value that equal the key
    """
    assert sort_type in ["frequency", "value"]
    # Drop nulls, count unique non-whitespace values, and pick the most
    # frequent ones (ties broken alphabetically).
    x, y, unique, counts, num_values, indices = _frequent_element_helper(x, y, max_num_elements)
    f = {"value": unique[indices], "frequency": counts[indices] / num_values}
    # sort according to sort_type if required
    if sort_type != "frequency":
        sorting_indexes = np.argsort(f[sort_type])
        f["value"] = np.array(f["value"])[sorting_indexes]
        f["frequency"] = np.array(f["frequency"])[sorting_indexes]
    f["value"] = f["value"].tolist()
    f["frequency"] = f["frequency"].tolist()
    if y is not None:
        _verify_y(y, task)
        if task == tt.REGRESSION:
            f["target_avg"] = []
            f["target_std"] = []
        else:
            f["target_labels"] = {}
            all_target_labels = np.unique(y).tolist()
            for y_label in all_target_labels:
                f["target_labels"][y_label] = []
        for frequent_value in f["value"]:
            # y_ is the part of y for rows where x == frequent_value. It is used to calculate statistics for the target
            # column
            y_ = y[x == frequent_value]
            if task == tt.REGRESSION:
                # Add target statistics for rows where x == frequent_value when the task is regression using the
                # vector y_
                _regression_append(y_, f["target_avg"], f["target_std"])
            else:
                # Add target statistics for rows where x == frequent_value when the task is classification using the
                # vector y_
                _classification_append(y_, f["target_labels"], all_target_labels)
    return f
def _verify_y(y: np.ndarray, task: str):
    """Validate that the target vector `y` is usable for the given `task`.

    Raises
    ------
    ValueError
        If `task` is not one of the supported task types, or if `y` contains
        NaN/None values for a classification task.
    """
    if task not in [tt.REGRESSION, tt.BINARY_CLASSIFICATION, tt.MULTICLASS_CLASSIFICATION]:
        raise ValueError(
            "Error: when y is provided task must be REGRESSION, BINARY_CLASSIFICATION or MULTICLASS_CLASSIFICATION"
        )
    if task in [tt.BINARY_CLASSIFICATION, tt.MULTICLASS_CLASSIFICATION]:
        # `value is np.nan` only catches the np.nan singleton; use isnan so NaNs
        # produced by computation or parsing are rejected too (np.float64 is a
        # float subclass, so isinstance covers it).
        if any(isinstance(value, float) and np.isnan(value) for value in y):
            raise ValueError("Error: nans are not allowed in y for classification task")
        if any(value is None for value in y):
            raise ValueError("Error: None are not allowed in y for classification task")
def _regression_append(y_, target_avg, target_std):
# Add target statistics when the task is regression. y_ is usually a subset of the target column where some other
# condition is satisfied. For example when the value of a feature x belongs to some range or equals to some value
if y_.shape[0] == 0:
# When y_ is empty, missing value is the appropriate target value
target_avg.append(np.nan)
target_std.append(np.nan)
else:
target_avg.append(float(np.nanmean(y_)))
target_std.append(float(np.nanstd(y_)))
def _classification_append(y_, target_labels, all_target_labels):
unique, counts = np.unique(y_, return_counts=True)
unique = unique.tolist()
count_dict = dict(zip(unique, counts))
for label in all_target_labels:
target_labels[label].append(float(count_dict[label] / y_.shape[0]) if label in count_dict.keys() else float(0))
def _unique_without_whitespaces(x):
"""
Returns the list of unique items with their counts excluding items of only whitespaces. Items of only whitespaces
are considered missing thus they are not valid keys for frequent elements plots
"""
unique, counts = np.unique(x, return_counts=True)
unique_ = []
counts_ = []
for u, c in zip(unique, counts):
if str(u).strip() != "":
unique_.append(u)
counts_.append(int(c))
return np.array(unique_), np.array(counts_) | /sagemaker_data_insights-0.4.0-py3-none-any.whl/sagemaker_data_insights/histogram_functions.py | 0.831725 | 0.78156 | histogram_functions.py | pypi |
import pandas as pd
import numpy as np
import re
import scipy
import logging
import sagemaker_data_insights.const as cs
from sagemaker_data_insights.const import FeatureType as ft
from sagemaker_data_insights.const import TaskType as tt
from sagemaker_data_insights.insights import Insights
from sagemaker_data_insights.histogram_functions import (
calc_frequent_elements,
calc_robust_histogram,
robust_histogram_num_outliers,
)
def missing_ratio(metrics: dict):
    """Fraction of rows considered missing: null-like, empty, or whitespace-only."""
    missing = sum(metrics[key] for key in ("null_like_count", "empty_count", "whitespace_count"))
    return float(missing / metrics["nrows"])
def valid_ratio(metrics: dict, feature_type: str):
    """Fraction of rows holding a valid value for the given feature type.

    What counts as "valid" depends on the type: finite numbers for NUMERIC,
    successfully parsed datetimes for DATETIME, and non-missing entries for
    the remaining types.

    Raises
    ------
    ValueError
        If `feature_type` is not one of the supported feature types.
    """
    nrows = metrics["nrows"]
    if feature_type == ft.NUMERIC:
        return float(metrics["numeric_finite_count"] / nrows)
    if feature_type in (ft.CATEGORICAL, ft.BINARY, ft.TEXT):
        non_missing = nrows - metrics["null_like_count"] - metrics["empty_count"] - metrics["whitespace_count"]
        return float(non_missing / nrows)
    if feature_type == ft.DATETIME:
        return float(metrics["datetime_count"] / metrics["datetime_rows_parsed"])
    if feature_type == ft.VECTOR:
        return 1 - missing_ratio(metrics)
    raise ValueError(f"Unsupported feature type {feature_type}")
def _numpy_conversion(x: pd.Series, y: pd.Series):
"""
Converts original pandas column data to numpy and excludes null value.
Parameters
----------
x : pandas.Series
raw column data
y : pandas.Series or Nones
raw target column data(if any)
Returns
-------
x_numpy : np.ndarray
Non-null x in numpy
y_numpy : None or np.ndarray
None if y is None, otherwise non-null y in numpy
"""
x_numpy = x.to_numpy().reshape((-1, 1))
valid_idxs = ~pd.isnull(x_numpy)
x_numpy = x_numpy[valid_idxs].astype(str).reshape((-1, 1))
y_numpy = None if y is None else y.to_numpy().reshape((-1, 1))[valid_idxs].reshape((-1, 1))
return x_numpy, y_numpy
def get_feature_transform_and_transformed_x(feature_type: str, x_numpy: np.ndarray):
    """
    Fits the default data-insights transform for `feature_type` and applies it.
    Parameters
    ----------
    feature_type : str
    x_numpy : np.ndarray
    Returns
    -------
    feature_transform : the fitted default feature transform used by data_insights
    x_transformed : `x_numpy` after applying the fitted transform
    """
    # Function-scope import, matching the pattern used elsewhere in this module.
    from sagemaker_data_insights.utils.feature_transform import get_feature_transform

    transform = get_feature_transform(feature_type, True).fit(x_numpy)
    return transform, transform.transform(x_numpy)
def analyze_feature( # noqa: C901
    x: pd.Series,
    y: pd.Series,
    task: str,
    feature_type: str,
    metrics: dict,
    num_bins: int = 20,
    random_state: int = 0,
    n_jobs: int = 1,
    requested_stats: list = None,
):
    """
    Feature analyzer. Provides ML relevant statistics about the feature. Different statistics will be derived for each
    feature type.
    Parameters
    ----------
    x : pandas.Series
        raw feature column vector (e.g. NOT encoded using one hot encoder)
    y : pandas.Series or None
        (When y is not provided, all statistics that depend on the target column are not calculated)
        Encoded and clean target column. For regression, all values must be finite floats (np.nan are not allowed).
        For classification, the labels must be encoded as numeric integers consecutive and starting from 0. For both
        regression and classification, it's recommended to use the label_encoder provided by `analyze_target_regression`
        or `analyze_target_classification` to encode the target column. Note that `analyze_target_regression` returns
        a list of invalid row indexes that must be removed from the data before calling `_baseline_prediction_power`
    task : str or None (must be provided when y is provided)
        REGRESSION, BINARY_CLASSIFICATION or MULTICLASS_CLASSIFICATION
    feature_type: str
        NUMERIC, CATEGORICAL, TEXT, DATETIME or BINARY. If unknown, use `get_feature_type`
    metrics : dictionary that must include all the following keys: nrows, numeric_finite_count, cardinality and
        empty_count. See the descriptions in const.py
    num_bins : int >= 3
        number of bins to use in histograms. In some cases, this is used as the decision threshold between producing a
        histogram or frequent values: When there are more unique values than `num_bins` then a histogram is produced,
        otherwise - frequent values.
    random_state : int
        random seed
    n_jobs : int
        number of cores for XGBoost in _calc_prediction_power
    requested_stats : list of strings or None
        Possible values:
            None - return the default set of stats
            ['only_prediction_power'] - returns only prediction power (no other value may be combined with it)
            Additional values are 'text_stats' and 'text_patterns', which return the default set of stats plus
            the additionally requested text statistics. For example: ['text_stats']
    Returns
    -------
    dict: data insights metrics. Statistics will be derived according to the provided feature type. The fields with *
        are derived from the provided metrics, all other - from x and y
        All feature types:
            name: feature name taken from x
            type: feature type provided in the input
            metrics*: metrics dict provided in the input
            prediction_power and normalized_prediction_power: available when y and task are provided
            frequent_elements: calculated using histogram_functions.calc_frequent_elements
            missing_ratio*: ratio of number of null like and empty rows to the number of rows
            {cs.INSIGHTS}: list of insights. Can include: TARGET_LEAKAGE, UNINFORMATIVE_FEATURE,
                NUMERIC_DISGUISED_MISSING_VALUE, CATEGORICAL_RARE_CATEGORIES
        Numeric:
            robust_histogram: calculated using histogram_functions.calc_robust_histogram
            histogram: calculated using numpy.histogram
            outliers_ratio: ratio of the number of outliers to number of samples
            skew: calculated using scipy.stats.skew
            kurtosis: calculated using scipy.stats.kurtosis
            valid_ratio*: ratio of the number of finite numeric values to the number of samples
        Categorical / Binary:
            valid_ratio*: ratio of the number of not null like values and not whitespace strings to the number of
                samples
        Text:
            valid_ratio*: ratio of the number of not null like values and not whitespace strings to the number of
                samples
            important_words: for each word prediction_power, normalized_prediction_power and frequency
            character_statistics: dictionary with character statistics. For each statistic a dictionary with
                prediction_power, normalized_prediction_power and either robust_histogram or frequent_elements. The
                possible character statistics are:
                    word_count: number of words
                    char_count: string length
                    special_ratio: ratio of non alphanumeric characters to non-spaces in the string, 0 if empty string
                    digit_ratio: ratio of digits characters to non-spaces in the string, 0 if empty string
                    lower_ratio: ratio of lowercase characters to non-spaces in the string, 0 if empty string
                    capital_ratio: ratio of uppercase characters to non-spaces in the string, 0 if empty string
                Note that some of them could be missing if there's only one value. For example, word_count will be
                missing if all texts contain exactly one word.
        Datetime:
            valid_ratio: ratio of number valid datetime values to the number of samples
            datetime_features: dict. Prediction power and robust histogram or frequent elements stats for each of the
                following: "month", "hour", "weekday", "year", "minute", "second", "week". Note that some items could
                be missing if there is no relevant data in the feature
    """
    if y is not None and task is None:
        raise ValueError("Task must be specified when y is provided")
    if feature_type not in [ft.NUMERIC, ft.CATEGORICAL, ft.TEXT, ft.DATETIME, ft.BINARY, ft.VECTOR]:
        raise ValueError(
            f"feature_type must be one of {ft.NUMERIC}, {ft.CATEGORICAL}, {ft.TEXT}, {ft.DATETIME}, {ft.BINARY} or "
            f"{ft.VECTOR}"
        )
    if requested_stats is not None:
        for rs in requested_stats:
            assert rs in ["only_prediction_power", "text_stats", "text_patterns"]
        if "only_prediction_power" in requested_stats and len(requested_stats) > 1:
            raise ValueError(
                f"Other stats are not allowed when requested_stats contains only_prediction_power. Requested_stats "
                f"is {requested_stats}"
            )
    assert task is None or task in [tt.REGRESSION, tt.BINARY_CLASSIFICATION, tt.MULTICLASS_CLASSIFICATION]
    x_numpy, y_numpy = _numpy_conversion(x, y)
    # transform the feature using a default transform for this feature_type. The transformed feature is used to derive
    # some statistics. For example: prediction power
    # Statistics relevant to all feature types
    insights = {
        "name": x.name,
        "type": feature_type,
        "metrics": metrics,
    }
    if len(x_numpy) == 0:
        # Degenerate case: every row is missing; return minimal stats without
        # fitting a transform.
        insights["missing_ratio"] = 1
        insights["valid_ratio"] = 0
        insights["frequent_elements"] = []
        insights[cs.INSIGHTS] = []
        return insights
    feature_transform, x_transformed = get_feature_transform_and_transformed_x(feature_type, x_numpy)
    if y is not None:
        from sagemaker_data_insights.model_utils import _calc_prediction_power

        insights["prediction_power"], insights["normalized_prediction_power"] = _calc_prediction_power(
            x_transformed, y_numpy, task, random_state, n_jobs=n_jobs
        )
    if requested_stats is not None and "only_prediction_power" in requested_stats:
        return insights
    insights["frequent_elements"] = calc_frequent_elements(x_numpy, y_numpy, task)
    insights["missing_ratio"] = missing_ratio(metrics)
    # Add insights and statistics specific for each feature type
    insights.update(
        {
            ft.NUMERIC: lambda: _analyze_numeric_feature(
                x_transformed, y_numpy, metrics, task, insights["frequent_elements"], num_bins
            ),
            # Bug fix: n_jobs and random_state are passed by keyword. Previously
            # n_jobs was passed as the 6th positional argument, which bound it
            # to _analyze_text_feature's random_state parameter (silently
            # dropping n_jobs and perturbing the seed).
            ft.TEXT: lambda: _analyze_text_feature(
                x_numpy,
                y_numpy,
                metrics,
                task,
                num_bins,
                random_state=random_state,
                n_jobs=n_jobs,
                requested_stats=requested_stats,
            ),
            ft.DATETIME: lambda: _analyze_datetime_feature(
                feature_transform, x_transformed, y_numpy, metrics, task, num_bins, random_state
            ),
            ft.BINARY: lambda: _analyze_binary_feature(metrics),
            ft.CATEGORICAL: lambda: _analyze_categorical_feature(insights["frequent_elements"], metrics),
            ft.VECTOR: lambda: _analyze_vector_feature(metrics),
        }[feature_type]()
    )
    # Insights relevant to all feature types
    if "normalized_prediction_power" in insights:
        if insights["normalized_prediction_power"] > Insights.TARGET_LEAKAGE_THRESHOLD:
            insights[cs.INSIGHTS].append(Insights.generate(Insights.TARGET_LEAKAGE, Insights.HIGH))
        elif insights["normalized_prediction_power"] <= Insights.UNINFORMATIVE_FEATURE_THRESHOLD:
            insights[cs.INSIGHTS].append(Insights.generate(Insights.UNINFORMATIVE_FEATURE, Insights.LOW))
    if len(insights["frequent_elements"]["frequency"]) == 1:
        insights[cs.INSIGHTS].append(Insights.generate(Insights.CONSTANT_FEATURE, Insights.LOW))
    return insights
def get_valid_transformed_data(x_transformed: np.array, y: np.array):
    """Keep only the finite entries of the transformed feature (and matching y rows)."""
    finite_mask = np.isfinite(x_transformed)
    x_valid = x_transformed[finite_mask]
    if y is None:
        return finite_mask, None, x_valid
    return finite_mask, y[finite_mask].ravel(), x_valid
def _analyze_numeric_feature(
    x_transformed: np.array, y: np.array, metrics: dict, task: str, frequent_elements: dict, num_bins: int
):
    """Derive statistics specific to numeric features.

    Computes a robust histogram and a plain histogram over the finite
    transformed values, outlier/skew/kurtosis statistics, and the "disguised
    missing value" insight (one numeric value far more frequent than the rest).
    """
    valid_idxs, y_valid, x_transformed_valid = get_valid_transformed_data(x_transformed, y)
    robust_histogram = calc_robust_histogram(x_transformed_valid.ravel(), y_valid, task=task, num_bins=num_bins)
    num_outliers = robust_histogram_num_outliers(robust_histogram)
    # Plain (non-robust) histogram over the same finite values; by construction
    # it has no dedicated outlier bins.
    count, bin_edges = np.histogram(x_transformed_valid.ravel(), bins=num_bins)
    histogram = {
        "hist_count": count.astype(int).tolist(),
        "hist_edges": bin_edges.astype(float).tolist(),
        "lower_bin_is_outlier": False,
        "upper_bin_is_outlier": False,
    }
    # insights for numeric feature
    # A value is flagged as a "disguised missing value" when the most frequent
    # element dominates (absolute frequency above threshold, much larger than
    # the runner-up), is numeric, and the column has enough distinct values.
    insights = []
    if (
        frequent_elements["frequency"][0] > Insights.NUMERIC_DISGUISED_THRESHOLD
        and len(frequent_elements["frequency"]) > 1
        and frequent_elements["frequency"][0] > Insights.NUMERIC_DISGUISED_RATIO * frequent_elements["frequency"][1]
        and str(frequent_elements["value"][0]).isnumeric()
        and metrics["cardinality"] > Insights.NUMERIC_DISGUISED_MIN_UNIQUE
    ):
        insights.append(
            Insights.generate(
                Insights.NUMERIC_DISGUISED_MISSING_VALUE,
                Insights.MEDIUM_FEATURE,
                {"value": frequent_elements["value"][0], "frequency": frequent_elements["frequency"][0]},
            )
        )
    return {
        "robust_histogram": robust_histogram,
        "histogram": histogram,
        "outliers_ratio": float(num_outliers / sum(valid_idxs)),
        "skew": float(scipy.stats.skew(x_transformed_valid.ravel())),
        "kurtosis": float(scipy.stats.kurtosis(x_transformed_valid.ravel())),
        "valid_ratio": valid_ratio(metrics, ft.NUMERIC),
        cs.INSIGHTS: insights,
    }
def _analyze_categorical_feature(frequent_elements: dict, metrics: dict):
    """Derive statistics specific to categorical features.

    Flags the "rare categories" insight when more than two categories are much
    rarer than the most frequent one (relative frequency below
    `Insights.CATEGORICAL_RARE_CATEGORIES_THRESHOLD`).
    """
    insights = []
    if len(frequent_elements["frequency"]) == 0:  # The column contains only missing values
        return {"valid_ratio": 0, cs.INSIGHTS: insights}
    normalized_frequency = np.array(frequent_elements["frequency"]) / frequent_elements["frequency"][0]
    # Compute the mask once and reuse it everywhere. Previously the count used
    # the named threshold while the reported category lists used a hard-coded
    # 0.05, so they could disagree if the threshold changed.
    rare_mask = normalized_frequency < Insights.CATEGORICAL_RARE_CATEGORIES_THRESHOLD
    num_rare_categories = int(np.sum(rare_mask))
    if num_rare_categories > 2:
        rare_categories = list(np.array(frequent_elements["value"])[rare_mask])
        rare_categories_frequency = list(np.array(frequent_elements["frequency"])[rare_mask])
        insights.append(
            Insights.generate(
                Insights.CATEGORICAL_RARE_CATEGORIES,
                Insights.MEDIUM_FEATURE,
                {"rare_categories": rare_categories, "rare_categories_frequency": rare_categories_frequency},
            )
        )
    return {
        "valid_ratio": valid_ratio(metrics, ft.CATEGORICAL),
        cs.INSIGHTS: insights,
    }
def _analyze_binary_feature(metrics: dict):
    """Derive statistics specific to binary features: only the valid-value ratio."""
    return {"valid_ratio": valid_ratio(metrics, ft.BINARY), cs.INSIGHTS: []}
def _analyze_datetime_feature(
    feature_transform, x_transformed: np.array, y: np.array, metrics: dict, task: str, num_bins: int, random_state=0
):
    """Derive statistics specific to datetime features.

    For every internal component extracted by the fitted datetime transform
    (its `extract_` list; each extractor is named `extract_<component>`, e.g.
    `extract_year`), computes prediction power (when y is given) and either
    frequent elements or a robust histogram of the component values.
    """
    datetime_features = {}
    # go over the internal features produced by the feature_transform e.g. week, month, hour etc.
    for idx, e in enumerate(feature_transform.extract_):
        col = x_transformed[:, idx].reshape((-1, 1))
        # Non-finite entries correspond to rows the transform could not parse.
        valid_rows = np.isfinite(col)
        col = col[valid_rows].reshape((-1, 1))
        internal_feature_insights = {}
        assert e.extract_func.__name__[:8] == "extract_"
        internal_feature_name = e.extract_func.__name__[8:]  # remove `extract_` from the head of the string
        if y is not None:
            from sagemaker_data_insights.model_utils import _calc_prediction_power

            y_valid = y[valid_rows].reshape((-1, 1))
            (
                internal_feature_insights["prediction_power"],
                internal_feature_insights["normalized_prediction_power"],
            ) = _calc_prediction_power(col, y_valid, task, random_state)
        else:
            y_valid = None
        # Some internal feature types should always be frequent elements. For others, they are frequent elements when
        # they contain few unique elements or histogram when they contain many unique elements
        if internal_feature_name in ["quarter", "month", "hour", "weekday"] or len(np.unique(col)) <= num_bins:
            internal_feature_insights["frequent_elements"] = calc_frequent_elements(
                col.astype(int), y_valid, task=task, sort_type="value", max_num_elements=len(np.unique(col))
            )
        else:
            internal_feature_insights["robust_histogram"] = calc_robust_histogram(
                col, y_valid, task=task, num_bins=num_bins
            )
        datetime_features[internal_feature_name] = internal_feature_insights
    return {
        "valid_ratio": valid_ratio(metrics, ft.DATETIME),
        "datetime_features": datetime_features,
        cs.INSIGHTS: [],
    }
def _analyze_text_feature( # noqa: C901
    x: np.array,
    y: np.array,
    metrics: dict,
    task: str,
    num_bins: int,
    random_state: int = 0,
    n_jobs: int = 1,
    num_top_words: int = 200,
    requested_stats: list = None,
):
    """
    Derive statistics and insights specific to text features.
    Parameters
    ----------
    x : np.ndarray of size (height, 1)
        text feature
    y : np.ndarray of size (height, 1)
        clean and encoded target column. See the documentation in `analyze_feature`
    metrics : dictionary
        See the documentation in `analyze_feature`
    task : str or None (must be provided when y is provided)
        REGRESSION, BINARY_CLASSIFICATION or MULTICLASS_CLASSIFICATION
    num_bins : int >= 3
        number of bins to use in histograms. In some cases, this is used as the decision threshold between producing a
        histogram or frequent values: When there are more unique values than `num_bins` then a histogram is produced,
        otherwise - frequent values.
    random_state: int
        random seed used for RNG
    n_jobs : int
        number of cores for XGBoost in _calc_prediction_power
    num_top_words: int
        max number of most important words to return, see `from important_words` below
    requested_stats : list of strings or None
        Possible values:
            * 'text_stats' for statistics on the distrbution of characters and tokens
            * 'text_patterns' for results of an analysis of textual patterns

    NOTE(review): callers should pass random_state/n_jobs by keyword; passing
    n_jobs positionally would bind it to random_state.

    Returns
    -------
    dict: text feature insights. See analyze_feature
    """
    x_list = list(x.ravel())
    insights = {
        "valid_ratio": valid_ratio(metrics, ft.TEXT),
        cs.INSIGHTS: [],
        "character_statistics": {},
    }
    # Both text_stats and text_patterns are opt-in; without them only the
    # cheap defaults above are returned.
    if not requested_stats:
        return insights
    if "text_stats" in requested_stats:
        from sagemaker_data_insights.text_utils import CharacterStatistics

        # Numeric character statistics: from every string extract various ratio and count statistics. These are numeric
        # features that capture various characteristics of the string
        for desc, func in CharacterStatistics.functions.items():
            feat = np.vectorize(func)(x_list).reshape((-1, 1))
            num_unique = len(np.unique(feat))
            if num_unique <= 1:
                # A constant statistic carries no information; skip it.
                continue
            feat_stats = {}
            if y is not None:
                from sagemaker_data_insights.model_utils import _calc_prediction_power

                feat_stats["prediction_power"], feat_stats["normalized_prediction_power"] = _calc_prediction_power(
                    feat, y, task, random_state, n_jobs=n_jobs
                )
            if num_unique > num_bins:
                feat_stats["robust_histogram"] = calc_robust_histogram(feat, y, task, num_bins=num_bins)
            else:
                feat_stats["frequent_elements"] = calc_frequent_elements(
                    feat, y, task, max_num_elements=num_bins, sort_type="value"
                )
            insights["character_statistics"][desc] = feat_stats
        from sagemaker_data_insights import text_utils

        # token importance: add information about token importance when tokenizing based on words
        insights["important_words"] = text_utils.token_importance(
            x, y, task, random_state=random_state, analyzer="word", n_jobs=n_jobs, num_top_features=num_top_words
        )
    if "text_patterns" in requested_stats:
        from sagemaker_data_insights.patterns.analyze_patterns import analyze_text_patterns

        expression_set = analyze_text_patterns(x.reshape(-1), min_coverage=0.8, random_state=random_state)
        num_experiments, sample_size = expression_set.experiment_statistics()
        # The pattern table is assembled column-wise; rows after the first for a
        # given pattern are padded with empty strings so columns stay aligned.
        pattern_columns = ["Pattern", "Relevance", "Regular expression", "Matches", "Non-matches"]
        pattern_dict = {k: [] for k in pattern_columns}
        for expr in expression_set.ranked_expressions():
            pattern = expr.annotated_str()
            confidence = expr.coverage_accumulator.value()
            # Tokenize leading/trailing whitespace in matches and non-matches so it is visible.
            matches = _sanitize_strings(expr.matches_histogram.top_n(5))
            nonmatches = _sanitize_strings(expr.outliers_histogram.top_n(5))
            num_rows = max(len(matches), len(nonmatches))
            padding = [""] * (num_rows - 1)
            pattern_dict["Pattern"].extend([pattern] + padding)
            # Our external language for accuracy/confidence is 'Relevance'.
            pattern_dict["Relevance"].extend(["{:.2f}".format(100 * confidence)] + padding)
            pattern_dict["Regular expression"].extend([expr.regex(use_token_lengths=True)] + padding)
            pattern_dict["Matches"].extend(matches + [""] * (num_rows - len(matches)))
            pattern_dict["Non-matches"].extend(nonmatches + [""] * (num_rows - len(nonmatches)))
            # Only near-perfect (but not perfect) patterns are surfaced as an insight.
            if confidence < 1 and confidence >= Insights.HIGH_CONFIDENCE_PATTERN_THRESHOLD:
                insights[cs.INSIGHTS].append(
                    Insights.generate(
                        Insights.HIGH_CONFIDENCE_PATTERN,
                        Insights.MEDIUM,
                        {
                            "pattern": pattern,
                            "confidence": confidence,
                            "num_experiments": num_experiments,
                            "sample_size": sample_size,
                        },
                    )
                )
        # If there are no patterns, return a table with a single column and an informative message.
        if expression_set.best_expression() is None:
            pattern_columns = ["Pattern"]
            pattern_dict = {"Pattern": ["No textual patterns found."]}
        pattern_df = pd.DataFrame(columns=pattern_columns, data=pattern_dict)
        insights["text_patterns"] = pattern_df.to_dict()
    return insights
def _show_whitespace(str):
"""Replaces leading and trailing whitespace with tokens. Additionally tokenizes an empty string."""
if str == "":
return "{empty string}"
WHITESPACE = "{whitespace}"
str = re.sub(r"^\s+", WHITESPACE, str)
str = re.sub(r"\s+$", WHITESPACE, str)
return str
def _sanitize_strings(strs):
    """Prepare strings for display.

    Replaces leading and trailing whitespace with visible tokens, then removes
    duplicates while preserving the first-seen order.
    """
    seen = dict.fromkeys(_show_whitespace(s) for s in strs)
    return list(seen)
def _analyze_vector_feature(metrics):
    """Derive statistics specific to vector features: only the valid-value ratio."""
    return {"valid_ratio": valid_ratio(metrics, ft.VECTOR), cs.INSIGHTS: []}
def get_feature_type(metrics: dict, allowed_types: list = None, prefer_categorical=False):
    """
    Feature type analyzer.

    Parameters
    ----------
    metrics : dict
        must include all the following keys: `median`, `numeric_finite_count`, `nrows`, `null_like_count`,
        `empty_count`, `whitespace_count`, `cardinality`, `datetime_count`, `datetime_non_float_count` and
        `datetime_rows_parsed`. Key definitions can be found in const.py. While `x` is usually a sample of the data,
        the metrics should be calculated on the whole data or on a larger sample.
    allowed_types: list(str), optional
        List of allowed feature types. Can include the following types from const.py: NUMERIC, CATEGORICAL, TEXT,
        DATETIME, BINARY. By default includes all.
    prefer_categorical: bool
        Prefer categorical types to numerical types in case of ties.
        TODO. This flag is being used for Ganymede and will be tested for use as
        a default option in Data Insights. If it becomes a default option,
        consumers of data insights will need to do regression tests when they
        upgrade to the 2.0 branch (this branch).

    Returns
    -------
    str: feature type
        The type with the highest probability out of the types allowed in `allowed_types`
    dict: feature type probabilities
        The probability for the feature to be each of the types NUMERIC, CATEGORICAL, TEXT, DATETIME, BINARY. Types
        not included in `allowed_types` will have a zero probability

    Raises
    ------
    ValueError
        If `allowed_types` contains an unknown type name.
    """
    # TODO: add detection of ft.VECTOR copy logic from
    # https://code.amazon.com/packages/AIAlgorithmsDataInsights/commits/081735f1f34b8c8ea7e24f76c390f84036f98e84
    # The order of all_types is used to break ties; the types are ordered by importance.
    if prefer_categorical:
        all_types = [ft.BINARY, ft.CATEGORICAL, ft.NUMERIC, ft.TEXT, ft.DATETIME]
    else:
        all_types = [ft.BINARY, ft.NUMERIC, ft.CATEGORICAL, ft.TEXT, ft.DATETIME]
    if not allowed_types:
        allowed_types = all_types
    else:
        for t in allowed_types:
            if t not in all_types:
                # Bug fix: report the set of valid type names (all_types), not the
                # user-supplied list that contains the invalid entry.
                raise ValueError(f"Error: type {t} is not allowed. Allowed types: {all_types}")
    probs = _calculate_schema_probs(
        allowed_types,
        metrics["median"],
        metrics["numeric_finite_count"],
        metrics["nrows"],
        metrics["null_like_count"],
        metrics["empty_count"],
        metrics["whitespace_count"],
        metrics["cardinality"],
        metrics["datetime_count"],
        metrics["datetime_non_float_count"],
        metrics["datetime_rows_parsed"],
    )
    # We will use the column type with the highest score, breaking ties using
    # binary > numeric > categorical > text > datetime unless prefer_categorical is set, in which case we use
    # binary > categorical > numeric > text > datetime to break ties.
    score_type_pairs = [(probs[key], key) for key in all_types]
    score_type_pairs.sort(key=lambda x: -x[0])  # This sort is stable, so will preserve order above on ties
    _, col_type = score_type_pairs[0]
    return col_type, probs
def _calculate_schema_probs(  # noqa: C901
    allowed_types,
    median,
    numeric_finite_count,
    nrows,
    null_like_count,
    empty_count,
    whitespace_count,
    cardinality,
    datetime_count,
    datetime_non_float_count,
    datetime_rows_parsed,
):
    """
    Calculates the probabilities for the feature to be any of the types of `allowed_types` based on a set of
    heuristic rules.

    Parameters
    ----------
    allowed_types: list(str)
        List of allowed feature types. Can include the following types from const.py: NUMERIC, CATEGORICAL, TEXT,
        DATETIME, BINARY. By default includes all.
    median: see description in `const.py`
    numeric_finite_count: see description in `const.py`
    nrows: see description in `const.py`
    null_like_count: see description in `const.py`
    empty_count: see description in `const.py`
    whitespace_count: see description in `const.py`
    cardinality: see description in `const.py`
    datetime_count: see description in `const.py`
    datetime_non_float_count: see description in `const.py`
    datetime_rows_parsed: see description in `const.py`

    Returns
    -------
    dict: feature type probabilities
        The probability for the feature to be each of the types NUMERIC, CATEGORICAL, TEXT, DATETIME, BINARY. Types
        not included in `allowed_types` will have a zero probability.

    Raises
    ------
    ValueError
        If every allowed type receives a zero score.
    """
    # Initialize all type scores to zero; disallowed types simply keep a zero score.
    numeric = 0
    categorical = 0
    text = 0
    datetime = 0
    binary = 0
    # Probability-like score of column being numeric is proportional to the fraction of entries castable to float.
    # NOTE(review): the denominator is zero when every row is null/empty/whitespace-only;
    # presumably upstream guarantees at least one valid row — confirm.
    if ft.NUMERIC in allowed_types and not np.isnan(median):
        numeric = numeric_finite_count / (nrows - null_like_count - empty_count - whitespace_count)
    # Probability-like score of column being text is proportional to the fraction of non-numeric, non-empty entries.
    if ft.TEXT in allowed_types:
        text_like_rows = nrows - numeric_finite_count - null_like_count - empty_count - whitespace_count
        text = text_like_rows / nrows
    if cardinality == 2:
        # Exactly two distinct values: binary (and/or categorical) wins outright.
        if ft.BINARY in allowed_types:
            binary = 1.0
            text = 0.0
        if ft.CATEGORICAL in allowed_types:
            categorical = 1.0
            text = 0.0
    elif ft.CATEGORICAL in allowed_types:
        categorical_ratio = cardinality / nrows
        if categorical_ratio < 0.01:
            # If there are on average more than 1/.01 = 100 entries per category, use full categorical and disable text.
            categorical = 1.0
            text = 0.0
        elif 0.01 <= categorical_ratio < 0.2:
            # If there are on average fewer than 1/.01 = 100 but more than 1/.2 = 5 entries per category,
            # then proportionally decrease probability to zero.
            categorical = 1 - (categorical_ratio - 0.01) / 0.19
        else:
            # Don't count as categorical if on average there are fewer than 5 entries per category.
            categorical = 0.0
    # Datetime dominates when enough rows parse as dates (>60%) and a non-trivial
    # fraction (>5%) of those are not plain numbers (which would parse spuriously).
    if (
        ft.DATETIME in allowed_types
        and datetime_non_float_count / datetime_rows_parsed > 0.05
        and datetime_count / datetime_rows_parsed > 0.6
    ):
        datetime = 1.0
        text = 0.0
        categorical = 0.0
        numeric = 0.0
    # Normalize so that scores sum to 1.
    normalizing_sum = numeric + categorical + text + datetime + binary
    if normalizing_sum == 0:
        raise ValueError(f"Error: scores for all allowed types are zero. Allowed types: {allowed_types}")
    numeric = numeric / normalizing_sum
    categorical = categorical / normalizing_sum
    text = text / normalizing_sum
    datetime = datetime / normalizing_sum
    binary = binary / normalizing_sum
    return {ft.NUMERIC: numeric, ft.CATEGORICAL: categorical, ft.TEXT: text, ft.DATETIME: datetime, ft.BINARY: binary}
import pandas as pd
import numpy as np
from sagemaker_data_insights.const import FeatureType as ft
from sagemaker_data_insights.histogram_functions import _unique_without_whitespaces
from sagemaker_data_insights.utils.feature_transform import get_feature_transform
def _calc_stats_pandas_series(
    x: pd.Series,
    max_unique_labels: int = 100,
    datetime_num_rows: int = 10000,
    dw_light_mode: bool = False,
    optimize_datetime_parsing: bool = False,
    random_state: int = 1,
):
    """Compute the per-column statistics consumed by the feature-type heuristics.

    Args:
        x (pd.Series): the column to analyze.
        max_unique_labels (int): cap on how many distinct labels/counts are returned
            (full mode only).
        datetime_num_rows (int): only the first `datetime_num_rows` rows are parsed
            as datetimes (performance guard).
        dw_light_mode (bool): when True, skip the label/min/max/mean/integer stats.
        optimize_datetime_parsing (bool): when True, probe a 1000-row sample first and
            only parse the full head if the sample looks overwhelmingly date-like.
        random_state (int): seed for the datetime probe sample.

    Returns:
        dict: statistic name -> value; keys match the metric names expected by
        `get_feature_type` (see const.py).
    """
    nrows = x.shape[0]
    x_no_na = x.dropna()
    x_numpy = x.astype(str).to_numpy().astype(str)
    unique, counts = _unique_without_whitespaces(x_no_na.to_numpy(dtype=str))
    x_head = x.astype(str).head(datetime_num_rows)
    # Cast every entry through the NUMERIC transform; non-numeric entries become NaN.
    x_float = get_feature_transform(ft.NUMERIC, True).fit_transform(x_numpy.reshape((-1, 1))).ravel()
    is_numeric = ~pd.isnull(x_float)
    numeric_count = np.sum(is_numeric)
    DATETIME_SAMPLE_SIZE = 1000
    DATETIME_SAMPLE_THRESHOLD = 0.95
    if len(x) < DATETIME_SAMPLE_SIZE:
        optimize_datetime_parsing = False
    reparse_datetimes = False
    if optimize_datetime_parsing:
        # Datetime parsing is slow when operating on columns without many dates
        # such as long textual columns or categorical columns. We implement a
        # short-circuit that samples a column. If there aren't many dates in
        # that column, then the column is marked as not a date column.
        # If there are many dates in the column, the column will be parsed as
        # usual. We use a sample size of 1000 and a threshold of 95% - meaning
        # that at least 95% of the sample must be a 'date' in order to consider
        # parsing the column further.
        # Currently `optimize_date_parsing` is used in Ganymede for performance
        # reasons with dates.
        x_sample = x_head.sample(n=DATETIME_SAMPLE_SIZE, random_state=random_state)
        # NOTE(review): `is_numeric` is a positional numpy array but is indexed here
        # with the sample's pandas index labels — assumes a default RangeIndex; confirm.
        is_numeric_sample = is_numeric[x_sample.index]
        is_datetime = ~pd.isnull(pd.to_datetime(x_sample.astype(str), errors="coerce"))
        is_datetime_non_numeric = is_datetime * ~is_numeric_sample[:datetime_num_rows]
        # Multiplication factor from our sample to the entire column.
        factor = len(x) / DATETIME_SAMPLE_SIZE
        datetime_count = int(np.sum(is_datetime) * factor)
        if datetime_count / len(x) > DATETIME_SAMPLE_THRESHOLD:
            # If our sample shows that the majority of rows contain dates, then we can proceed to completely parse
            # the row as it won't be slow.
            reparse_datetimes = True
        else:
            # To avoid recomputing this, we only compute it if we are not going to reparse datetimes.
            datetime_non_float_count = int(np.sum(is_datetime_non_numeric) * factor)
    if not optimize_datetime_parsing or reparse_datetimes:
        # This code path is used by Canvas and parses the 'head' only.
        is_datetime = ~pd.isnull(pd.to_datetime(x_head, errors="coerce"))
        is_datetime_non_numeric = is_datetime * ~is_numeric[:datetime_num_rows]
        datetime_count = int(np.sum(is_datetime))
        datetime_non_float_count = int(np.sum(is_datetime_non_numeric))
    stats = {
        "cardinality": len(unique),
        "median": float(np.nanmedian(x_float)) if numeric_count > 0 else np.nan,
        "numeric_finite_count": int(np.sum(is_numeric)),
        "null_like_count": int(nrows - x_no_na.shape[0]),
        "empty_count": int(np.sum(x_numpy == "")),
        # Whitespace-only entries, excluding truly empty strings (counted above).
        "whitespace_count": int(np.sum(np.char.strip(x_numpy) == "")) - int(np.sum(x_numpy == "")),
        "datetime_count": datetime_count,
        "datetime_non_float_count": datetime_non_float_count,
        "datetime_rows_parsed": len(is_datetime),
        "nrows": nrows,
    }
    if not dw_light_mode:
        stats.update(
            {
                "labels": unique.tolist()[:max_unique_labels],
                "label_counts": counts.tolist()[:max_unique_labels],
                "max": float(np.nanmax(x_float)) if numeric_count > 0 else np.nan,
                "min": float(np.nanmin(x_float)) if numeric_count > 0 else np.nan,
                "mean": float(np.nanmean(x_float)) if numeric_count > 0 else np.nan,
                "integer_count": int(np.sum([val.is_integer() for val in x_float])),
            }
        )
    return stats
from typing import List, Dict
from collections import Counter
from difflib import SequenceMatcher
import logging
import numpy as np
from sklearn.cluster import AgglomerativeClustering
def find_duplicate_categories(
    strs: List[str],
    max_categories: int = 100,
    max_str_length=50,
    correction_threshold=0.7,
    distance_threshold: float = 0.1,
) -> Dict[str, str]:
    """Given a list of textual categories containing typographical errors, this
    method will return a mapping that defines corrections to those categories.

    Args:
        strs (List[str]): A list of textual categories.
        max_categories (int, optional): The maximum number of distinct categories to
            process (for performance).
        max_str_length (int, optional): The maximum length of a string to consider (for performance).
        correction_threshold (float, optional): Corrections are only made if the
            most popular category in a cluster occurs with at least this relative
            frequency within the cluster.
        distance_threshold (float, optional): The distance threshold to use when
            computing clusters (0-1).

    Returns:
        Dict[str, str]: A map from categories to corrected categories. Only
        categories to be corrected are included in this map.
    """
    # 1. Preprocess and validate input.
    # Remove duplicates. This is crucial for performance if the list has many duplicate entries.
    deduped_strs = set(strs)
    # Remove strings that are too long. `deduped_strs` is already unique, so no
    # further de-duplication is needed.
    categories = [c for c in deduped_strs if len(c) <= max_str_length]
    n = len(categories)
    # If there are too few distinct categories, there is nothing to do. If there
    # are too many, we don't proceed due to performance constraints.
    # Note: it would be a better optimization to choose the top max_categories to process.
    if n > max_categories or n < 2:
        return {}
    # 2. Compute the distance matrix for the set of categories.
    distances = _compute_distance_matrix(categories)
    # 3. Find hierarchical clusters in the categories.
    hierarchial = AgglomerativeClustering(
        n_clusters=None, distance_threshold=distance_threshold, affinity="precomputed", linkage="complete"
    )
    hierarchial.fit(distances)
    # Interpret the hierarchical clusters. fit() labels each category with a
    # cluster ordinal; invert that into cluster -> list of category indices.
    labels = hierarchial.labels_
    label_indices = {}
    for index, label in enumerate(labels):
        label_indices.setdefault(label, []).append(index)
    # 4. Construct a mapping of category counts that we will use to sort categories by popularity.
    category_counts = Counter(strs)
    category_mapping = {}
    for indices in label_indices.values():
        # Singleton clusters have no typos to correct.
        if len(indices) <= 1:
            continue
        # Create a list of ordered pairs containing categories and their counts.
        category_class = [categories[i] for i in indices]
        category_class_with_counts = [(cat, category_counts[cat]) for cat in category_class]
        # Find the canonical representative of the equivalence class based on popularity.
        representative, representative_count = max(category_class_with_counts, key=lambda x: x[1])
        # Make sure the category representative meets the correction_threshold to make corrections.
        total_count = sum(category_counts[cat] for cat in category_class)
        popularity = representative_count / total_count
        if popularity < correction_threshold:
            continue
        logging.debug("Found %d category typo(s) for textual category", len(category_class) - 1)
        # Add the mappings to the category mapping.
        for cat in category_class:
            if cat == representative:
                continue
            category_mapping[cat] = representative
    return category_mapping
def correct_category_typos(strs: List[str]) -> List[str]:
    """Given a list of textual categories containing typographical errors,
    return a corrected copy of the list.

    Args:
        strs (List[str]): A list of textual categories.

    Returns:
        List[str]: A corrected list of categories.
    """
    category_mapping = find_duplicate_categories(strs)
    # For each string, substitute its canonical spelling if one was found,
    # otherwise keep it unchanged.
    return [category_mapping.get(s, s) for s in strs]
def _compute_distance_matrix(strs: List[str]) -> np.ndarray:
    """Computes a pairwise distance matrix (1 - similarity) over the strings
    using fuzzy string matching."""
    n = len(strs)
    out = np.zeros(shape=(n, n))
    # Full n x n loop; similarity_score is not guaranteed to be perfectly
    # symmetric, so we do not mirror the upper triangle.
    for i in range(n):
        for j in range(n):
            out[i, j] = 1 - similarity_score(strs[i], strs[j])
    return out
def similarity_score(s1: str, s2: str) -> float:
    """A similarity score between strings.

    This uses difflib's SequenceMatcher (Ratcliff/Obershelp pattern matching)
    under the hood — not Levenshtein distance, as the score is based on the
    length of matching blocks rather than edit operations.

    Args:
        s1 (str): The first string
        s2 (str): The second string

    Returns:
        float: A number from 0 to 1 indicating how similar the strings are, with 1 being the most similar.
    """
    # Treat any empty/None operand as completely dissimilar.
    if not s1 or not s2:
        return 0.0
    m = SequenceMatcher(None, s1, s2)
    return m.ratio()
from typing import List
import numpy as np
from .tokens import Tokens
from .expression import Expression, ExpressionSet, ExpressionSetType
from .parse import Parse
def analyze_text_patterns(
    strs: List[str],
    min_coverage: float = 0.8,
    sampling_iterations: int = 10,
    sampling_size: int = 30,
    max_tokens: int = 100,
    min_examples: int = 2,
    random_state: int = 0,
) -> ExpressionSetType:
    """Analyze strings and extract patterns.

    A list of strings (potentially large) is sampled and resampled. Each time
    the sample strings are parsed into expressions, and the expressions are
    tested for coverage accuracy against the sample, and the sample is used to
    record token length statistics. After a number of iterations, a set of
    candidate expressions is returned.

    The default values for the parameters have been tested on live and synthetic
    data sets. They balance performance and accuracy and shouldn't need to be
    adjusted.

    Args:
        strs (List[str]): a list of strings to analyze
        min_coverage (float, optional): The cut-off of minimum coverage of the expressions to return.
        sampling_iterations (int, optional): The number of samples to take in order to find patterns and statistics.
        sampling_size (int, optional): The sample size of strings to take when heuristically computing patterns and
                                       statistics. If there are fewer strings than this size, the entire set of strings
                                       will be used.
        max_tokens (int, optional): The maximum number of tokens allowed to parse in a string. Strings with more tokens
                                    are filtered out.
        min_examples (int, optional): Samples are filtered for suitable strings. This is the minimum size of a filtered
                                      sample to analyze.
        random_state (int, optional): A random state to seed the random number generator.

    Returns:
        ExpressionSetType: An expression set containing extracted (pattern)
        expressions and recorded statistics such as token length statistics or
        coverage statistics.
    """
    # The preferred method for seeding a random number generator. See NEP 19
    # https://numpy.org/neps/nep-0019-rng-policy.html
    rng = np.random.default_rng(random_state)
    # In the event we don't have many strings, we can optimize the experiment:
    # one pass over the whole population instead of repeated sampling.
    if len(strs) <= sampling_size:
        sampling_size = len(strs)
        sampling_iterations = 1
    expression_set = ExpressionSet(min_coverage=min_coverage)
    for _ in range(sampling_iterations):
        sample = rng.choice(strs, sampling_size, replace=False)
        # Remove strings with too many tokens.
        sample = list(filter(lambda x: len(Tokens.split_by_delimiters(x)) <= max_tokens, sample))
        # Ensure we have enough strings after filtering for long strings.
        if len(sample) < min_examples:
            continue
        # Parse the set of strings into pattern expressions.
        token_lists = [Parse.parse(s) for s in sample]
        exprs = [Expression(t) for t in token_lists]
        # Use the sample to calculate approximations to the coverage and token
        # length statistics.
        for expr in exprs:
            expr.coverage(sample)
        # Combine the new expressions into the expression set, aggregating any new statistics.
        expression_set.combine(exprs)
        expression_set.record_experiment(len(sample))
    return expression_set
from typing import Any
class AverageAccumulator:
    """Maintains a running sum and count so the mean can be queried at any time."""

    def __init__(self):
        self.sum = 0.0
        self.n = 0

    def value(self) -> float:
        """Return the mean of everything accumulated so far, or 0 if nothing
        has been accumulated yet."""
        return self.sum / self.n if self.n else 0

    def accumulate(self, o: Any) -> None:
        """Fold in either another AverageAccumulator (merging its sum and
        count) or a single numeric observation."""
        if isinstance(o, AverageAccumulator):
            self.sum, self.n = self.sum + o.sum, self.n + o.n
        else:
            self.sum, self.n = self.sum + o, self.n + 1

    def reset(self):
        """Discard all accumulated data."""
        self.sum, self.n = 0.0, 0
class Histogram:
    """A class used for storing histogrammic (value -> count) data."""

    def __init__(self):
        self.hist = {}

    def witness(self, val, count=1):
        """Record `count` additional occurrences of `val` in the histogram."""
        # dict.get with a default replaces the original membership check.
        self.hist[val] = self.hist.get(val, 0) + count

    def top_n(self, n):
        """Returns up to the top `n` values, sorted by descending count.

        If the histogram holds fewer than `n` distinct values, all of them are
        returned. The sort is stable, so ties keep insertion order.
        """
        ordered = sorted(self.hist.items(), key=lambda item: item[1], reverse=True)
        # Slicing handles n larger than the histogram, so no explicit clamp is needed.
        return [val for val, _ in ordered[:n]]

    def merge(self, hist):
        """Merge statistics from another histogram into this histogram."""
        for val, count in hist.hist.items():
            self.witness(val, count)

    def __str__(self):
        return str(self.hist)

    # TODO. This is fine for the prototype but ideally we want to do something
    # more intelligent using the histogramatic data we have. We currently take
    # the smallest and the largest witnessed tokens as the
    # lower and upper bounds, but perhaps we want to exclude outliers better.
    def range(self):
        """Returns an upper and lower bound for the token lengths based on
        witnessed values, or (None, None) when the histogram is empty."""
        if not self.hist:
            return None, None
        return min(self.hist.keys()), max(self.hist.keys())
import re
from typing import List, Type, Tuple, Optional
LengthSpecifier = Tuple[int, int]


class Token:
    """A class to store tokens for pattern recognition.

    This class represents a single token and contains information to generate
    the equivalent regular expression for this token. The reason we have this
    class is to natively express the simplified context-free grammar for Pattern
    learning. This CFG is contained within the language of all regular
    expressions. Thus each token has a regular expression.

    N.B. Each instantiation of this class is meant to be a singleton class, and
    there is exactly one instantiation for each Token. This is by design to
    reduce memory churn. You should not be instantiating this class outside of
    the Tokens class.
    """

    def __init__(
        self,
        token: str,
        token_regex: str = "",
        head_length: int = 0,
        length_specifier: Optional[LengthSpecifier] = None,
        specificity: Optional[int] = None,
    ):
        """Initializer for Token.

        A token's regular expression consists of an optional "head" (the first
        `head_length` characters of `token_regex`) that is matched exactly once,
        followed by the remaining "base" expression repeated according to
        `length_specifier`.

        Example 1. To match zero or more digits:
            Token("{digits}", token_regex="[0-9]", length_specifier="*")
        Example 2. To match a capitalized word:
            Token("{name}", token_regex="[A-Z][a-z]", head_length=1, length_specifier="*")
        Example 3. To match a literal comma delimiter (no repetition):
            Token(",", token_regex=",")

        Args:
            token (str): The string representation of this token. Conventionally surrounded
                by braces for semantic types.
            token_regex (str): The regular expression for this token (head plus base).
            head_length (int, optional): The number of leading characters of `token_regex`
                that form the fixed head, matched exactly once.
            length_specifier (optional): The default repetition specifier (e.g. '+', '*')
                appended to the regular expression, or None for delimiter tokens that match
                exactly once.
                NOTE(review): this default field holds a quantifier *string*, whereas the
                override passed to `regex()` is an (int, int) pair — the shared
                LengthSpecifier annotation is misleading; confirm intent.
            specificity (int, optional): A ranked measure of how specific the token is;
                higher is more specific.
        """
        self.token = token
        self.token_regex = token_regex
        self.head_length = head_length
        self.length_specifier = length_specifier
        self.specificity = specificity

    def regex(self, length_specifier: Optional[LengthSpecifier] = None) -> str:
        """Returns the regular expression for this token, optionally overriding the
        default quantifier with an explicit (lower, upper) token-length pair.

        Args:
            length_specifier (pair of int, optional): lower and upper total token lengths
                (including the head). Defaults to None, which uses the token's default
                quantifier.

        Returns:
            str: A string representation of the regular expression for this token.
        """
        if self.length_specifier is None:
            # Length specification for delimiters is ignored.
            return self.token_regex
        if length_specifier is None:
            # No range specified, so use the default length specifier.
            return f"{self.token_regex}{self.length_specifier}"
        # A length range is given. Adjust it to account for the (required) head length.
        lower = length_specifier[0] - self.head_length
        upper = length_specifier[1] - self.head_length
        if upper <= 1:
            return f"{self.token_regex}"
        if lower == upper:
            return f"{self.token_regex}{{{lower}}}"
        return f"{self.token_regex}{{{lower},{upper}}}"

    def match(self, target, length_range=None):
        """Returns the re.Match if `target` fully matches this token's regex
        (built with the optional length range), otherwise None."""
        # TODO: We can optimize this by memoization.
        return re.fullmatch(self.regex(length_specifier=length_range), target)

    def __str__(self):
        """Returns the token string representation."""
        return self.token
TokenType = Type[Token]
class Tokens:
    """A class containing all the Token category types for semantic types and delimiters and some related methods."""

    # Definitions of semantic types.
    # N.B. In future versions we will add additional simple types such as
    # {TitleCase} | {camelCase} | {snake-case}
    # as well as more complex types such as e-mail address, IP Address, etc.
    ANY = Token("{any}", token_regex="\\w", specificity=0, length_specifier="*")
    ALPHANUM = Token("{alnum}", token_regex="[A-Za-z0-9]", specificity=1, length_specifier="*")
    MIXED = Token("{mixed}", token_regex="[A-Za-z]", specificity=2, length_specifier="*")
    LOWER = Token("{lower}", token_regex="[a-z]", specificity=3, length_specifier="*")
    UPPER = Token("{upper}", token_regex="[A-Z]", specificity=3, length_specifier="*")
    NAME = Token("{name}", token_regex="[A-Z][a-z]", head_length=1, specificity=4, length_specifier="*",)
    DIGITS = Token("{digits}", token_regex="[0-9]", specificity=5, length_specifier="*")
    # Semantic types from highest to lowest specificity.
    SEMANTIC_TYPES = [DIGITS, NAME, UPPER, LOWER, MIXED, ALPHANUM, ANY]
    # Definitions of delimiter types.
    DELIM_COMMA = Token(",", token_regex=",", specificity=10)
    DELIM_HYPHEN = Token("-", token_regex="\\-", specificity=10)
    DELIM_DOUBLE_QUOTE = Token('"', token_regex='"', specificity=10)
    DELIM_NEWLINE = Token("{newline}", token_regex="\\n", specificity=10)
    DELIM_LEFT_BRACE = Token("[", token_regex="\\[", specificity=10)
    DELIM_LEFT_PAREN = Token("(", token_regex="\\(", specificity=10)
    DELIM_BACKWARD_SLASH = Token("\\", token_regex="\\\\", specificity=10)
    DELIM_PERIOD = Token(".", token_regex="\\.", specificity=10)
    DELIM_RIGHT_BRACE = Token("]", token_regex="\\]", specificity=10)
    DELIM_RIGHT_PAREN = Token(")", token_regex="\\)", specificity=10)
    # NOTE(review): DELIM_RIGHT_HYPHEN duplicates DELIM_HYPHEN (same token and regex);
    # removing it would change SEPARATOR_TYPES/DELIMITER_REGEX, so it is kept as-is.
    DELIM_RIGHT_HYPHEN = Token("-", token_regex="\\-", specificity=10)
    DELIM_COLON = Token(":", token_regex=":", specificity=10)
    DELIM_SEMIC = Token(";", token_regex=";", specificity=10)
    DELIM_SINGLE_QUOTE = Token("'", token_regex="'", specificity=10)
    DELIM_SPACE = Token(" ", token_regex=" ", specificity=10)
    DELIM_TAB = Token("{tab}", token_regex="\t", specificity=10)
    DELIM_FORWARD_SLASH = Token("/", token_regex="/", specificity=10)
    # Separator types from highest to lowest specificity.
    SEPARATOR_TYPES = [
        DELIM_COMMA,
        DELIM_HYPHEN,
        DELIM_DOUBLE_QUOTE,
        DELIM_NEWLINE,
        DELIM_LEFT_BRACE,
        DELIM_LEFT_PAREN,
        DELIM_BACKWARD_SLASH,
        DELIM_PERIOD,
        DELIM_RIGHT_BRACE,
        DELIM_RIGHT_PAREN,
        DELIM_RIGHT_HYPHEN,
        DELIM_COLON,
        DELIM_SEMIC,
        DELIM_SINGLE_QUOTE,
        DELIM_SPACE,
        DELIM_TAB,
        DELIM_FORWARD_SLASH,
    ]
    # String forms of all delimiters, and a character class matching any of them.
    DELIMITER_LIST = [str(d) for d in SEPARATOR_TYPES]
    DELIMITER_REGEX = f"[{''.join([x.regex() for x in SEPARATOR_TYPES])}]"
    # Pre-compiled splitters: the capturing-group variant keeps the delimiters
    # in the result of re.split.
    SPLIT_BY_DELIMS_REGEX = re.compile(DELIMITER_REGEX)
    SPLIT_WITH_DELIMS_REGEX = re.compile(f"({DELIMITER_REGEX})")

    @staticmethod
    def is_delimiter(token: str) -> bool:
        """Is the token a delimiter."""
        return token in Tokens.DELIMITER_LIST

    @staticmethod
    def split_by_delimiters(s, keep_delimiters: bool = True) -> List[str]:
        """Splits based on the class's list of delimiters.

        Args:
            s (str): the string to split.
            keep_delimiters: Whether or not to keep the delimiters in the split.

        Returns:
            A list of substrings split by the delimiters.
        """
        if keep_delimiters:
            splits = Tokens.SPLIT_WITH_DELIMS_REGEX.split(s)
        else:
            splits = Tokens.SPLIT_BY_DELIMS_REGEX.split(s)
        # re.split returns empty strings in the split so that it is the inverse function
        # of re.join. We remove those empty strings for the intended split behavior.
        return list(filter(lambda x: len(x) > 0, splits))
import logging
import pandas as pd
from sagemaker_data_insights.column_data_insights.utils import _get_transformed_col_data
from sagemaker_data_insights.const import FeatureType as ft
from sagemaker_data_insights.analyze_feature import get_feature_type, missing_ratio, valid_ratio
from .constants import ColumnDataInsightsParamsKeys as CDP
from .utils import _calc_column_stats, _fetch_categorical_data, _fetch_non_categorical_data
def _get_column_profile(unique_count, feature_type: str, pandas_df_col: pd.Series) -> dict:
    """
    Get column profile data

    Args:
        unique_count: Number of unique items in the column
        feature_type: Logical data type [ft.NUMERIC, ft.DATETIME, ft.CATEGORICAL, ft.BINARY, ft.TEXT]
        pandas_df_col: Column pandas.Series

    Returns:
        data: JSON of either robust histogram data or categorical data; empty
        dict for any other feature type
    """
    transformed = _get_transformed_col_data(feature_type, pandas_df_col)
    # Numeric-like types get a histogram profile; discrete types get category counts.
    if feature_type in (ft.NUMERIC, ft.DATETIME):
        return _fetch_non_categorical_data(unique_count, transformed)
    if feature_type in (ft.CATEGORICAL, ft.BINARY, ft.TEXT):
        return _fetch_categorical_data(unique_count, transformed)
    return {}
def get_column_insights_data(col: str, pandas_df_col: pd.Series) -> tuple:
    """
    Get insights data for a single column.

    Args:
        col: Column name
        pandas_df_col: Column pandas.Series

    Returns:
        tuple: (res, stats_by_col). `res` is a JSON-style dict of column insights
        (type, valid/invalid/missing ratios, profile). `stats_by_col` maps the
        column name to its raw statistics, and is empty whenever an earlier stage
        failed.
    """
    res = {
        CDP.COLUMN_NAME: col,
        CDP.LOGICAL_DATA_TYPE: None,
        CDP.VALID_RATIO: None,
        CDP.INVALID_RATIO: None,
        CDP.MISSING_RATIO: None,
        CDP.COLUMN_PROFILE: None,
    }
    # Each stage is individually guarded: a failure logs and returns the partial
    # result rather than aborting insights for the whole dataset.
    try:
        stats = _calc_column_stats(pandas_df_col)
    except Exception as e:  # pylint: disable=W0703
        logging.error(f"Failed to calculate basic stats for column {col} - {e}")
        return res, {}
    try:
        feature_type, _ = get_feature_type(stats, prefer_categorical=True)
        res[CDP.LOGICAL_DATA_TYPE] = feature_type
    except Exception as e:  # pylint: disable=W0703
        logging.error(f"Failed to calculate feature type for column {col} - {e}")
        return res, {}
    # feature_type could be unknown
    if feature_type in [ft.NUMERIC, ft.DATETIME, ft.CATEGORICAL, ft.BINARY, ft.TEXT]:
        miss_r = missing_ratio(stats)
        valid_r = valid_ratio(stats, feature_type)
        # Whatever is neither valid nor missing is counted as invalid.
        invalid_r = 1 - valid_r - miss_r
        res[CDP.VALID_RATIO] = valid_r
        res[CDP.INVALID_RATIO] = invalid_r
        res[CDP.MISSING_RATIO] = miss_r
        try:
            unique_count = stats["cardinality"]
            column_profile = _get_column_profile(unique_count, feature_type, pandas_df_col)
            res[CDP.COLUMN_PROFILE] = column_profile
        except Exception as e:  # pylint: disable=W0703
            logging.error(f"Failed to calculate profile data for column {col} - {e}")
            # NOTE(review): the computed stats are discarded here even though they
            # were valid — presumably intentional, confirm.
            return res, {}
    else:
        # Unsupported type: the ratios/profile stay None but stats are still returned.
        logging.error(f"The feature type {feature_type} is not accepted in column data insights")
    return res, {col: stats}
import pandas as pd
import numpy as np
from sagemaker_data_insights.const import FeatureType as ft
from sagemaker_data_insights.analyze_feature import (
get_feature_transform_and_transformed_x,
get_valid_transformed_data,
_numpy_conversion,
)
from sagemaker_data_insights.calc_stats_pandas_series import _calc_stats_pandas_series
from sagemaker_data_insights.histogram_functions import (
_frequent_element_helper,
calc_robust_histogram,
)
from .constants import (
COUNT,
DATA,
VALUE,
HistogramParamsKeys as HP,
CategoricalParamsKeys as CP,
)
def _calc_column_stats(pandas_df_col: pd.Series, datetime_num_rows: int = 10000):
    """
    Calculate the basic statistics used by the embedded data insights chart for a
    single pandas.Series. This is a thin wrapper around
    `_calc_stats_pandas_series` with light mode forced on.

    Args:
        pandas_df_col: Column pandas.Series
        datetime_num_rows(Optional): Baseline for datetime type

    Returns:
        JSON of calculated stats
    """
    return _calc_stats_pandas_series(
        pandas_df_col,
        datetime_num_rows=datetime_num_rows,
        dw_light_mode=True,
    )
def _cal_num_bins(count) -> int:
    """
    Calculate the number of bins shown in the embedded data insights chart for
    non-categorical data: the unique-value count, capped at the configured maximum.

    Args:
        count: Number of unique items in the column

    Returns:
        num_of_bins(int)
    """
    if count < HP.HIST_MAX_BIN_NUM:
        return count
    return HP.HIST_MAX_BIN_NUM
def _categorical_data_conversion(categorical_data, total_count, unique_count) -> dict:
    """Reshape categorical data into the form expected by the embedded data
    insights chart.

    Args:
        categorical_data: categorical data result from the
            ``_calc_categorical_elements`` func
        total_count: number of total valid items in the column
        unique_count: number of unique items in the column

    Returns:
        JSON-like dict of the reshaped categorical data
    """
    values = categorical_data[VALUE]
    counts = categorical_data[COUNT]
    converted = []
    # categorical_data could be empty
    if len(counts):
        # One entry per top category: `value` is the category name,
        # `count` is how many rows fall into that category.
        converted = [{VALUE: v, COUNT: c} for v, c in zip(values, counts)]
        # FIXME: If there is a value in the data same as `CP.OTHER`, we can't distinguish between that and the one below
        # Trailing "Other" bucket collects everything outside the top categories.
        converted.append({VALUE: CP.OTHER, COUNT: total_count - sum(counts)})
    return {CP.DISTINCT_VALUES: unique_count, DATA: converted}
def _non_categorical_data_conversion(robust_histogram_data) -> dict:
    """Reshape robust-histogram output into the form expected by the embedded
    data insights chart.

    Args:
        robust_histogram_data: results from sagemaker-data-insights lib's
            ``calc_robust_histogram`` func

    Returns:
        JSON-like dict of the reshaped robust non-categorical data
    """
    # The count/edge lists are returned from calc_robust_histogram
    # https://github.com/aws/sagemaker-data-insights/blob/main/src/sagemaker_data_insights/histogram_functions.py#L9
    # which calls np.histogram under the hood. According to the doc
    # (https://numpy.org/doc/stable/reference/generated/numpy.histogram.html)
    # it is guaranteed that len(bin_edges) == len(hist) + 1.
    counts = robust_histogram_data[HP.HIST_COUNT]
    edges = robust_histogram_data[HP.HIST_EDGES]
    lower_is_outlier = robust_histogram_data[HP.LOWER_BIN_IS_OUTLIER]
    upper_is_outlier = robust_histogram_data[HP.UPPER_BIN_IS_OUTLIER]
    num_bins = len(counts)
    # The minimum valid bin count shall already be enforced by
    # calc_robust_histogram; double-check here just in case.
    min_valid_bins = 3
    converted = []
    if num_bins >= min_valid_bins:
        for i, bin_count in enumerate(counts):
            # Only the first and last bins can be outlier bins.
            if i == 0:
                is_outlier = lower_is_outlier
            elif i == num_bins - 1:
                is_outlier = upper_is_outlier
            else:
                is_outlier = False
            # Consecutive edges form the [MIN, MAX) boundaries of each bin.
            converted.append(
                {
                    HP.MIN_VALUE: edges[i],
                    HP.MAX_VALUE: edges[i + 1],
                    COUNT: bin_count,
                    HP.IS_OUTLIER: is_outlier,
                }
            )
    return {DATA: converted}
def _get_transformed_col_data(feature_type: str, pandas_df_col: pd.Series) -> list:
    """Get transformed column data for the embedded data insights column
    profile calculation, according to the feature type.

    Args:
        feature_type: feature type of the column
        pandas_df_col: original pandas dataframe column

    Returns:
        transformed column data (empty list for unrecognized feature types)
    """
    if feature_type == ft.NUMERIC:
        col_numpy, _ = _numpy_conversion(pandas_df_col, None)
        _, col_transformed = get_feature_transform_and_transformed_x(feature_type, col_numpy)
        _, _, col_transformed_valid = get_valid_transformed_data(col_transformed, None)
        return col_transformed_valid.ravel()
    if feature_type == ft.DATETIME:
        # Datetime is converted to numeric for histogram calculation
        # [TODO]: Check if there is a better way for datetime conversion
        # transform column data
        as_datetime = pd.to_datetime(pandas_df_col.astype(str), errors="coerce", utc=True)
        return pd.to_numeric(as_datetime)
    if feature_type in (ft.CATEGORICAL, ft.BINARY, ft.TEXT):
        col_numpy, _ = _numpy_conversion(pandas_df_col, None)
        return col_numpy
    return []
def _fetch_non_categorical_data(unique_count, data) -> dict:
    """Build the non-categorical (histogram) payload used by the embedded data
    insights column profile calculation.

    Args:
        unique_count: total count of unique items in the column
        data: transformed column data

    Returns:
        converted non-categorical data in the form used by the embedded data
        insights column profile
    """
    histogram = calc_robust_histogram(data, num_bins=_cal_num_bins(unique_count))
    return _non_categorical_data_conversion(histogram)
def _fetch_categorical_data(unique_count, data) -> dict:
    """Build the categorical payload used by the embedded data insights column
    profile calculation.

    Args:
        unique_count: total count of unique items in the column
        data: transformed column data

    Returns:
        converted categorical data in the form used by the embedded data
        insights column profile
    """
    elements, total_count = _calc_categorical_elements(data)
    return _categorical_data_conversion(elements, total_count, unique_count)
def _calc_categorical_elements(pandas_df_col: np.ndarray, max_num_elements: int = 10) -> tuple:
    """Calculate the categorical elements shown in the embedded data insights
    chart for categorical data.

    Args:
        pandas_df_col: column data as a numpy array (see ``_numpy_conversion``)
        max_num_elements: number of most frequent elements to keep

    Returns:
        categorical_data_dict: dict of the frequent values and their counts,
            keyed by the shared ``VALUE``/``COUNT`` constants, as consumed by
            ``_categorical_data_conversion``
        total_count: number of valid items in the column
    """
    _, _, unique, counts, num_values, indices = _frequent_element_helper(pandas_df_col, None, max_num_elements)
    # Use the shared VALUE/COUNT keys instead of hard-coded strings so this
    # producer stays in sync with `_categorical_data_conversion`, which reads
    # the dict back via the same constants.
    frequent = {VALUE: unique[indices].tolist(), COUNT: counts[indices].tolist()}
    return frequent, int(num_values)
import logging
import numpy as np
from sagemaker_data_insights.const import FeatureType as ft
def get_feature_type(metrics: dict, allowed_types: list = None, prefer_categorical=False) -> tuple:
    """Feature type analyzer.

    Parameters
    ----------
    metrics : dict
        must include all the following keys: `median`, `numeric_finite_count`, `nrows`, `null_like_count`,
        `empty_count`, `whitespace_count`, `cardinality`, `datetime_count`, `datetime_non_float_count` and
        `datetime_rows_parsed`. Keys definitions can be found in const.py. While `x` is usually a sample of the data,
        the metrics should be calculated on the whole data or on a larger sample.
    allowed_types: list(str)
        List of allowed feature types. Can include the following types from const.py: NUMERIC, CATEGORICAL, TEXT,
        DATETIME, BINARY. By default includes all.
    prefer_categorical: bool
        Prefer categorical types to numerical types in case of ties.
        TODO. This flag is being used for Ganymede and will be tested for use as
        a default option in Data Insights. If it becomes a default option,
        consumers of data insights will need to do regression tests when they
        upgrade to the 2.0 branch (this branch).

    Returns
    -------
    str: feature type
        The type with the highest probability out of the types allowed in `allowed_types`
    dict: feature type probabilities
        The probability for the feature to be each of the types NUMERIC, CATEGORICAL, TEXT, DATETIME, BINARY. Type not
        included in `allowed_types` will have a zero probability
    """
    # TODO: add detection of ft.VECTOR copy logic from
    # https://code.amazon.com/packages/AIAlgorithmsDataInsights/commits/081735f1f34b8c8ea7e24f76c390f84036f98e84
    # `ranked_types` doubles as the tie-breaker: when scores are equal the type
    # listed earlier wins. The ordering reflects type importance:
    # binary > numeric > categorical > text > datetime, with numeric and
    # categorical swapped when `prefer_categorical` is set.
    if prefer_categorical:
        ranked_types = [ft.BINARY, ft.CATEGORICAL, ft.NUMERIC, ft.TEXT, ft.DATETIME]
    else:
        ranked_types = [ft.BINARY, ft.NUMERIC, ft.CATEGORICAL, ft.TEXT, ft.DATETIME]
    if allowed_types:
        for t in allowed_types:
            if t not in ranked_types:
                raise ValueError(f"Error: type {t} is not allowed. Allowed types: {allowed_types}")
    else:
        allowed_types = ranked_types
    probs = _calculate_schema_probs(
        allowed_types,
        metrics["median"],
        metrics["numeric_finite_count"],
        metrics["nrows"],
        metrics["null_like_count"],
        metrics["empty_count"],
        metrics["whitespace_count"],
        metrics["cardinality"],
        metrics["datetime_count"],
        metrics["datetime_non_float_count"],
        metrics["datetime_rows_parsed"],
    )
    # max() returns the first element reaching the maximal key in iteration
    # order, so ties are broken exactly by the order of `ranked_types`.
    col_type = max(ranked_types, key=lambda t: probs[t])
    logging.debug(f"col_type: {col_type}")
    return col_type, probs
def _calculate_schema_probs(  # noqa: C901
    allowed_types,
    median,
    numeric_finite_count,
    nrows,
    null_like_count,
    empty_count,
    whitespace_count,
    cardinality,
    datetime_count,
    datetime_non_float_count,
    datetime_rows_parsed,
) -> dict:
    """
    Calculates the probabilities for the feature to be any of the types of `schema_types` based on a set of heuristic
    rules

    Parameters
    ----------
    allowed_types: list(str)
        List of allowed feature types. Can include the following types from const.py: NUMERIC, CATEGORICAL, TEXT,
        DATETIME, BINARY. By default includes all.
    median: see description in `const.py`
    numeric_finite_count: see description in `const.py`
    nrows: see description in `const.py`
    null_like_count: see description in `const.py`
    empty_count: see description in `const.py`
    whitespace_count: see description in `const.py`
    cardinality: see description in `const.py`
    datetime_count: see description in `const.py`
    datetime_non_float_count: see description in `const.py`
    datetime_rows_parsed: see description in `const.py`

    Returns
    -------
    dict: feature type probabilities
        The probability for the feature to be each of the types NUMERIC, CATEGORICAL, TEXT, DATETIME, BINARY. Type not
        included in `allowed_types` will have a zero probability

    Raises
    ------
    ValueError: if the scores for all allowed types come out zero (nothing to normalize).
    """
    # Initialize all types to zero; disallowed types keep a zero score.
    numeric = 0
    categorical = 0
    text = 0
    datetime = 0
    binary = 0
    # Probability-like score of column being numeric is proportional to the fraction of entries castable to float.
    if ft.NUMERIC in allowed_types and not np.isnan(median):
        numeric = numeric_finite_count / (nrows - null_like_count - empty_count - whitespace_count)
    # Probability-like score of column being text is proportional to the fraction of non-numeric, non-empty entries.
    if ft.TEXT in allowed_types:
        text_like_rows = nrows - numeric_finite_count - null_like_count - empty_count - whitespace_count
        text = text_like_rows / nrows
    if cardinality == 2:
        # Exactly two distinct values: treat as binary and/or fully categorical,
        # and rule out text.
        if ft.BINARY in allowed_types:
            binary = 1.0
            text = 0.0
        if ft.CATEGORICAL in allowed_types:
            categorical = 1.0
            text = 0.0
    elif ft.CATEGORICAL in allowed_types:
        if cardinality / nrows < 0.01:
            # If there are on average more than 1/.01 = 100 entries per category, use full categorical and disable text.
            categorical = 1.0
            text = 0.0
        elif 0.01 <= cardinality / nrows < 0.2:
            # If there are on average fewer than 1/.01 = 100 but more than 1/.2 = 5 entries per category,
            # then proportionally decrease probability to zero.
            categorical = 1 - (cardinality / nrows - 0.01) / 0.19
        else:
            # Don't count as categorical if on average there are fewer than 5 entries per category.
            categorical = 0.0
    if (
        ft.DATETIME in allowed_types
        and datetime_non_float_count / datetime_rows_parsed > 0.05
        and datetime_count / datetime_rows_parsed > 0.6
    ):
        # A strong datetime signal overrides the other non-binary scores.
        datetime = 1.0
        text = 0.0
        categorical = 0.0
        numeric = 0.0
    # Normalize so that scores sum to 1.
    normalizing_sum = numeric + categorical + text + datetime + binary
    if normalizing_sum == 0:
        raise ValueError(f"Error: scores for all allowed types are zero. Allowed types: {allowed_types}")
    return {
        ft.NUMERIC: numeric / normalizing_sum,
        ft.CATEGORICAL: categorical / normalizing_sum,
        ft.TEXT: text / normalizing_sum,
        ft.DATETIME: datetime / normalizing_sum,
        ft.BINARY: binary / normalizing_sum,
    }
import pandas as pd
import numpy as np
from sagemaker_data_insights.const import FeatureType as ft
def _numpy_conversion(x: pd.Series, y: pd.Series = None) -> tuple:
    """Convert raw pandas column data to numpy, dropping rows where x is null.

    Parameters
    ----------
    x : pandas.Series
        raw column data
    y : pandas.Series or None
        raw target column data (if any)

    Returns
    -------
    x_numpy : np.ndarray
        non-null entries of x as strings, shaped (n, 1)
    y_numpy : None or np.ndarray
        None if y is None, otherwise the rows of y aligned with the non-null
        rows of x, shaped (n, 1)
    """
    raw = x.to_numpy().reshape((-1, 1))
    keep = ~pd.isnull(raw)
    x_numpy = raw[keep].astype(str).reshape((-1, 1))
    if y is None:
        return x_numpy, None
    y_numpy = y.to_numpy().reshape((-1, 1))[keep].reshape((-1, 1))
    return x_numpy, y_numpy
def missing_ratio(metrics: dict) -> float:
    """Ratio of missing rows (null-like, empty, or whitespace-only) to the
    total number of rows."""
    missing = metrics["null_like_count"] + metrics["empty_count"] + metrics["whitespace_count"]
    return float(missing / metrics["nrows"])
def valid_ratio(metrics: dict, feature_type: str) -> float:
    """Ratio of valid rows to total rows; what counts as "valid" depends on
    the feature type.

    Raises:
        ValueError: if `feature_type` is not a supported type.
    """
    nrows = metrics["nrows"]
    if feature_type == ft.NUMERIC:
        # Valid numeric rows are those parseable into finite numbers.
        return float(metrics["numeric_finite_count"] / nrows)
    if feature_type in (ft.CATEGORICAL, ft.BINARY, ft.TEXT):
        # Anything that is not null-like, empty, or whitespace-only counts.
        missing = metrics["null_like_count"] + metrics["empty_count"] + metrics["whitespace_count"]
        return float((nrows - missing) / nrows)
    if feature_type == ft.DATETIME:
        # Datetime validity is measured on the subset of rows actually parsed.
        return float(metrics["datetime_count"] / metrics["datetime_rows_parsed"])
    if feature_type == ft.VECTOR:
        return 1 - missing_ratio(metrics)
    raise ValueError(f"Unsupported feature type {feature_type}")
def get_valid_transformed_data(x_transformed: np.array):
    """Return a finite-value mask for `x_transformed` together with the
    finite entries themselves."""
    finite_mask = np.isfinite(x_transformed)
    return finite_mask, x_transformed[finite_mask]
def unique_without_whitespaces(x):
    """Return the unique items of `x` with their counts, excluding items made
    only of whitespace.

    Items of only whitespace are considered missing, thus they are not valid
    keys for frequent-elements plots.

    Args:
        x: array-like of values

    Returns:
        tuple of (np.ndarray of unique values, np.ndarray of int counts)
    """
    unique, counts = np.unique(x, return_counts=True)
    kept = [(u, int(c)) for u, c in zip(unique, counts) if str(u).strip() != ""]
    # zip(*...) transposes the (value, count) pairs back into two sequences.
    unique_, counts_ = zip(*kept) if kept else ((), ())
    return np.array(unique_), np.array(counts_)
import logging
import scipy
import pandas as pd
import numpy as np
from sagemaker_data_insights.const import INSIGHTS, TaskType as tt
from sagemaker_data_insights.analyzers.insights.utils import get_label_encoder
from sagemaker_data_insights.analyzers.insights.model_insights import regression_insights, classification_insights
from sagemaker_data_insights.utils.column_utils import unique_without_whitespaces
from sagemaker_data_insights.histogram_functions import calc_robust_histogram, robust_histogram_num_outliers
def analyze_target_regression(
    y: pd.Series, metrics: dict, num_bins: int = 20, max_num_common_labels: int = 10, max_num_outliers: int = 5,
    get_histogram: bool = False
):
    """
    Target column analyzer for regression task
    Parameters
    ----------
    y : pandas.Series
        target column (could be raw. Doesn't have to be encoded)
    metrics : dict
        dictionary that must include all the keys in REQUIRED_TARGET_METRICS. While `analyze_target_regression`
        is usually applied on a sample of the data, the metrics should be calculated on the whole data or on a larger
        sample
    num_bins : int >= 3
        number of bins in histograms
    max_num_common_labels : int >= 1
        max number of most common labels to return in `labels` and `label_counts` fields
    max_num_outliers : int >= 0
        max number of outliers to return in `low_outlier_idxs` and `high_outlier_idxs` fields
    get_histogram: bool, False
        whether to produce histogram and robust histogram, default to False
    Returns
    -------
    dict: data insights metrics
        labels: list of all labels in the target column sorted by descending count order
        label_counts: list of label counts sorted by descending count order
        valid_ratio: ratio of the number of numeric finite values to the number of samples
        name: name of target column
        outliers_ratio: ratio between number of outliers to number of samples
        mean: mean of numeric values (outliers included)
        median: median of numeric values (outliers included)
        skew: skew of numeric values (outliers included). Calculated using scipy.stats.skew
        kurtosis: kurtosis of numeric values (outliers included). Calculated using scipy.stats.kurtosis
        histogram: histogram of numeric values (outliers included). Calculated using numpy.histogram
        robust_histogram: robust_histogram of numeric values (outliers included). Calculated using calc_robust_histogram
        metrics: metrics provided in input
        {cs.INSIGHTS}: a list of insights. Can include the following insights: SKEWED_TARGET, HEAVY_TAILED_TARGET,
            TARGET_OUTLIERS, REGRESSION_FREQUENT_LABEL, REGRESSION_NONNUMERIC and REGRESSION_MANY_NONNUMERIC. The
            insights are documented in `insights.py`
    dict: auxiliary dict including the following:
        label_encoder: `LabelEncoder` transform
        valid_row_idxs: (np.ndarray) valid rows indicator
        low_outlier_idxs: (list) indexes of low value outliers for regression
        high_outlier_idxs: (list) indexes of high value outliers for regression
    """
    profiles = {}
    # Encode the raw target to floats; `_analyze_target` sets rows that were
    # null in `y` to NaN, so non-finite entries below mark invalid rows.
    label_encoder, labels, label_counts, y_encoded, _ = _analyze_target(y, tt.REGRESSION, metrics)
    valid_rows = np.isfinite(y_encoded).ravel()
    valid_encoded = y_encoded[valid_rows]
    # Positions of valid rows in the ORIGINAL series, used to report outlier
    # indexes in terms of the caller's row numbering.
    valid_row_idxs = np.nonzero(valid_rows)[0]
    aux = {"label_encoder": label_encoder}
    if get_histogram:
        # Plain (non-robust) histogram is only computed on request.
        count, bin_edges = np.histogram(valid_encoded, bins=num_bins)
        histogram = {
            "hist_count": count.astype(int).tolist(),
            "hist_edges": bin_edges.astype(float).tolist(),
            "lower_bin_is_outlier": False,
            "upper_bin_is_outlier": False,
        }
        profiles.update({"histogram": histogram})
    # The robust histogram is always needed: it drives the outlier statistics.
    robust_histogram = calc_robust_histogram(valid_encoded, num_bins=num_bins)
    # count outliers to calculate `outliers_ratio`
    num_outliers = robust_histogram_num_outliers(robust_histogram)
    # get idxs of lowest outliers to be output as `low_outlier_idxs`
    low_outlier_idxs = []
    if robust_histogram["lower_bin_is_outlier"]:
        # The lowest `max_num_outliers` values that fall below the first
        # non-outlier bin edge are reported as low outliers.
        for idx in np.argsort(valid_encoded.ravel())[:max_num_outliers]:
            value = valid_encoded[idx]
            if value < robust_histogram["hist_edges"][1]:
                low_outlier_idxs.append(valid_row_idxs[idx])
    # get idxs of highest outliers to be output as `high_outlier_idxs`
    high_outlier_idxs = []
    if robust_histogram["upper_bin_is_outlier"]:
        # Symmetric logic for the highest values, reported highest-first.
        for idx in reversed(np.argsort(valid_encoded.ravel())[-max_num_outliers:]):
            value = valid_encoded[idx]
            if value > robust_histogram["hist_edges"][-2]:
                high_outlier_idxs.append(valid_row_idxs[idx])
    aux.update({"invalid_row_idxs": np.nonzero(~valid_rows)[0], "low_outlier_idxs": low_outlier_idxs,
                "high_outlier_idxs": high_outlier_idxs})
    outliers_ratio = float(num_outliers / valid_encoded.shape[0])
    skew = float(scipy.stats.skew(valid_encoded.ravel()))
    kurtosis = float(scipy.stats.kurtosis(valid_encoded.ravel()))
    # Check for target insights
    insights = regression_insights(outliers_ratio, skew, kurtosis, labels, label_counts, metrics)
    profiles.update({
        "labels": labels[:max_num_common_labels],
        "label_counts": label_counts[:max_num_common_labels],
        "valid_ratio": float(metrics["numeric_finite_count"] / metrics["nrows"]),
        "missing_ratio": float(
            (metrics["null_like_count"] + metrics["empty_count"] + metrics["whitespace_count"]) / metrics["nrows"]
        ),
        "name": y.name,
        "outliers_ratio": outliers_ratio,
        "mean": float(np.nanmean(valid_encoded)),
        "median": float(np.nanmedian(valid_encoded)),
        "skew": skew,
        "kurtosis": kurtosis,
        INSIGHTS: insights,
        "metrics": metrics,
        "robust_histogram": robust_histogram,
    })
    return (
        profiles,
        aux
    )
def analyze_target_classification(
    y: pd.Series, metrics: dict, max_num_common_labels: int = 10,
):
    """
    Target column analyzer for classification task
    Parameters
    ----------
    y : pandas.Series
        target column (not encoded)
    metrics : dictionary that must include all the keys in REQUIRED_TARGET_METRICS.
        While `analyze_target_classification` is usually applied on a sample of the data,
        the metrics should be calculated on the whole data or on a larger sample. See const.py
    max_num_common_labels : int >= 1
        max number of most common labels to return in `labels` and `label_counts` fields
    Returns
    -------
    dict: data insights metrics
        labels: list of all labels in the target column sorted by descending count order
        label_counts: list of label counts sorted by descending count order
        valid_ratio: ratio of the number of not null like values to the number of samples
        name: name of target column
        frequent_elements: calculated based on `labels` and `label_counts` provided in metrics
        metrics: metrics provided in input
        insights: a list of insights. Can include the following insights: VERY_SMALL_MINORITY, HIGH_TARGET_CARDINALITY,
            RARE_TARGET_LABEL and SKEWED_LABEL_FREQUENCY. The insights are documented in `insights.py`
    dict: auxiliary dict including the following:
        label_encoder: `LabelEncoder` transform
        valid_row_idxs: (np.ndarray) valid rows indicator
        y_map: dict. label_encoder mapping e.g. {0: 'dog', 1: 'cat', 2: 'mouse'}
        task: str either BINARY_CLASSIFICATION or MULTICLASS_CLASSIFICATION
    """
    try:
        # When the data type of y is string: Null, empty and cells of only whitespace are considered missing
        valid_rows = (~pd.isnull(y)) & (y.str.strip() != "")
    except AttributeError:
        # When the data type of y is not string: only Nulls are considered missing
        valid_rows = ~pd.isnull(y)
    # Drop the missing rows before inferring the task and encoding labels.
    y = y[valid_rows]
    # Exactly two distinct (stringified) values => binary task, else multiclass.
    task = tt.BINARY_CLASSIFICATION if len(np.unique(y.to_numpy().astype(str))) == 2 else tt.MULTICLASS_CLASSIFICATION
    logging.debug("task = %s", task)
    label_encoder, labels, label_counts, _, sample_size = _analyze_target(y, task, metrics)
    # Reverse mapping from encoded integer back to the original label string.
    y_map = {label_encoder.transform([label])[0]: label for label in labels}
    # Check for target insights
    insights = classification_insights(task, labels, label_counts, sample_size)
    sum_label_counts = np.sum(label_counts)
    return (
        {
            "labels": labels[:max_num_common_labels],
            "label_counts": label_counts[:max_num_common_labels],
            "missing_ratio": float(
                (metrics["null_like_count"] + metrics["empty_count"] + metrics["whitespace_count"]) / metrics["nrows"]
            ),
            "valid_ratio": float(
                (metrics["nrows"] - metrics["null_like_count"] - metrics["empty_count"] - metrics["whitespace_count"])
                / metrics["nrows"]
            ),
            "name": y.name,
            "frequent_elements": {
                "value": labels[:max_num_common_labels],
                # Frequencies are normalized over ALL labels, not just the
                # `max_num_common_labels` returned ones.
                "frequency": [float(lc / sum_label_counts) for lc in label_counts[:max_num_common_labels]],
            },
            "insights": insights,
            "metrics": metrics,
        },
        {
            "label_encoder": label_encoder,
            "y_map": y_map,
            "task": task,
            "invalid_row_idxs": np.nonzero(~np.array(valid_rows))[0],
        },
    )
def _analyze_target(y: pd.Series, task: str, metrics: dict):
    """Shared target-analysis core for `analyze_target_regression` and
    `analyze_target_classification`.

    Returns a tuple of (label_encoder, labels, label_counts, y_encoded,
    sample_size), with labels/label_counts sorted by descending count.
    """
    # This function includes code that is shared between analyze_target_regression and analyze_target_classification
    y_numpy = y.dropna().to_numpy()
    _verify_y(y_numpy, task)
    y_numpy = y_numpy.astype(str)
    label_encoder = get_label_encoder(task, y_numpy)
    # If the caller's metrics don't carry precomputed labels/counts, derive
    # them from this (possibly sampled) data instead.
    if not isinstance(metrics["labels"], list) or not isinstance(metrics["label_counts"], list):
        unique, counts = unique_without_whitespaces(y_numpy)
        labels = unique.tolist()
        label_counts = counts.tolist()
        sample_size = len(y_numpy)
    else:
        labels = metrics["labels"]
        label_counts = metrics["label_counts"]
        sample_size = metrics["nrows"]
    # Sort labels and counts together by descending count.
    most_common_label_indexes = np.argsort(-np.array(label_counts))
    labels = np.array(labels)[most_common_label_indexes].astype(str).tolist()
    label_counts = np.array(label_counts)[most_common_label_indexes].astype(int).tolist()
    # Encode the FULL series (including rows that were null), then overwrite
    # the originally-null positions with NaN so callers can mask them out.
    y_encoded = label_encoder.transform(y.to_numpy().astype(str)).ravel().astype(float)
    y_encoded[pd.isnull(y).to_numpy()] = np.nan
    return label_encoder, labels, label_counts, y_encoded, sample_size
def _verify_y(y: np.array, task: str):
    """Validate a target array against the requested task type.

    Raises:
        ValueError: if `task` is not a supported task type, or if `y` contains
            NaN/None entries for a classification task.
    """
    if task not in [tt.REGRESSION, tt.BINARY_CLASSIFICATION, tt.MULTICLASS_CLASSIFICATION]:
        raise ValueError(
            "Error: when y is provided task must be REGRESSION, BINARY_CLASSIFICATION or MULTICLASS_CLASSIFICATION"
        )
    if task in [tt.BINARY_CLASSIFICATION, tt.MULTICLASS_CLASSIFICATION, tt.CLASSIFICATION]:
        # `value != value` is the canonical NaN test: it catches ANY float NaN,
        # not only the `np.nan` singleton that the previous `is np.nan`
        # identity check would match.
        if any(value != value for value in y):
            raise ValueError("Error: nans are not allowed in y for classification task")
        if any(value is None for value in y):
            raise ValueError("Error: None are not allowed in y for classification task")
import numpy as np
import pandas as pd
from sagemaker_data_insights.const import INSIGHTS, FeatureType as ft
from sagemaker_data_insights.utils.column_utils import valid_ratio
from sagemaker_data_insights.insights import Insights
from sagemaker_data_insights.text_utils import CharacterStatistics, token_importance
from sagemaker_data_insights.text_utils import sanitize_strings
from sagemaker_data_insights.patterns.analyze_patterns import analyze_text_patterns
from sagemaker_data_insights.histogram_functions import calc_frequent_elements
# TODO: add response payload so that the histograms can be computed
def analyze_text_feature(  # noqa: C901
    x: np.array,
    metrics: dict,
    random_state: int = 0,
    num_top_words: int = 200,
    requested_stats: list = None,
) -> dict:
    """
    Derive statistics and insights specific to text features.
    Parameters
    ----------
    x : np.ndarray of size (height, 1)
        text feature
    metrics : dictionary
        See the documentation in `analyze_feature`
    random_state: int
        random seed used for RNG
    num_top_words: int
        max number of most important words to return, see `important_words` below
    requested_stats : list of strings or None
        Possible values:
        * 'text_stats' for statistics on the distribution of characters and tokens
        * 'text_patterns' for results of an analysis of textual patterns
    Returns
    -------
    dict: text feature insights. See feature_analyzer.py
    """
    x_list = list(x.ravel())
    insights = {
        "valid_ratio": valid_ratio(metrics, ft.TEXT),
        INSIGHTS: [],
        "character_statistics": {},
    }
    # Nothing requested: return only the base fields.
    if not requested_stats:
        return insights
    if "text_stats" in requested_stats:
        # Numeric character statistics: from every string extract various ratio and count statistics. These are numeric
        # features that capture various characteristics of the string
        for desc, func in CharacterStatistics.functions.items():
            feat = np.vectorize(func)(x_list).reshape((-1, 1))
            num_unique = len(np.unique(feat))
            # A constant statistic carries no information; skip it.
            if num_unique <= 1:
                continue
            feat_stats = {}
            feat_stats["frequent_elements"] = calc_frequent_elements(
                feat, y=None, task=None, max_num_elements=20, sort_type="value"
            )
            insights["character_statistics"][desc] = feat_stats
        # token importance: add information about token importance when tokenizing based on words
        insights["important_words"] = token_importance(
            x, y=None, task=None, analyzer="word", num_top_features=num_top_words
        )
    if "text_patterns" in requested_stats:
        expression_set = analyze_text_patterns(x.reshape(-1), min_coverage=0.8, random_state=random_state)
        num_experiments, sample_size = expression_set.experiment_statistics()
        pattern_columns = ["Pattern", "Relevance", "Regular expression", "Matches", "Non-matches"]
        pattern_dict = {k: [] for k in pattern_columns}
        for expr in expression_set.ranked_expressions():
            pattern = expr.annotated_str()
            confidence = expr.coverage_accumulator.value()
            # Surround matches and nonmatches with angle brackets to show whitespace.
            matches = sanitize_strings(expr.matches_histogram.top_n(5))
            nonmatches = sanitize_strings(expr.outliers_histogram.top_n(5))
            # Pad every column of this pattern's rows to the same height so the
            # resulting table stays rectangular.
            num_rows = max(len(matches), len(nonmatches))
            padding = [""] * (num_rows - 1)
            pattern_dict["Pattern"].extend([pattern] + padding)
            # Our external language for accuracy/confidence is 'Relevance'.
            pattern_dict["Relevance"].extend(["{:.2f}".format(100 * confidence)] + padding)
            pattern_dict["Regular expression"].extend([expr.regex(use_token_lengths=True)] + padding)
            pattern_dict["Matches"].extend(matches + [""] * (num_rows - len(matches)))
            pattern_dict["Non-matches"].extend(nonmatches + [""] * (num_rows - len(nonmatches)))
            # Getting insights
            if 1 > confidence >= Insights.HIGH_CONFIDENCE_PATTERN_THRESHOLD:
                insights[INSIGHTS].append(
                    Insights.generate(
                        Insights.HIGH_CONFIDENCE_PATTERN,
                        Insights.MEDIUM,
                        {
                            "pattern": pattern,
                            "confidence": confidence,
                            "num_experiments": num_experiments,
                            "sample_size": sample_size,
                        },
                    )
                )
        # If there are no patterns, return a table with a single column and an informative message.
        if expression_set.best_expression() is None:
            pattern_columns = ["Pattern"]
            pattern_dict = {"Pattern": ["No textual patterns found."]}
        pattern_df = pd.DataFrame(columns=pattern_columns, data=pattern_dict)
        insights["text_patterns"] = pattern_df.to_dict()
    return insights
import pandas as pd
import scipy
from sagemaker_data_insights.insights import Insights
from sagemaker_data_insights.const import INSIGHTS, FeatureType as ft
from sagemaker_data_insights.utils.column_utils import valid_ratio, get_valid_transformed_data
def analyze_numeric_feature(x_transformed: pd.Series, metrics: dict, frequent_elements: dict) -> dict:
    """
    Feature analyzer for a numeric column
    Parameters
    ----------
    x_transformed: pd.Series
        numerically transformed column values
    metrics: dict
        metrics for x_transformed
    frequent_elements: dict
        frequent elements calculated using histogram_functions.calc_frequent_elements
    Returns
    a dictionary of:
        skew: calculated using scipy.stats.skew
        kurtosis: calculated using scipy.stats.kurtosis
        valid_ratio*: ratio of the number of finite numeric values to the number of samples
        insights: pure statistical column insights
    -------
    """
    _, x_transformed_valid = get_valid_transformed_data(x_transformed)
    # Insights for numeric feature
    insights = []
    # TODO: factor out insights
    frequencies = frequent_elements["frequency"]
    # Flag a potential disguised missing value: a single numeric value that
    # dominates the column (far more frequent than the runner-up) in an
    # otherwise high-cardinality numeric feature, e.g. 0 or -999 used as a
    # missing-value marker.
    if (
        frequencies[0] > Insights.NUMERIC_DISGUISED_THRESHOLD
        and len(frequencies) > 1
        and frequencies[0] > Insights.NUMERIC_DISGUISED_RATIO * frequencies[1]
        and str(frequent_elements["value"][0]).isnumeric()
        and metrics["cardinality"] > Insights.NUMERIC_DISGUISED_MIN_UNIQUE
    ):
        insights.append(
            Insights.generate(
                Insights.NUMERIC_DISGUISED_MISSING_VALUE,
                Insights.MEDIUM_FEATURE,
                {"value": frequent_elements["value"][0], "frequency": frequencies[0]},
            )
        )
    return {
        "skew": float(scipy.stats.skew(x_transformed_valid.ravel())),
        "kurtosis": float(scipy.stats.kurtosis(x_transformed_valid.ravel())),
        "valid_ratio": valid_ratio(metrics, ft.NUMERIC),
        INSIGHTS: insights,
    }
import pandas as pd
import logging
import sagemaker_data_insights.const as cs
from sagemaker_data_insights.const import FeatureType as ft
from sagemaker_data_insights.utils.column_utils import missing_ratio
from sagemaker_data_insights.analyzers.binary_analyzer import analyze_binary_feature
from sagemaker_data_insights.analyzers.categorical_analyzer import analyze_categorical_feature
from sagemaker_data_insights.analyzers.datetime_analyzer import analyze_datetime_feature
from sagemaker_data_insights.analyzers.numeric_analyzer import analyze_numeric_feature
from sagemaker_data_insights.analyzers.text_analyzer import analyze_text_feature
from sagemaker_data_insights.analyzers.vector_analyzer import analyze_vector_feature
from sagemaker_data_insights.utils.feature_transform import get_feature_transform
from sagemaker_data_insights.histogram_functions import calc_frequent_elements
def analyze_feature_column(x: pd.Series, feature_type: str, metrics: dict, requested_stats: list = None) -> dict:
    """
    Feature analyzer. Provides ML relevant statistics about the feature; the set of
    statistics derived depends on the provided feature type.

    Parameters
    ----------
    x : pandas.Series
        raw feature column vector (e.g. NOT encoded using one hot encoder)
    feature_type : str
        NUMERIC, CATEGORICAL, TEXT, DATETIME, BINARY or VECTOR. If unknown, use `get_feature_type`
    metrics : dict
        must include the keys nrows, numeric_finite_count, cardinality and empty_count
        (see the descriptions in const.py)
    requested_stats : list of strings or None
        None returns the default set of stats. Including 'text_stats' additionally
        returns the text statistics, e.g. ['text_stats'].

    Returns
    -------
    dict: data insights metrics. Common keys for every feature type:
        name (from x), type, metrics (as provided), missing_ratio,
        frequent_elements and the insights list under cs.INSIGHTS.
    Per-type extras are merged in from the matching analyzer:
        NUMERIC  -> analyze_numeric_feature (skew, kurtosis, valid_ratio, ...)
        TEXT     -> analyze_text_feature (valid_ratio, important_words, character_statistics, ...)
        DATETIME -> analyze_datetime_feature (valid_ratio, datetime_features)
        BINARY / CATEGORICAL / VECTOR -> the corresponding analyzer's output

    Raises
    ------
    ValueError: if feature_type is not one of the supported types.
    """
    if feature_type not in (ft.NUMERIC, ft.CATEGORICAL, ft.TEXT, ft.DATETIME, ft.BINARY, ft.VECTOR):
        raise ValueError(
            f"feature_type must be one of {ft.NUMERIC}, {ft.CATEGORICAL}, {ft.TEXT}, {ft.DATETIME}, {ft.BINARY} or "
            f"{ft.VECTOR}"
        )
    # Drop null entries and normalize the remainder to a string column vector.
    raw = x.to_numpy().reshape((-1, 1))
    raw = raw[~pd.isnull(raw)].astype(str).reshape((-1, 1))
    result = {
        "name": x.name,
        "type": feature_type,
        "metrics": metrics,
    }
    if len(raw) == 0:
        # Fully-empty column: report trivial stats and skip all analyzers.
        result["missing_ratio"] = 1
        result["valid_ratio"] = 0
        result["frequent_elements"] = []
        result[cs.INSIGHTS] = []
        return result
    transformer = None
    transformed = None
    if feature_type in (ft.NUMERIC, ft.DATETIME):
        # Only these two analyzers consume the transformed representation.
        transformer = get_feature_transform(feature_type, True).fit(raw)
        transformed = transformer.transform(raw)
    result["missing_ratio"] = missing_ratio(metrics)
    result["frequent_elements"] = calc_frequent_elements(raw)
    # Statistics and insights specific to each feature type.
    if feature_type == ft.NUMERIC:
        extra = analyze_numeric_feature(transformed, metrics, result["frequent_elements"])
    elif feature_type == ft.TEXT:
        extra = analyze_text_feature(raw, metrics, requested_stats=requested_stats)
    elif feature_type == ft.DATETIME:
        extra = analyze_datetime_feature(transformer, transformed, metrics)
    elif feature_type == ft.BINARY:
        extra = analyze_binary_feature(metrics)
    elif feature_type == ft.CATEGORICAL:
        extra = analyze_categorical_feature(result["frequent_elements"], metrics)
    else:  # ft.VECTOR
        extra = analyze_vector_feature(metrics)
    result.update(extra)
    return result
import numpy as np
from sagemaker_data_insights.utils.column_utils import valid_ratio
from sagemaker_data_insights.const import INSIGHTS, FeatureType as ft
from sagemaker_data_insights.histogram_functions import calc_frequent_elements, calc_robust_histogram
def analyze_datetime_feature(
    feature_transform, x_transformed: np.array, metrics: dict
) -> dict:
    """
    Date time analyzer.

    Parameters
    ----------
    feature_transform : fitted datetime transform whose ``extract_`` members each
        carry an ``extract_func`` named ``extract_<component>`` (week, month, ...)
    x_transformed : np.array, one column per extracted datetime component
    metrics : dict, metrics for the datetime column

    Returns
    -------
    dict:
        valid_ratio: ratio of valid datetime values
        datetime_features: per-component frequent_elements or robust_histogram stats
        insights: list of insights (always empty here)
    """
    bin_count = 20
    per_component = {}
    # Walk the internal features produced by feature_transform (week, month, hour, ...).
    for position, extractor in enumerate(feature_transform.extract_):
        component = x_transformed[:, position].reshape((-1, 1))
        component = component[np.isfinite(component)].reshape((-1, 1))
        func_name = extractor.extract_func.__name__
        # Every datetime property function is named "extract_<component>".
        if not func_name.startswith("extract_"):
            raise ValueError("Not a valid datetime feature")
        component_name = func_name[8:]  # strip the "extract_" prefix
        stats = {}
        distinct = len(np.unique(component))
        # Calendar-like components always use frequent elements; other components
        # switch to a histogram once they have many unique values.
        if component_name in ("quarter", "month", "hour", "weekday") or distinct <= bin_count:
            stats["frequent_elements"] = calc_frequent_elements(
                component.astype(int), None, task=None, sort_type="value", max_num_elements=distinct
            )
        else:
            stats["robust_histogram"] = calc_robust_histogram(
                component, None, task=None, num_bins=bin_count
            )
        per_component[component_name] = stats
    return {
        "valid_ratio": valid_ratio(metrics, ft.DATETIME),
        "datetime_features": per_component,
        INSIGHTS: [],
    }
from enum import Enum
import numpy as np
import pandas as pd
from sagemaker_data_insights.insights import Insights
from sagemaker_data_insights.const import TaskType as tt
def regression_insights(outliers_ratio, skew, kurtosis, labels, label_counts, metrics):
    """Generate target-column insights for regression tasks.

    Parameters
    ----------
    outliers_ratio : ratio of outliers in the target column
    skew, kurtosis : distribution statistics of the target
    labels : unique label values; labels[0] is treated as the most frequent
    label_counts : occurrence counts aligned with ``labels``
    metrics : dict with at least "nrows", "cardinality" and "numeric_finite_count"

    Returns
    -------
    list of generated insight dicts.
    """
    found = []
    # Distribution-shape insights only apply when outliers were detected.
    if outliers_ratio > 0:
        if abs(skew) > Insights.SKEWED_TARGET_THRESHOLD:
            found.append(Insights.generate(Insights.SKEWED_TARGET, Insights.HIGH))
        elif kurtosis > Insights.HEAVY_TAILED_TARGET_THRESHOLD:
            found.append(Insights.generate(Insights.HEAVY_TAILED_TARGET, Insights.HIGH))
        elif kurtosis > Insights.TARGET_OUTLIERS_THRESHOLD:
            found.append(Insights.generate(Insights.TARGET_OUTLIERS, Insights.MEDIUM))
    n_rows = metrics["nrows"]
    top_frequency = label_counts[0] / n_rows
    # The allowed frequency scales down as cardinality grows.
    cardinality_bound = Insights.ALLOWED_FREQUENCY_FACTOR / metrics["cardinality"]
    if top_frequency > max(Insights.ALLOWED_FREQUENCY, cardinality_bound):
        found.append(
            Insights.generate(
                Insights.REGRESSION_FREQUENT_LABEL,
                Insights.MEDIUM,
                {"label": labels[0], "frequency": top_frequency},
            )
        )
    non_numeric_frequency = (n_rows - metrics["numeric_finite_count"]) / n_rows
    if non_numeric_frequency > 0:
        # Collect a small sample of the labels that are not finite numerics.
        bad_labels = [
            label for label in labels
            if not np.isfinite(pd.to_numeric(label, errors="coerce"))
        ][: Insights.NUM_NONUMERIC_LABELS]
        info = {"frequency": non_numeric_frequency, "values": bad_labels}
        if non_numeric_frequency > Insights.REGRESSION_MANY_NONNUMERIC_THRESHOLD:
            found.append(Insights.generate(Insights.REGRESSION_MANY_NONNUMERIC, Insights.HIGH, info))
        else:
            found.append(Insights.generate(Insights.REGRESSION_NONNUMERIC, Insights.MEDIUM, info))
    return found
def classification_insights(task, labels, label_counts, sample_size):
    """Generate target-column insights for classification tasks.

    Parameters
    ----------
    task : BINARY_CLASSIFICATION or MULTICLASS_CLASSIFICATION constant
    labels : unique label values; labels[0] is treated as the most frequent
    label_counts : occurrence counts aligned with ``labels``
    sample_size : total number of samples

    Returns
    -------
    list of generated insight dicts.
    """
    found = []
    if task == tt.BINARY_CLASSIFICATION:
        for label, count in zip(labels, label_counts):
            if count < Insights.VERY_SMALL_MINORITY_THRESHOLD:
                found.append(
                    Insights.generate(
                        Insights.VERY_SMALL_MINORITY,
                        Insights.HIGH,
                        {"label": label, "count": count, "sample_size": sample_size, "ratio": count / sample_size},
                    )
                )
        return found
    if task != tt.MULTICLASS_CLASSIFICATION:
        # Any other task type produces no classification insights.
        return found
    if len(labels) > Insights.HIGH_TARGET_CARDINALITY_THRESHOLD:
        # Too many classes: only report cardinality, skip per-label checks.
        found.append(
            Insights.generate(Insights.HIGH_TARGET_CARDINALITY, Insights.MEDIUM, {"cardinality": len(labels)})
        )
        return found
    for label, count in zip(labels, label_counts):
        if count <= Insights.RARE_TARGET_LABEL_THRESHOLD:
            found.append(
                Insights.generate(Insights.RARE_TARGET_LABEL, Insights.HIGH, {"label": label, "count": count})
            )
        elif count < label_counts[0] * Insights.IMBALANCED_CLASS_RATIO:
            found.append(
                Insights.generate(
                    Insights.IMBALANCED_CLASSES,
                    Insights.MEDIUM,
                    {
                        "label": label,
                        "count": count,
                        "most_frequent_label": labels[0],
                        "most_frequent_label_count": label_counts[0],
                    },
                )
            )
    return found
class ModelInsightsConstraint(Enum):
    """Thresholds used when inferring the ML task type from a target column."""

    # The maximum number of unique labels in a numeric target column to treat the problem as classification.
    TASK_TYPE_MAX_NUM_UNIQUES_FOR_NUMERIC_MULTICLASS = 100
    # The maximum number of unique labels in a numeric target column under which we always treat the problem as
    # regression.
    # NOTE(review): the member name says "MULTICLASS" while the comment above says
    # "regression" — one of the two looks stale; confirm against the caller.
    TASK_TYPE_MAX_NUM_UNIQUES_FOR_OBVIOUS_MULTICLASS = 5
    # By how many times the target column's maximum should exceed the number of labels to treat the column as ordinal.
    TASK_TYPE_MAX_NUM_UNIQUES_MULTIPLE_FOR_ORDINAL = 10
    # The minimum fraction of values which should be numeric for the target to be treated as numeric.
    TASK_TYPE_MIN_FRACTION_FOR_NUMERIC = 0.5
    # The minimum value that #uniques / #rows should be to allow regression when the labels are mostly integers.
    TASK_TYPE_MIN_FRACTION_INTEGER_UNIQUES_FOR_REGRESSION = 0.015
    # The minimum fraction of values which should be non-integer floats in order to treat the problem as regression.
    TASK_TYPE_MIN_FRACTION_NONINTEGER_FLOATS_FOR_REGRESSION = 0.01
    # Maximum number of supported classes for multiclass classification problems.
    TASK_TYPE_MAX_NUM_SUPPORTED_CLASSES = 2000
import numpy as np
def _encode_numpy(values, uniques=None, encode=False, check_unknown=True):
# only used in _encode below, see docstring there for details
if uniques is None:
if encode:
uniques, encoded = np.unique(values, return_inverse=True)
return uniques, encoded
else:
# unique sorts
return np.unique(values)
if encode:
if check_unknown:
diff = _encode_check_unknown(values, uniques)
if diff:
raise ValueError("y contains previously unseen labels: %s"
% str(diff))
encoded = np.searchsorted(uniques, values)
return uniques, encoded
else:
return uniques
def _encode_python(values, uniques=None, encode=False):
# only used in _encode below, see docstring there for details
if uniques is None:
uniques = sorted(set(values))
uniques = np.array(uniques, dtype=values.dtype)
if encode:
table = {val: i for i, val in enumerate(uniques)}
try:
encoded = np.array([table[v] for v in values])
except KeyError as e:
raise ValueError("y contains previously unseen labels: %s"
% str(e))
return uniques, encoded
else:
return uniques
def _encode(values, uniques=None, encode=False, check_unknown=True):
    """Helper function to factorize (find uniques) and encode values.

    Dispatches to a pure-python implementation for object dtype and a numpy
    implementation for every other dtype. The numpy path requires `uniques`
    to already be sorted; this is assumed, not checked — callers must
    guarantee it for non-object values.

    Parameters
    ----------
    values : array
        Values to factorize or encode.
    uniques : array, optional
        If passed, uniques are not determined from `values` (e.g. because the
        user specified categories, or they were already determined in fit).
    encode : bool, default False
        If True, also encode the values into integer codes based on `uniques`.
    check_unknown : bool, default True
        If True, raise when `values` contains entries missing from `uniques`.
        Ignored (treated as True) for object dtype. Lets
        _BaseEncoder._transform() avoid calling _encode_check_unknown() twice.

    Returns
    -------
    uniques
        If ``encode=False``. Sorted when inferred from the data.
    (uniques, encoded)
        If ``encode=True``.
    """
    if values.dtype != object:
        return _encode_numpy(values, uniques, encode,
                             check_unknown=check_unknown)
    try:
        return _encode_python(values, uniques, encode)
    except TypeError:
        # Mixed / unorderable object values cannot be sorted or hashed.
        raise TypeError("argument must be a string or number")
def _encode_check_unknown(values, uniques, return_mask=False):
"""
Helper function to check for unknowns in values to be encoded.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
values : array
Values to check for unknowns.
uniques : array
Allowed uniques values.
return_mask : bool, default False
If True, return a mask of the same shape as `values` indicating
the valid values.
Returns
-------
diff : list
The unique values present in `values` and not in `uniques` (the
unknown values).
valid_mask : boolean array
Additionally returned if ``return_mask=True``.
"""
if values.dtype == object:
uniques_set = set(uniques)
diff = list(set(values) - uniques_set)
if return_mask:
if diff:
valid_mask = np.array([val in uniques_set for val in values])
else:
valid_mask = np.ones(len(values), dtype=bool)
return diff, valid_mask
else:
return diff
else:
unique_values = np.unique(values)
diff = list(np.setdiff1d(unique_values, uniques, assume_unique=True))
if return_mask:
if diff:
valid_mask = np.in1d(values, uniques)
else:
valid_mask = np.ones(len(values), dtype=bool)
return diff, valid_mask
else:
return diff | /sagemaker_data_insights-0.4.0-py3-none-any.whl/sagemaker_data_insights/analyzers/insights/sklearn_utils.py | 0.793186 | 0.693479 | sklearn_utils.py | pypi |
import numpy as np
import pandas as pd
import warnings
from sagemaker_data_insights.const import TaskType as tt
from sagemaker_sklearn_extension.impute import RobustImputer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder
from sagemaker_data_insights.analyzers.insights.sklearn_utils import _encode, _encode_check_unknown
from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples
def get_label_encoder(task: str, y: np.array):
    """Prepares `LabelEncoder` against labels specified in an array, and returns a fitted `LabelEncoder` transform.

    For regression, we change any non-numeric value to nan. For multiclass and binary classification, we encode them.

    Args:
        task (str): The problem type. Must be a constant from [REGRESSION, BINARY_CLASSIFICATION,
            MULTICLASS_CLASSIFICATION].
        y (np.array): target data to use for fitting.

    Returns:
        function: `LabelEncoder` transform after fitting.
    """
    if task == tt.REGRESSION:
        # Regression targets: replace non-numeric / non-finite labels with NaN.
        return NALabelEncoder().fit(y)
    if task == tt.BINARY_CLASSIFICATION:
        # This code is to insure that the minority class is encoded as `1`
        y_transformed = LabelEncoder().fit_transform(y)
        # NOTE(review): LabelEncoder codes are non-negative integers, so this
        # isfinite() filter looks like an all-True no-op — confirm intent.
        y_valid = y[np.isfinite(y_transformed)]
        labels, counts = np.unique(y_valid, return_counts=True)
        labels_count = []
        for idx, label in enumerate(labels):
            labels_count.append((label, counts[idx]))
        # NOTE(review): assumes exactly two distinct labels; a constant target
        # column would raise IndexError on labels_count[1] — confirm upstream guards.
        if labels_count[0][1] > labels_count[1][1]:
            majority_label = labels_count[0][0]
            minority_label = labels_count[1][0]
        else:
            majority_label = labels_count[1][0]
            minority_label = labels_count[0][0]
        # Only the majority label is registered as a known class (encoded 0);
        # everything else falls through to the fill label (encoded 1).
        return RobustLabelEncoder(
            labels=[majority_label], fill_label_value=minority_label, include_unseen_class=True
        ).fit(y)
    # Multiclass: encode every observed label.
    return RobustLabelEncoder().fit(y)
class NALabelEncoder(BaseEstimator, TransformerMixin):
    """Encoder that maps invalid regression labels to NA values.

    Thin wrapper that applies `RobustImputer` to a 1D label column:
    - the `is_finite_numeric` mask is used by default
    - only the `RobustImputer` strategy `constant` is used, filling with `np.nan`
    - so, by default, non-float and non-finite values in a regression target
      column are encoded as nan

    Parameters
    ----------
    mask_function : callable -> np.array, dtype('bool') (default=None)
        A vectorized python function, accepts np.array, returns np.array
        with dtype('bool'). For each value, if mask_function(val) == False,
        that value will be imputed. Use np.vectorize to vectorize singular
        python functions.
    """

    def __init__(self, mask_function=None):
        self.mask_function = mask_function

    def fit(self, y):
        """Fit the underlying imputer on the label column.

        Parameters
        ----------
        y : {array-like}, shape (n_samples,)
            Input column, where `n_samples` is the number of samples.

        Returns
        -------
        self : NALabelEncoder
        """
        self.model_ = RobustImputer(strategy="constant", fill_values=np.nan, mask_function=self.mask_function)
        self.model_.fit(X=y.reshape(-1, 1))
        return self

    def transform(self, y):
        """Encode all non-float and non-finite values in y as NA values.

        Parameters
        ----------
        y : {array-like}, shape (n_samples)
            The input column to encode.

        Returns
        -------
        yt : {ndarray}, shape (n_samples,)
            The encoded input column.
        """
        check_is_fitted(self, "model_")
        return self.model_.transform(y.reshape(-1, 1)).flatten()

    def inverse_transform(self, y):
        """Identity: the NA encoding is not reversible, so y is returned unchanged."""
        return y

    def _more_tags(self):
        return {"X_types": ["1dlabels"]}
class RobustLabelEncoder(LabelEncoder):
    """Encode labels for seen and unseen labels.

    Seen labels are encoded with value between 0 and n_classes-1. Unseen labels are encoded with
    ``self.fill_encoded_label_value`` with a default value of n_classes.

    Similar to ``sklearn.preprocessing.LabelEncoder`` with additional features.
    - ``RobustLabelEncoder`` encodes unseen values with ``fill_encoded_label_value`` or ``fill_label_value``
      if ``fill_unseen_labels=True`` for ``transform`` or ``inverse_transform`` respectively
    - ``RobustLabelEncoder`` can use predetermined labels with the parameter ``labels``.

    Examples
    --------
    >>> rle = RobustLabelEncoder()
    >>> _ = rle.fit([1, 2, 2, 6])
    >>> list(rle.classes_)
    [1, 2, 6]
    >>> list(rle.transform([1, 1, 2, 6, 1738]))
    [0, 0, 1, 2, 3]
    >>> rle.inverse_transform([-1738, 0, 0, 1, 2])
    ['<unseen_label>', 1, 1, 2, 6]

    Parameters
    ----------
    labels : list of values (default = None)
        List of unique values for label encoding. Overrides ``self.classes_``.
        If ``labels`` is None, RobustLabelEncoder will automatically determine the labels.

    fill_unseen_labels : boolean (default = True)
        Whether or not to fill unseen values during transform or inverse_transform.

    fill_encoded_label_value : int (default = n_classes)
        Replacement value for unseen labels during ``transform``.
        Default value is n_classes.

    fill_label_value : str (default = '<unseen_label>')
        Replacement value for unseen encoded labels during ``inverse_transform``.

    include_unseen_class: boolean (default = False)
        Whether or not ``fill_label_value`` should be included as a class.

    Attributes
    ----------
    classes_ : array of shape (n_classes,)
        Holds the label for each class that is seen when the encoder is ``fit``.
    """

    def __init__(
        self,
        labels=None,
        fill_unseen_labels=True,
        fill_encoded_label_value=None,
        fill_label_value="<unseen_label>",
        include_unseen_class=False,
    ):
        super().__init__()
        self.labels = labels
        self.fill_unseen_labels = fill_unseen_labels
        self.fill_encoded_label_value = fill_encoded_label_value
        self.fill_label_value = fill_label_value
        self.include_unseen_class = include_unseen_class

    def fit(self, y):
        """Fit label encoder.

        Parameters
        ----------
        y : array-like of shape (n_samples,)
            Label values.

        Returns
        -------
        self : RobustLabelEncoder.
        """
        y = column_or_1d(y, warn=True)
        # Predetermined labels (if supplied) take precedence over labels in y.
        self.classes_ = self._check_labels_and_sort() or _encode(y)
        return self

    def _check_labels_and_sort(self):
        # Returns the user-supplied labels in sorted order, or None when the
        # classes should be derived from the data in ``fit``.
        if not self.labels:
            return None
        if self._is_sorted(self.labels):
            return self.labels
        warnings.warn("`labels` parameter is expected to be sorted. Sorting `labels`.")
        return sorted(self.labels)

    def _is_sorted(self, iterable):
        # True when the iterable is in non-decreasing order.
        return all(iterable[i] <= iterable[i + 1] for i in range(len(iterable) - 1))

    def fit_transform(self, y):
        """Fit label encoder and return encoded labels.

        Parameters
        ----------
        y : array-like of shape [n_samples]
            Label values.

        Returns
        -------
        y_encoded : array-like of shape [n_samples]
            Encoded label values.
        """
        return self.fit(y).transform(y)

    def transform(self, y):
        """Transform labels to normalized encoding.

        If ``self.fill_unseen_labels`` is ``True``, use ``self.fill_encoded_label_value`` for unseen values.
        Seen labels are encoded with value between 0 and n_classes-1. Unseen labels are encoded with
        ``self.fill_encoded_label_value`` with a default value of n_classes.

        Parameters
        ----------
        y : array-like of shape [n_samples]
            Label values.

        Returns
        -------
        y_encoded : array-like of shape [n_samples]
            Encoded label values.
        """
        check_is_fitted(self, "classes_")
        y = column_or_1d(y, warn=True)
        # transform of empty array is empty array
        if _num_samples(y) == 0:
            return np.array([])
        if self.fill_unseen_labels:
            _, mask = _encode_check_unknown(y, self.classes_, return_mask=True)
            y_encoded = np.searchsorted(self.classes_, y)
            # Bug fix: ``value or default`` silently discarded an explicit
            # fill value of 0; test against None instead.
            fill_encoded_label_value = (
                self.fill_encoded_label_value
                if self.fill_encoded_label_value is not None
                else len(self.classes_)
            )
            y_encoded[~mask] = fill_encoded_label_value
        else:
            _, y_encoded = _encode(y, uniques=self.classes_, encode=True)
        return y_encoded

    def inverse_transform(self, y):
        """Transform labels back to original encoding.

        If ``self.fill_unseen_labels`` is ``True``, use ``self.fill_label_value`` for unseen values.

        Parameters
        ----------
        y : numpy array of shape [n_samples]
            Encoded label values.

        Returns
        -------
        y_decoded : numpy array of shape [n_samples]
            Label values.
        """
        check_is_fitted(self, "classes_")
        y = column_or_1d(y, warn=True)
        if y.dtype.kind not in ("i", "u"):
            try:
                # Bug fix: np.float / np.int aliases were removed in NumPy 1.24
                # (AttributeError); the builtins are the documented replacements.
                y = y.astype(float).astype(int)
            except ValueError as err:
                raise ValueError("`y` contains values not convertible to integer.") from err
        # inverse transform of empty array is empty array
        if _num_samples(y) == 0:
            return np.array([])
        n_classes = len(self.classes_)
        diff = np.setdiff1d(y, np.arange(n_classes))
        if diff.size > 0 and not self.fill_unseen_labels:
            raise ValueError("y contains previously unseen labels: %s" % str(diff))
        # O(1) bounds check per element instead of O(n_classes) membership scan.
        y_decoded = [self.classes_[idx] if 0 <= idx < n_classes else self.fill_label_value for idx in y]
        return y_decoded

    def get_classes(self):
        """Returns the values of the unencoded classes.

        If ``self.include_unseen_class`` is ``True`` include ``self.fill_label_value`` as a class.

        Returns
        -------
        classes : array of shape (n_classes,)
        """
        if self.include_unseen_class and self.fill_unseen_labels:
            return np.append(self.classes_, [self.fill_label_value])
        return self.classes_
def calc_correlation(x: np.ndarray, y: np.ndarray) -> float:
    """Calculates the Pearson correlation between two single-column arrays.

    Args:
        x (np.ndarray): The first array (1-D or a single-column 2-D array).
        y (np.ndarray): The second array, same length as ``x``.

    Returns:
        float: The correlation.
    """
    # corrwith returns a one-element Series; calling float() on a Series is
    # deprecated in pandas >= 2.1 (removed in 3.0), so extract the scalar
    # explicitly instead.
    return float(pd.DataFrame(x).corrwith(pd.DataFrame(y)).iloc[0])
from pyspark.sql import DataFrame
from sagemaker_data_insights.const import DeequFeatureType as ft, INSIGHTS
from sagemaker_data_insights.analyzers.spark_engine.numeric_analyzer import analyze_numeric_feature
from sagemaker_data_insights.analyzers.spark_engine.string_analyzer import analyze_string_feature
from sagemaker_data_insights.analyzers.spark_engine.string_analyzer import analyze_text_feature
from sagemaker_data_insights.insights import Insights
def analyze_feature_column(column: DataFrame, feature_type: str, profiles, requested_insights=None) -> dict:
    """
    Analyze a feature column in a dataframe and generate feature insights.

    Args:
        column: a dataframe with single column
        feature_type: str, one of the DeequFeatureType constants
        profiles: column profiles (exposes all, completeness,
            approximateNumDistinctValues)
        requested_insights: list, requested insights, if None generates all applicable insights

    Returns:
        dict with "type", "metrics", "missing_ratio", "unique_count", the
        per-type analyzer statistics, and the insights list under INSIGHTS.
    """
    insights = {
        "type": feature_type,
        "metrics": profiles.all,
        "missing_ratio": 1 - profiles.completeness,
        "unique_count": profiles.approximateNumDistinctValues,
    }
    # Common insights that are universal to all feature types
    common_insights = []
    if profiles.completeness < Insights.MISSING_VALUES_THRESHOLD:
        common_insights.append(Insights.generate(Insights.MISSING_VALUES, Insights.MEDIUM_FEATURE))
    if profiles.approximateNumDistinctValues == 1:
        common_insights.append(Insights.generate(Insights.CONSTANT_FEATURE, Insights.LOW))
    # NOTE(review): this compares the distinct count to the number of *columns*;
    # an ID-column check would normally compare against the row count — confirm.
    if profiles.approximateNumDistinctValues == len(column.columns):
        common_insights.append(Insights.generate(Insights.ID_COLUMN, Insights.LOW))
    # Generates insights for each feature type
    if feature_type != ft.UNKNOWN:
        insights.update(
            {
                ft.FRACTIONAL: lambda: analyze_numeric_feature(column, profiles),
                ft.INTEGRAL: lambda: analyze_numeric_feature(column, profiles),
                ft.STRING: lambda: analyze_string_feature(column, profiles),
                ft.TEXT: lambda: analyze_text_feature(column, profiles),
            }[feature_type]()
        )
    # Bug fix: the INSIGHTS key is only created by the per-type analyzers, so
    # for UNKNOWN columns appending directly raised a KeyError.
    insights.setdefault(INSIGHTS, []).extend(common_insights)
    return insights
import logging
import ipywidgets as widgets
import pandas as pd
class ToggleWidget(widgets.VBox):
    """
    Toggle display between the datawrangler widget and the pandas default display
    """

    def __init__(
        self, df, dw_widget_vbox, pandas_default_vbox, displaying_datawrangler=True
    ):
        super().__init__()
        self.dw_widget_vbox = dw_widget_vbox
        # ToDo: optimize by lazy initialization
        self.pandas_default_vbox = pandas_default_vbox
        # ToDo: move to config
        self.displaying_datawrangler = displaying_datawrangler
        self.layout.height = "auto"
        self.render()

    def render(self):
        """Show the active view, hide the other one and install the toggle button."""
        if self.displaying_datawrangler:
            toggle_button = self._get_to_display_pandas_default_button()
            self.pandas_default_vbox.layout.display = "none"
            self.dw_widget_vbox.layout.display = "block"
            logging.info("Toggled to the sagemaker_datawrangler view.")
        else:
            toggle_button = self._get_to_display_dw_widget_button()
            self.dw_widget_vbox.layout.display = "none"
            self.pandas_default_vbox.layout.display = "block"
            logging.info("Toggled to the pandas default view.")
        self.children = [toggle_button]

    def _make_toggle_button(self, description, click_log_message, displaying_datawrangler):
        # Shared builder for both toggle buttons: clicking records the target
        # view state and re-renders (removes the previous duplication).
        button = widgets.Button(description=description)
        button.add_class("dw-widget-btn-secondary")

        def _on_button_clicked(button):
            logging.info(click_log_message)
            self.displaying_datawrangler = displaying_datawrangler
            self.render()

        button.on_click(_on_button_clicked)
        return button

    def _get_to_display_dw_widget_button(self):
        # Button shown in the pandas view, switching back to the DW widget.
        return self._make_toggle_button(
            "View Data Wrangler table", "Button clicked: View the DataWrangler table", True
        )

    def _get_to_display_pandas_default_button(self):
        # Button shown in the DW view, switching to the pandas default display.
        return self._make_toggle_button(
            "View Pandas table", "Button clicked: View the Pandas table", False
        )
import json
import traceback
from collections import Counter
from enum import Enum
from typing import List
from .logging import ERROR
from .platform import APP_CONTEXT
class EventStatus(Enum):
    """
    API event status options for OE logging

    Serialized via ``.value`` into structured log payloads (see
    ``create_event_metrics``).
    """

    START = "start"
    FAILED = "failed"
    SUCCESS = "success"
# ToDo: move "ganymede" to a const prefix variable
class MetricsEventType(Enum):
    """Event types for all backend structured logs"""

    IMPORT_SM_DW = "ganymede.import_sagemaker_dw"
    INITIALIZATION = "ganymede.initialization"
    LAZY_LOADED = "ganymede.lazy_loaded"
    APPLY_TRANSFORM = "ganymede.apply_transform"
    COMPUTE_TARGET_INSIGHTS = "ganymede.compute_target_insights"
    # Bug fix: this member is referenced by get_column_insights_metrics() but
    # was missing, which made that function raise AttributeError.
    COMPUTE_COLUMN_INSIGHTS = "ganymede.compute_column_insights"
    COMPUTE_COLUMN_INSIGHTS_DF = "ganymede.df.compute_column_insights"
    UPDATE_COMPUTE_ROW_COUNT = "ganymede.update_compute_row_count"
    ERROR = "ganymede.backend.error"
    UNEXPECTED_EVENT_TYPE = "ganymede.unexpected_event_type"
    UNEXPECTED_EVENT_PARAMETER = "ganymede.unexpected_event_parameter"
# Event metrics
def create_session_start_log():
    """Return the JSON-encoded structured log emitted when the package is imported."""
    return json.dumps(
        {
            "event_type": MetricsEventType.IMPORT_SM_DW.value,
            "app_context": APP_CONTEXT,
        }
    )
# ToDo: change to event_type: MetricsEventType
def create_event_metrics(event_type: str, event_status: EventStatus, metadata=None):
    """Build the JSON payload for a generic API event.

    Args:
        event_type: one of the MetricsEventType values.
        event_status: EventStatus member; its ``.value`` is logged.
        metadata: optional extra payload, included only when truthy.
    """
    payload = {
        "event_type": event_type,
        "event_status": event_status.value,
        "app_context": APP_CONTEXT,
    }
    if metadata:
        payload["metadata"] = metadata
    return json.dumps(payload)
def get_apply_transform_metrics(
    warning_name, transform_name, operator_id, metadata=None
):
    """Build the JSON payload logged when a transform is applied.

    Args:
        warning_name: name of the warning the transform addresses.
        transform_name: name of the applied transform.
        operator_id: identifier of the operator that ran.
        metadata: optional extra payload, included only when truthy.
    """
    payload = {
        "event_type": MetricsEventType.APPLY_TRANSFORM.value,
        "warning_name": warning_name,
        "transform_name": transform_name,
        "operator_id": operator_id,
        "app_context": APP_CONTEXT,
    }
    if metadata:
        payload["metadata"] = metadata
    return json.dumps(payload)
def get_column_insights_metrics(data_quality_issues=None):
    """Build the JSON payload for a single column's insight computation.

    Args:
        data_quality_issues: list of insight identifiers found on the column,
            or None when no issues were detected.
    """
    # Bug fix: a mutable default argument ([]) is shared across calls; use
    # None as the sentinel instead.
    issues = data_quality_issues or []
    request_metrics = {
        "event_type": MetricsEventType.COMPUTE_COLUMN_INSIGHTS.value,
        "app_context": APP_CONTEXT,
        "data_quality_issues": issues,
        "num_data_quality_issues": len(issues),
    }
    return json.dumps(request_metrics)
def get_df_insights_metrics(df_column_insights):
    """Build the JSON payload summarizing insights across a whole dataframe.

    Args:
        df_column_insights: mapping of column name -> list of insight dicts,
            each carrying an insight id under ["warnings"]["insight_id"].
    """
    insight_ids = []
    for _, column_insights in df_column_insights.items():
        if column_insights:
            # The original shadowed the loop variable here; same behavior,
            # clearer names.
            insight_ids.extend(item["warnings"]["insight_id"] for item in column_insights)
    request_metrics = {
        "event_type": MetricsEventType.COMPUTE_COLUMN_INSIGHTS_DF.value,
        "app_context": APP_CONTEXT,
        "num_data_quality_issues": len(insight_ids),
        "data_quality_issues": Counter(insight_ids),
    }
    return json.dumps(request_metrics)
def get_target_column_metrics(problem_type, data_quality_issues=None):
    """Build the JSON payload for target-column insight computation.

    Args:
        problem_type: detected ML problem type for the target column.
        data_quality_issues: list of insight identifiers, or None when no
            issues were detected.
    """
    # Bug fix: a mutable default argument ([]) is shared across calls; use
    # None as the sentinel instead.
    issues = data_quality_issues or []
    request_metrics = {
        "event_type": MetricsEventType.COMPUTE_TARGET_INSIGHTS.value,
        "problem_type": problem_type,
        "app_context": APP_CONTEXT,
        "num_data_quality_issues": len(issues),
        "data_quality_issues": issues,
    }
    return json.dumps(request_metrics)
def create_update_compute_row_count_metrics(row_count, column_count):
    """Build a JSON-encoded metrics record for a row/column-count update.

    Args:
        row_count: Number of rows in the DataFrame.
        column_count: Number of columns in the DataFrame.

    Returns:
        str: JSON string describing the update event.
    """
    payload = {
        "event_type": MetricsEventType.UPDATE_COMPUTE_ROW_COUNT.value,
        "row_count": row_count,
        "column_count": column_count,
        "app_context": APP_CONTEXT,
    }
    return json.dumps(payload)
# Error metrics
def create_structured_error_log(
    exception,
    event_type=None,
    message=None,
    error_type=MetricsEventType.ERROR.value,
    level=ERROR,
):
    """Build a JSON-encoded structured error record.

    Args:
        exception: The exception instance, or None/falsy when unavailable.
        event_type: Optional event type string the error occurred in.
        message: Optional human-readable message.
        error_type: Error classification; defaults to the generic ERROR value.
        level: Log level recorded in the entry.

    Returns:
        str: JSON string with level, context, error name and a sanitized
        traceback summary (see get_trusted_error_details).
    """
    # Truthiness check matches the original: a falsy exception yields Nones.
    if exception:
        error_name = type(exception).__name__
        trusted_details = get_trusted_error_details(exception.__traceback__)
    else:
        error_name = None
        trusted_details = None
    error_log = {
        "level": level,
        "app_context": APP_CONTEXT,
        "event_type": event_type,
        "error_type": error_type,
        "error_name": error_name,
        "message": message,
        "trusted_error_details": trusted_details,
    }
    return json.dumps(error_log)
def get_trusted_error_details(tb) -> List[str]:
    """Return trusted error details with non-sensitive info.

    Only the filename, line number and frame name of each traceback frame are
    kept, so variable values and source text cannot leak into metrics.

    Args:
        tb: A traceback object (e.g. ``exception.__traceback__``).

    Returns:
        List[str]: One ``"filename:lineno:name"`` entry per stack frame,
        outermost frame first.
    """
    return [
        f"{frame_summary.filename}:{frame_summary.lineno}:{frame_summary.name}"
        for frame_summary in traceback.extract_tb(tb)
    ]
import json
import os
from sagemaker_datawrangler._version import version_info
LL_INTERNAL_METADATA_FILE = "/opt/.sagemakerinternal/internal-metadata.json"
KGW_APP_METADATA_FILE = "/opt/ml/metadata/resource-metadata.json"
PROD = "prod"
DEVO = "devo"
def _get_studio_metadata():
    """Read Studio metadata file from conventional place.

    Returns:
        dict: Studio metadata, or None when the file is absent.
    """
    if os.path.exists(LL_INTERNAL_METADATA_FILE):
        with open(LL_INTERNAL_METADATA_FILE) as metadata_file:
            return json.load(metadata_file)
    return None
def parse_app_arn(app_arn):
    """Parse an app ARN and return region and accountId info.

    Args:
        app_arn: An ARN string, e.g.
            ``arn:aws:sagemaker:us-east-2:583903115294:app/...``

    Returns:
        dict: ``{"Region": ..., "AccountId": ...}``, or an empty dict when the
        ARN is missing or malformed.
    """
    if not app_arn:
        return {}
    # ARN format: arn:partition:service:region:account-id:resource
    parts = app_arn.split("/")[0].split(":")
    if len(parts) < 5:
        # Malformed ARN: metadata collection is best-effort, don't raise.
        return {}
    return {
        "Region": parts[3],
        "AccountId": parts[4],
    }
def _get_kgw_app_metadata():
    """Read Kernel Gateway App (KGW) metadata file from platform conventional place.

    Example format:
    ```
    {
        "AppType": "KernelGateway",
        "DomainId": "d-wbkeatwf4ga6",
        "UserProfileName": "default-1607568379370",
        "ResourceArn": "some arn",
        "ResourceName": "sagemaker-data-wrang-ml-m5-4xlarge-54eebb9b8b7e2055bdd94fd073ee",
        "AppImageVersion": ""
    }
    ```

    Returns:
        dict: KGW metadata extended with Region/AccountId parsed from the app
        ARN, or None when the metadata file is absent.
    """
    if not os.path.exists(KGW_APP_METADATA_FILE):
        return None
    with open(KGW_APP_METADATA_FILE) as metadata_file:
        raw_metadata = json.load(metadata_file)
    # Extend the raw metadata with accountId and region parsed from the ARN.
    enriched_metadata = dict(raw_metadata)
    enriched_metadata.update(parse_app_arn(raw_metadata.get("ResourceArn")))
    return enriched_metadata
def _get_application_stage(studio_metadata: dict):
    """Return the deployment stage recorded in Studio metadata.

    Falls back to PROD when metadata exists without a "Stage" key, and to DEVO
    when there is no Studio metadata at all.
    """
    if studio_metadata is None:
        return DEVO
    return studio_metadata.get("Stage", PROD)
def _get_default_app_context(app_metadata=None):
    """Build the default app context attached to every metrics record.

    Args:
        app_metadata: Optional app metadata dict to embed under "app_metadata".

    Returns:
        dict: Context containing at least the package version string.
    """
    context = {"ganymede_version": ".".join(str(part) for part in version_info)}
    if app_metadata is not None:
        context["app_metadata"] = app_metadata
    return context
# Module-level singletons resolved once at import time.
STUDIO_METADATA = _get_studio_metadata()
KGW_METADATA = _get_kgw_app_metadata()
APP_CONTEXT = _get_default_app_context(app_metadata=KGW_METADATA)
STAGE = _get_application_stage(STUDIO_METADATA)
import logging
import traceback
from copy import deepcopy
import pandas as pd
from joblib import Parallel, delayed
from sagemaker_datawrangler.logging.logging import get_metrics_logger
from sagemaker_datawrangler.logging.metrics import (
MetricsEventType,
create_structured_error_log,
)
from .data_quality_insights import (
generate_column_insight_for_missing_ratio,
generate_column_insights_based_on_unique_values,
generate_disguised_missing_values_insight,
parse_column_insights,
)
from .utils import get_column_highlights
metrics_logger = get_metrics_logger()
def get_df_column_insights(df: pd.DataFrame, selected_column_name: str = None):
    """
    Calculate a pandas DataFrame all columns' statistics, quality issues and recommended transforms.

    Args:
        df: a pandas DataFrame.
        selected_column_name: when given, only this column is profiled.

    Returns:
        tuple(dict, dict, dict): per-column statistics, insights, and
        highlights, each keyed by column name.
    """
    from sagemaker_data_insights.analyzers.feature_analyzer import (
        analyze_feature_column,
    )
    from sagemaker_data_insights.column_data_insights.column_insights_data import (
        get_column_insights_data,
    )

    from .parser import parse_column_statistics

    if selected_column_name and selected_column_name not in list(df.columns):
        # selected column name is no longer in the dataframe, return early
        return (
            {selected_column_name: {}},
            {selected_column_name: []},
            {selected_column_name: {}},
        )
    if selected_column_name:
        df = df[[selected_column_name]]

    def _compute(column_name, column_data: pd.Series):
        """Profile a single column; may run in a joblib worker."""
        try:
            column_profiles, column_stats = get_column_insights_data(
                column_name, column_data
            )
            column_insights = []
            parsed_column_stats = parse_column_statistics(column_profiles, column_data)
            if column_profiles.get("logicalDataType") in ["numeric", "categorical"]:
                analyzer_result = analyze_feature_column(
                    column_data,
                    column_profiles.get("logicalDataType"),
                    column_stats[column_name],
                )
                # Only adopt the analyzer output when it carries parseable
                # insights; otherwise fall back to an empty list (previously
                # the raw analyzer dict could leak into the merge below).
                if analyzer_result and analyzer_result.get("insights") is not None:
                    column_insights = parse_column_insights(
                        column_name, analyzer_result["insights"], column_data
                    )
            column_insights = [
                *column_insights,
                *generate_column_insight_for_missing_ratio(
                    column_statistics=parsed_column_stats,
                    column_name=column_name,
                ),
                *generate_disguised_missing_values_insight(
                    df, column_name=column_name, column_statistics=parsed_column_stats
                ),
                *generate_column_insights_based_on_unique_values(
                    df, column_name=column_name
                ),
            ]
        except Exception as exception:
            metrics_logger.error(
                create_structured_error_log(
                    exception=exception,
                    event_type=MetricsEventType.COMPUTE_COLUMN_INSIGHTS_DF.value,
                    message="Failed to profile column while computing insights",
                )
            )
            # Reset to safe defaults so a failed column still yields a result.
            parsed_column_stats = {}
            column_insights = []
        return {
            "column_stats": parsed_column_stats,
            "column_insights": column_insights,
            "column_name": column_name,
            "column_highlights": get_column_highlights(
                column_name, column_data, deepcopy(column_insights)
            ),
        }

    compute_results = Parallel(n_jobs=-1)(
        delayed(_compute)(column_name, column_data)
        for column_name, column_data in df.items()
    )
    # collect and return insights results
    df_column_statistics = {}
    df_column_insights = {}
    df_column_highlights = {}
    for result in compute_results:
        df_column_statistics[result["column_name"]] = result["column_stats"]
        df_column_insights[result["column_name"]] = result["column_insights"]
        df_column_highlights[result["column_name"]] = result["column_highlights"]
    return df_column_statistics, df_column_insights, df_column_highlights
from typing import List
import pandas as pd
from sagemaker_datawrangler.transformers.utils import get_rare_categories
from .feature_column_insights_schema import FEATURE_COLUMN_INSIGHTS_INFO
from .insights_constants import (
Insights,
InsightsInfo,
InsightsSeverity,
InsightsThresholds,
)
from .target_column_insights_schema import TARGET_COLUMN_INSIGHTS_INFO
from .utils import get_transforms_for_missing_insights, parse_insight_id, set_examples
# Merged insight-id -> static metadata lookup covering both feature-column and
# target-column insights (description, name, severity, operators, ...).
INSIGHTS_INFO_MAP = {**FEATURE_COLUMN_INSIGHTS_INFO, **TARGET_COLUMN_INSIGHTS_INFO}

# Insight ids that parse_column_insights skips entirely.
INSIGHTS_TO_REMOVE = [
    Insights.TARGET_OUTLIERS,
    Insights.SKEWED_TARGET,
    Insights.HEAVY_TAILED_TARGET,
    Insights.NUMERIC_DISGUISED_MISSING_VALUE,
    Insights.CATEGORICAL_RARE,
]
class Warning(object):
    """A single data-quality warning, enriched with static metadata from
    INSIGHTS_INFO_MAP (description, display name, severity, example text).

    NOTE(review): this class shadows the builtin ``Warning`` exception within
    this module.
    """

    # TODO: Define Map with all supported warnings
    def __init__(
        self, insight_id: str, severity: str = None, name: str = None, info: dict = None
    ):
        # insight_info is None for ids missing from the map; the severity
        # fallback below would then raise AttributeError when no explicit
        # severity is passed -- callers are expected to pass known ids.
        insight_info = INSIGHTS_INFO_MAP.get(insight_id)
        self.insight_id = insight_id
        self.severity = severity if severity else insight_info.get("severity")
        self.info = info
        self.name = name
        if insight_info:
            self.description = insight_info.get("description")
            # Prefer the curated display name; fall back to the caller's name.
            self.name = insight_info.get("name", name)
            info_key = insight_info.get("info_key")
            if info and info_key:
                # Build human-readable example text from the values stored
                # under the insight's info key.
                values = self.info.get(info_key)
                self.example = set_examples(insight_info.get("example_prefix"), values)
class ColumnInsight(object):
    """Pairs a column name with a Warning and the recommended transform
    operators for that insight."""

    def __init__(self, feature_name: str, warnings: Warning, operators=None):
        self.feature_name = feature_name
        # Stored as a plain dict (via vars) so the insight is consumable as
        # plain data downstream.
        self.warnings = vars(warnings)
        # Default to the operators registered for this insight id; raises
        # KeyError for ids that have no "operators" entry in the map.
        self.operators = (
            INSIGHTS_INFO_MAP[warnings.insight_id]["operators"]
            if not operators
            else operators
        )
def generate_column_insight_for_missing_ratio(column_statistics, column_name):
    """Return a single HIGH_MISSING_RATIO column insight, or an empty list
    when the column has no missing values.

    Args:
        column_statistics: Parsed column statistics with a "missingRatio" key.
        column_name: Name of the column being profiled.
    """
    missing_ratio = column_statistics["missingRatio"]
    if missing_ratio == 0:
        return []
    transforms = get_transforms_for_missing_insights(column_statistics, missing_ratio)
    missing_warning = Warning(
        insight_id=Insights.HIGH_MISSING_RATIO,
        info={InsightsInfo.MISSING_VALUES: [None]},
    )
    insight = ColumnInsight(
        feature_name=column_name,
        warnings=missing_warning,
        operators=transforms,
    )
    return [vars(insight)]
def generate_disguised_missing_values_insight(df, column_name, column_statistics):
    """Detect "disguised" missing markers among the column's unique string
    values and return a single insight for them, or an empty list.

    Args:
        df: The DataFrame being profiled.
        column_name: Name of the column to inspect.
        column_statistics: Parsed column statistics for transform selection.
    """
    disguised_values = [
        value
        for value in df[column_name].unique()
        if isinstance(value, str)
        # Handle leading/trailing spaces around common missing-value markers.
        and value.upper().strip() in InsightsThresholds.DISGUISED_MISSING_VALUES
    ]
    if not disguised_values:
        return []
    transforms = get_transforms_for_missing_insights(
        column_statistics, insight=Insights.DISGUISED_MISSING_VALUES
    )
    disguised_warning = Warning(
        insight_id=Insights.DISGUISED_MISSING_VALUES,
        severity=InsightsSeverity.MEDIUM_FEATURE,
        info={InsightsInfo.DISGUISED_MISSING_VALUES: list(disguised_values)},
    )
    insight = ColumnInsight(
        feature_name=column_name,
        warnings=disguised_warning,
        operators=transforms,
    )
    return [vars(insight)]
def generate_column_insights_based_on_unique_values(df, column_name):
    """Derive at most one insight from the column's unique-value count:
    constant column, ID-like column, or high cardinality.

    Args:
        df: The DataFrame being profiled.
        column_name: Name of the column to inspect.

    Returns:
        list: A single-element list with the insight dict, or an empty list.
    """
    n_unique = len(pd.unique(df[column_name]))
    n_rows = df[column_name].size
    warning = None
    if n_unique == 1:
        warning = Warning(insight_id=Insights.CONSTANT_COLUMN)
    elif n_unique == n_rows:
        warning = Warning(insight_id=Insights.ID_COLUMN)
    elif n_unique / n_rows > InsightsThresholds.HIGH_CARDINALITY:
        warning = Warning(insight_id=Insights.HIGH_CARDINALITY)
    if warning is None:
        return []
    return [vars(ColumnInsight(feature_name=column_name, warnings=warning))]
def parse_column_insights(
    column_name: str, insights: List, column_data: pd.Series = None
):
    """
    Parse the raw column insights data for frontend usage
    Related logic in DW: https://tiny.amazon.com/102mzxuy8/githawssageblobmastsrcsage
    param: column_name: DataFrame column name (unique) e.g. 'boat'
    param: insights: an array of insights of the column from SageMakerDataInsights
    param: column_data: optional raw column values, used to recompute rare categories
    Returns: list of plain-dict ColumnInsight records
    """
    parsed_column_insights = []
    for insight in insights:
        insight_id = parse_insight_id(insight["key"])
        # Remove Outliers in target, Skewness in target, Heavy tailed target
        if insight_id in INSIGHTS_TO_REMOVE:
            continue
        # temporary as data insights does not return rare categories correctly
        # NOTE(review): CATEGORICAL_RARE is also listed in INSIGHTS_TO_REMOVE,
        # so this branch appears unreachable -- confirm which behavior is
        # intended before removing either side.
        if insight_id == Insights.CATEGORICAL_RARE and column_data is not None:
            insight["info"] = {}
            insight_info_key = INSIGHTS_INFO_MAP.get(insight_id).get("info_key")
            insight["info"][insight_info_key] = get_rare_categories(
                pd.DataFrame({column_name: column_data}), column_name
            )
        warning = Warning(
            insight_id=insight_id,
            severity=insight["severity"],
            info=insight.get("info"),
            name=insight["key"],
        )
        column_insights = vars(
            ColumnInsight(feature_name=column_name, warnings=warning)
        )
        parsed_column_insights.append(column_insights)
    return parsed_column_insights
import logging
import pandas as pd
from sagemaker_datawrangler.insights.data_quality_insights import ColumnInsight, Warning
from sagemaker_datawrangler.logging.logging import get_metrics_logger
from sagemaker_datawrangler.logging.metrics import (
MetricsEventType,
create_structured_error_log,
)
metrics_logger = get_metrics_logger()
def _group_multiple_target_insights(target_insight: list) -> list:
    """
    Convert multiple target insights into a single insight with aggregated information for each label
    """
    from sagemaker_data_insights.insights import Insights

    aggregated_insights = []
    skewed_target_label_info = []
    rare_target_label_info = []
    # First pass: collect skewed-class and rare-label insights separately;
    # every other insight passes through unchanged.
    for insight in target_insight:
        if insight["warnings"]["name"] == Insights.IMBALANCED_CLASSES:
            # The last matching insight becomes the template for the merge.
            skewed_target_insight = {
                "feature_name": insight["feature_name"],
                "operators": insight["operators"],
                "warnings": insight["warnings"],
            }
            skewed_target_label_info.append(insight["warnings"]["info"])
        elif insight["warnings"]["name"] == Insights.RARE_TARGET_LABEL:
            rare_target_label_insight = {
                "feature_name": insight["feature_name"],
                "operators": insight["operators"],
                "warnings": insight["warnings"],
            }
            rare_target_label_info.append(insight["warnings"]["info"])
        else:
            aggregated_insights.append(insight)
    if skewed_target_label_info:
        if len(skewed_target_label_info) == 1:
            skewed_target_insight["warnings"]["info"] = skewed_target_label_info[0]
        else:
            # Multiple imbalanced-class insights: collapse into one synthetic
            # warning carrying every label/count pair.
            skewed_target_insight["warnings"] = {
                "insight_id": "sagemaker.data_quality.classes_too_imbalanced",
                "severity": "med_sev",
                "name": "Classes too imbalanced",
            }
            label = []
            count = []
            # NOTE(review): most_frequent_label(_count) is taken from the
            # first info entry only -- confirm all entries agree on it.
            most_frequent_label = skewed_target_label_info[0]["most_frequent_label"]
            most_frequent_label_count = skewed_target_label_info[0][
                "most_frequent_label_count"
            ]
            for info in skewed_target_label_info:
                label.append(info["label"])
                count.append(info["count"])
            skewed_target_insight["warnings"]["info"] = {
                "label": label,
                "count": count,
                "most_frequent_label": most_frequent_label,
                "most_frequent_label_count": most_frequent_label_count,
            }
        # Process warning: normalize into the Warning record shape (plain dict).
        skewed_target_insight["warnings"] = vars(
            Warning(
                insight_id=skewed_target_insight["warnings"]["insight_id"],
                severity=skewed_target_insight["warnings"]["severity"],
                name=skewed_target_insight["warnings"]["name"],
                info=skewed_target_insight["warnings"]["info"],
            )
        )
        aggregated_insights.append(skewed_target_insight)
    if rare_target_label_info:
        if len(rare_target_label_info) == 1:
            rare_target_label_insight["warnings"]["info"] = rare_target_label_info[0]
        else:
            # Multiple rare-label insights: collapse into one synthetic warning.
            rare_target_label_insight["warnings"] = {
                "insight_id": "sagemaker.data_quality.too_few_instances_per_class",
                "severity": "high_sev",
                "name": "Too few instances per class",
            }
            label = []
            count = []
            for info in rare_target_label_info:
                label.append(info["label"])
                count.append(info["count"])
            rare_target_label_insight["warnings"]["info"] = {
                "label": label,
                "count": count,
            }
        # Normalize into the Warning record shape (plain dict).
        rare_target_label_insight["warnings"] = vars(
            Warning(
                insight_id=rare_target_label_insight["warnings"]["insight_id"],
                severity=rare_target_label_insight["warnings"]["severity"],
                name=rare_target_label_insight["warnings"]["name"],
                info=rare_target_label_insight["warnings"]["info"],
            )
        )
        aggregated_insights.append(rare_target_label_insight)
    return aggregated_insights
def analyze_target(
    df: pd.DataFrame, problem_type: str, target_column_name: str
) -> dict:
    """
    Target column analyzers to run analysis on target column insights and recommend transforms
    Params:
        df: pd.DataFrame pandas data frame
        problem_type: str, Classfication or Regression
        target_column_name: str, name of the target column
    Returns:
        dict:
            target_column_name: a list of data insights with recommended transforms
        Returns None when the problem type is invalid or the target column is
        missing from the DataFrame.
    Example:
        {'target': [{'feature_name': 'target', 'warnings': {'name': 'Rare target label', 'insight_id': 'sagemaker.data_quality.rare_target_label', 'severity': 'high_sev', 'info': {'label': 'c', 'count': 15}, 'description': 'Rare target label'}, 'operators': [{'operator_id': 'sagemaker.spark.handle_outliers_0.1', 'transformer_name': 'Replace rare values'}]}, {'feature_name': 'target', 'warnings': {'name': 'Rare target label', 'insight_id': 'sagemaker.data_quality.rare_target_label', 'severity': 'high_sev', 'info': {'label': 'a', 'count': 5}, 'description': 'Rare target label'}, 'operators': [{'operator_id': 'sagemaker.spark.handle_outliers_0.1', 'transformer_name': 'Replace rare values'}]}]}
    """
    from sagemaker_data_insights.analyzers.target_column_analyzer import (
        analyze_target_classification,
        analyze_target_regression,
    )
    from sagemaker_data_insights.calc_stats_pandas_series import (
        _calc_stats_pandas_series,
    )
    from sagemaker_data_insights.const import TaskType

    from sagemaker_datawrangler.insights.data_quality_insights import (
        parse_column_insights,
    )

    if problem_type not in [TaskType.CLASSIFICATION, TaskType.REGRESSION]:
        metrics_logger.error(
            create_structured_error_log(
                exception=None,
                event_type=MetricsEventType.COMPUTE_TARGET_INSIGHTS.value,
                message=f"Invalid problem type {problem_type}",
            )
        )
        # Invalid problem type: nothing to analyze.
        return
    if target_column_name not in list(df.columns):
        logging.warning(
            f"Target column {target_column_name} not found in the DataFrame"
        )
        return
    target_col_metrics = _calc_stats_pandas_series(df[target_column_name])
    # Pick the analyzer matching the problem type.
    target_analyzer = (
        analyze_target_regression
        if problem_type == TaskType.REGRESSION
        else analyze_target_classification
    )
    target_insights, _ = target_analyzer(df[target_column_name], target_col_metrics)
    parsed_insights = parse_column_insights(
        target_column_name, target_insights["insights"]
    )
    # Group multiple instances of the same target insight
    grouped_insights = _group_multiple_target_insights(parsed_insights)
    return {target_column_name: grouped_insights}
from sagemaker_datawrangler.transformers.constants import OPERATORS, TRANSFORMER_NAMES
from .insights_constants import Insights, InsightsInfo, InsightsSeverity
# Insights related to target column
#
# Each entry maps an insight id -> static metadata consumed by Warning /
# ColumnInsight: display "name", "description", "severity", optional
# "info_key"/"example_prefix" used to build example text, and the recommended
# transform "operators".
#
# NOTE(review): operator entries mix the keys "transform_description" and
# "transformer_description" -- confirm which spelling consumers read before
# normalizing.
# NOTE(review): HEAVY_TAILED_TARGET has no "operators" key; it is filtered out
# via INSIGHTS_TO_REMOVE, but ColumnInsight would raise KeyError if it were
# ever surfaced -- confirm.
TARGET_COLUMN_INSIGHTS_INFO = {
    Insights.SKEWED_TARGET: {
        "name": "Skewness in target",
        "description": "The target column has a univariate distribution. The distribution might be skewed. Consider dropping the outliers.",
        "severity": InsightsSeverity.HIGH,
        "operators": [
            {
                "operator_id": OPERATORS.DROP_OUTLIERS,
                "transformer_name": TRANSFORMER_NAMES.HANDLE_NUMERIC_OUTLIERS,
                "transform_description": "Drops numeric values that are three standard errors away from the mean.",
            }
        ],
    },
    Insights.RARE_TARGET_LABEL: {
        "info_key": InsightsInfo.RARE_TARGET_LABEL,
        "name": "Too few instances per class",
        "description": "The target column has categories that appear rarely. You can replace the categories with the string, 'Other'. For numeric values, consider replacing them with a new number that does not exist in the target column.",
        "severity": InsightsSeverity.HIGH,
        "example_prefix": "The following are the rare categories in the target column ",
        "operators": [
            {
                "operator_id": OPERATORS.REPLACE_RARE_TARGET,
                "transformer_name": TRANSFORMER_NAMES.REPLACE_RARE_TARGET_VALUES,
                "transformer_description": "Replaces rare target values with 'Other' or a new number.",
            },
            {
                "operator_id": OPERATORS.DROP_RARE_TARGET,
                "transformer_name": TRANSFORMER_NAMES.DROP_RARE_TARGET_VALUES,
                "transformer_description": "Drops rare target values whose count is less than 10.",
            },
        ],
    },
    Insights.HIGH_TARGET_CARDINALITY: {
        "info_key": InsightsInfo.TARGET_CARDINALITY,
        "name": "Too many classes",
        "severity": InsightsSeverity.LOW,
        "description": "There are many classes in the target column. Having many classes may result in longer training time or poor predicative quality. You may consider converting it to a regression task.",
        "example_prefix": "The number of classes in the target column is ",
        "operators": [],
    },
    Insights.IMBALANCED_CLASSES: {
        "info_key": InsightsInfo.IMBALANCED_CLASSES,
        "name": "Classes too imbalanced",
        "severity": InsightsSeverity.MEDIUM,
        "description": "There are categories in your dataset that appear much more frequently than other categories. The class imbalance might affect prediction accuracy. For accurate predictions, we recommend updating the dataset with rows that have the categories that appear less frequently.",
        "example_prefix": "The infrequent labels are ",
        "operators": [
            {
                "operator_id": OPERATORS.REPLACE_RARE_TARGET,
                "transformer_name": TRANSFORMER_NAMES.REPLACE_RARE_TARGET_VALUES,
                "transform_description": "Replace rare target values with 'Other', or a new number.",
            },
            {
                "operator_id": OPERATORS.DROP_RARE_TARGET,
                "transformer_name": TRANSFORMER_NAMES.DROP_RARE_TARGET_VALUES,
                "transform_description": "Drop rare target values whose count is less than 1% of the target column.",
            },
        ],
    },
    Insights.HIGHLY_SKEWED_MINORITY: {
        "info_key": InsightsInfo.HIGHLY_SKEWED_MINORITY,
        "name": "Highly skewed minority",
        "severity": InsightsSeverity.HIGH,
        "description": "The minority label count is very low. The skew might affect prediction accuracy. For accurate predictions, we recommend upsampling or synthetic sampling using SMOTE.",
        "example_prefix": "The minority label is ",
        "operators": [],
    },
    Insights.REGRESSION_FREQUENT_LABEL: {
        "info_key": InsightsInfo.REGRESSION_FREQUENT_LABEL,
        "name": "Frequent label",
        "description": "The frequency of the label in the target column is uncommon for regression tasks. This could point to bugs in data collection or processing. In some cases, the very frequent label is a default value or a placeholder to indicate missing values. If that's the case, consider replace the value with NaN.",
        "example_prefix": "The frequent label is ",
        "severity": InsightsSeverity.MEDIUM,
        "operators": [
            {
                "operator_id": OPERATORS.CONVERT_REGEX_TO_MISSING,
                "transformer_name": TRANSFORMER_NAMES.REPLACE_WITH_NAN,
                "transformer_description": "Replace categories that can't be converted to numeric values with NaNs",
            }
        ],
    },
    Insights.HEAVY_TAILED_TARGET: {
        "name": "Heavy tailed target",
        "description": "The target column is heavy tailed and contains outliers.",
        "severity": InsightsSeverity.MEDIUM,
    },
    Insights.NON_NUMERIC_VALUES: {
        "info_key": InsightsInfo.NON_NUMERIC_VALUES,
        "name": "Many non-numeric values",
        "description": "There are some categories in the target column can't be converted to numeric values. There might be data entry errors. We recommend removing the rows containing the values that can't be converted from the dataset.",
        "severity": InsightsSeverity.HIGH,
        "example_prefix": "The non numeric values are ",
        "operators": [
            {
                "operator_id": OPERATORS.CONVERT_TO_NUMERIC_AND_DROP_MISSING,
                "transformer_name": TRANSFORMER_NAMES.CONVERT_TO_NUMERIC_AND_DROP_NAN,
                "transform_description": "Convert invalid numerical values to NaN and drop. ",
            },
        ],
    },
    Insights.TARGET_OUTLIERS: {
        "name": "Outliers in target",
        "description": "The target column has several outliers. We recommend dropping them for more accurate predictions.",
        "severity": InsightsSeverity.MEDIUM,
        "operators": [
            {
                "operator_id": OPERATORS.DROP_OUTLIERS,
                "transformer_name": TRANSFORMER_NAMES.HANDLE_NUMERIC_OUTLIERS,
                "transform_description": "Drops numeric values that are three standard errors away from the mean.",
            }
        ],
    },
}
"""Contains the SageMaker Experiment class."""
from smexperiments import _base_types, api_types, trial, _utils, trial_component
import time
class Experiment(_base_types.Record):
    """
    An Amazon SageMaker experiment, which is a collection of related trials.

    New experiments are created by calling :meth:`~smexperiments.experiment.Experiment.create`. Existing
    experiments can be reloaded by calling :meth:`~smexperiments.experiment.Experiment.load`. You can
    add a new trial to an Experiment by calling :meth:`~smexperiments.experiment.Experiment.create_trial`.
    To remove an experiment and associated trials, trial components by calling :meth:`~smexperiments.experiment
    .Experiment.delete_all`.

    Examples:
        .. code-block:: python

            from smexperiments import experiment

            my_experiment = experiment.Experiment.create(experiment_name='AutoML')
            my_trial = my_experiment.create_trial(trial_name='random-forest')

            for exp in experiment.Experiment.list():
                print(exp)
            for trial in my_experiment.list_trials():
                print(trial)

            my_experiment.delete_all(action="--force")

    Parameters:
        experiment_name (str): The name of the experiment. The name must be unique within an account.
        description (str): A description of the experiment.
        tags (List[dict[str, str]]): A list of tags to associate with the experiment.
    """

    experiment_name = None
    description = None
    tags = None

    # Names of the boto3 SageMaker client methods backing each CRUD operation.
    _boto_create_method = "create_experiment"
    _boto_load_method = "describe_experiment"
    _boto_update_method = "update_experiment"
    _boto_delete_method = "delete_experiment"

    # Members forwarded to the update/delete API calls.
    _boto_update_members = ["experiment_name", "description", "display_name"]
    _boto_delete_members = ["experiment_name"]

    # Maximum number of passes delete_all makes before giving up.
    MAX_DELETE_ALL_ATTEMPTS = 3

    def save(self):
        """Save the state of this Experiment to SageMaker.

        Returns:
            dict: Update experiment API response.
        """
        return self._invoke_api(self._boto_update_method, self._boto_update_members)

    def delete(self):
        """Delete this Experiment from SageMaker.

        Deleting an Experiment requires that each Trial in the Experiment is first deleted.

        Returns:
            dict: Delete experiment API response.
        """
        return self._invoke_api(self._boto_delete_method, self._boto_delete_members)

    @classmethod
    def load(cls, experiment_name, sagemaker_boto_client=None):
        """
        Load an existing experiment and return an ``Experiment`` object representing it.

        Args:
            experiment_name: (str): Name of the experiment
            sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker.
                If not supplied, a default boto3 client will be created and used.

        Returns:
            sagemaker.experiments.experiment.Experiment: A SageMaker ``Experiment`` object
        """
        return cls._construct(
            cls._boto_load_method,
            experiment_name=experiment_name,
            sagemaker_boto_client=sagemaker_boto_client,
        )

    @classmethod
    def create(cls, experiment_name=None, description=None, tags=None, sagemaker_boto_client=None):
        """
        Create a new experiment in SageMaker and return an ``Experiment`` object.

        Args:
            experiment_name: (str): Name of the experiment. Must be unique. Required.
            description: (str, optional): Description of the experiment
            sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker. If not
                supplied, a default boto3 client will be created and used.
            tags (List[dict[str, str]]): A list of tags to associate with the experiment.

        Returns:
            sagemaker.experiments.experiment.Experiment: A SageMaker ``Experiment`` object
        """
        return cls._construct(
            cls._boto_create_method,
            experiment_name=experiment_name,
            description=description,
            tags=tags,
            sagemaker_boto_client=sagemaker_boto_client,
        )

    @classmethod
    def list(
        cls,
        created_before=None,
        created_after=None,
        sort_by=None,
        sort_order=None,
        sagemaker_boto_client=None,
    ):
        """
        List experiments. Returns experiments in the account matching the specified criteria.

        Args:
            created_before: (datetime.datetime, optional): Return experiments created before this
                instant.
            created_after: (datetime.datetime, optional): Return experiments created after this
                instant.
            sort_by (str, optional): Which property to sort results by. One of
                'Name', 'CreationTime'.
            sort_order (str, optional): One of 'Ascending', or 'Descending'.
            sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker. If not
                supplied, a default boto3 client will be used.

        Returns:
            collections.Iterator[sagemaker.experiments.api_types.ExperimentSummary] : An iterator
                over experiment summaries matching the specified criteria.
        """
        return super(Experiment, cls)._list(
            "list_experiments",
            api_types.ExperimentSummary.from_boto,
            "ExperimentSummaries",
            created_before=created_before,
            created_after=created_after,
            sort_by=sort_by,
            sort_order=sort_order,
            sagemaker_boto_client=sagemaker_boto_client,
        )

    @classmethod
    def search(
        cls,
        search_expression=None,
        sort_by=None,
        sort_order=None,
        max_results=None,
        sagemaker_boto_client=None,
    ):
        """
        Search experiments. Returns SearchResults in the account matching the search criteria.

        Args:
            search_expression: (dict, optional): A Boolean conditional statement. Resource objects
                must satisfy this condition to be included in search results. You must provide at
                least one subexpression, filter, or nested filter.
            sort_by (str, optional): The name of the resource property used to sort the SearchResults.
                The default is LastModifiedTime
            sort_order (str, optional): How SearchResults are ordered. Valid values are Ascending or
                Descending . The default is Descending .
            max_results (int, optional): The maximum number of results to return in a SearchResponse.
            sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker. If not
                supplied, a default boto3 client will be used.

        Returns:
            collections.Iterator[SearchResult] : An iterator over search results matching the search criteria.
        """
        return super(Experiment, cls)._search(
            search_resource="Experiment",
            search_item_factory=api_types.ExperimentSearchResult.from_boto,
            search_expression=None if search_expression is None else search_expression.to_boto(),
            sort_by=sort_by,
            sort_order=sort_order,
            max_results=max_results,
            sagemaker_boto_client=sagemaker_boto_client,
        )

    def list_trials(self, created_before=None, created_after=None, sort_by=None, sort_order=None):
        """List trials in this experiment matching the specified criteria.

        Args:
            created_before (datetime.datetime, optional): Return trials created before this instant.
            created_after (datetime.datetime, optional): Return trials created after this instant.
            sort_by (str, optional): Which property to sort results by. One of 'Name',
                'CreationTime'.
            sort_order (str, optional): One of 'Ascending', or 'Descending'.

        Returns:
            collections.Iterator[sagemaker.experiments.api_types.TrialSummary] : An iterator over
                trials matching the criteria.
        """
        return trial.Trial.list(
            experiment_name=self.experiment_name,
            created_before=created_before,
            created_after=created_after,
            sort_by=sort_by,
            sort_order=sort_order,
            sagemaker_boto_client=self.sagemaker_boto_client,
        )

    def create_trial(self, trial_name=None, trial_name_prefix="SageMakerTrial"):
        """Create a trial in this experiment.

        Since trial names are expected to be unique in an account, ``trial_name_prefix`` can be provided
        instead of ``trial_name``. In this case a unique name will be generated that begins with the specified
        prefix.

        Args:
            trial_name (str): Name of the trial.
            trial_name_prefix (str): Prefix for the trial name if you want SageMaker to
                auto-generate the trial name.

        Returns:
            sagemaker.experiments.trial.Trial : A SageMaker ``Trial`` object representing the
                created trial.
        """
        if not trial_name:
            trial_name = _utils.name(trial_name_prefix)
        return trial.Trial.create(
            trial_name=trial_name,
            experiment_name=self.experiment_name,
            sagemaker_boto_client=self.sagemaker_boto_client,
        )

    def delete_all(self, action):
        """
        Force to delete the experiment and associated trials, trial components under the experiment.

        Args:
            action (str): pass in string '--force' to confirm recursively delete all the experiments, trials,
                and trial components.
        """
        if action != "--force":
            raise ValueError(
                "Must confirm with string '--force' in order to delete the experiment and "
                "associated trials, trial components."
            )

        delete_attempt_count = 0
        last_exception = None
        # Retry the whole recursive delete up to MAX_DELETE_ALL_ATTEMPTS times;
        # the attempt counter is incremented in the finally block so both
        # successful and failed passes count.
        while True:
            if delete_attempt_count == self.MAX_DELETE_ALL_ATTEMPTS:
                raise Exception("Failed to delete, please try again.") from last_exception
            try:
                for trial_summary in self.list_trials():
                    t = trial.Trial.load(
                        sagemaker_boto_client=self.sagemaker_boto_client, trial_name=trial_summary.trial_name
                    )
                    for trial_component_summary in t.list_trial_components():
                        tc = trial_component.TrialComponent.load(
                            sagemaker_boto_client=self.sagemaker_boto_client,
                            trial_component_name=trial_component_summary.trial_component_name,
                        )
                        tc.delete(force_disassociate=True)
                        # to prevent throttling
                        time.sleep(1.2)
                    t.delete()
                    # to prevent throttling
                    time.sleep(1.2)
                self.delete()
                break
            except Exception as ex:
                last_exception = ex
            finally:
                delete_attempt_count += 1
"""Placeholder docstring"""
from smexperiments import _boto_functions, _utils
class ApiObject(object):
    """A Python-side representation of a boto API object.

    Boto responses use 'UpperCamelCase' keys; instances of this class expose the
    same data as ordinary lower_snake_case attributes. ``from_boto`` builds an
    instance from a boto dict and ``to_boto`` converts an instance (or plain dict)
    back into the boto wire shape.
    """

    # Boto 'UpperCamelCase' name -> member name. Names absent from this map are
    # converted to lower_snake_case automatically.
    _custom_boto_names = {}

    # Member name -> ApiObject subclass, allowing nested ApiObject members.
    _custom_boto_types = {}

    def __init__(self, **kwargs):
        # All keyword arguments become instance attributes directly.
        self.__dict__.update(kwargs)

    @classmethod
    def _boto_ignore(cls):
        """Response keys to drop before conversion."""
        return ["ResponseMetadata"]

    @classmethod
    def from_boto(cls, boto_dict, **kwargs):
        """Construct an instance of this ApiObject from a boto response.

        Args:
            boto_dict (dict): A dictionary of a boto response.
            **kwargs: Extra attributes to set on the instance, overriding converted ones.
        """
        filtered = {key: value for key, value in boto_dict.items() if key not in cls._boto_ignore()}
        # Invert the custom-name map: boto name -> member name.
        boto_to_member = {boto_name: member for member, boto_name in cls._custom_boto_names.items()}
        init_kwargs = _boto_functions.from_boto(filtered, boto_to_member, cls._custom_boto_types)
        init_kwargs.update(kwargs)
        return cls(**init_kwargs)

    @classmethod
    def to_boto(cls, obj):
        """Convert an object (or a plain dict) to its boto representation.

        Args:
            obj: The ApiObject instance or dict to convert.
        """
        var_dict = obj if isinstance(obj, dict) else vars(obj)
        return _boto_functions.to_boto(var_dict, cls._custom_boto_names, cls._custom_boto_types)

    def __eq__(self, other):
        """Equal iff `other` is the same class and has an identical attribute dict."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self.__eq__(other)

    def __hash__(self):
        """Hash over the sorted attribute items (requires hashable attribute values)."""
        return hash(tuple(sorted(self.__dict__.items())))

    def __repr__(self):
        """Debug representation, e.g. ``ApiObject(x=1)``."""
        members = ",".join("{}={}".format(name, repr(value)) for name, value in vars(self).items())
        return "{}({})".format(type(self).__name__, members)
class Record(ApiObject):
    """A boto based Active Record class based on convention over Create/Read/Update/Delete operations."""

    # Names of the boto client methods used for update / delete / list.
    _boto_update_method = None
    _boto_delete_method = None
    _boto_list_method = None

    # Member names converted to boto representations and passed to the update method.
    _boto_update_members = []

    # Member names converted to boto representations and passed to the delete method.
    _boto_delete_members = []

    def __init__(self, sagemaker_boto_client, **kwargs):
        """Bind the record to a SageMaker boto client; remaining kwargs become attributes."""
        self.sagemaker_boto_client = sagemaker_boto_client
        super(Record, self).__init__(**kwargs)

    @classmethod
    def _list(
        cls,
        boto_list_method,
        list_item_factory,
        boto_list_items_name,
        boto_next_token_name="NextToken",
        sagemaker_boto_client=None,
        **kwargs
    ):
        """Yield typed items from a paginated boto list call, following next tokens."""
        client = sagemaker_boto_client or _utils.sagemaker_client()
        token = None
        try:
            while True:
                request_kwargs = _boto_functions.to_boto(kwargs, cls._custom_boto_names, cls._custom_boto_types)
                if token:
                    request_kwargs[boto_next_token_name] = token
                response = getattr(client, boto_list_method)(**request_kwargs)
                page_items = response.get(boto_list_items_name, [])
                token = response.get(boto_next_token_name)
                for item in page_items:
                    yield list_item_factory(item)
                if not token:
                    break
        except StopIteration:
            # Preserved from the original implementation: end the generator cleanly
            # if a StopIteration escapes the loop body.
            return

    @classmethod
    def _search(
        cls,
        search_resource,
        search_item_factory,
        boto_next_token_name="NextToken",
        sagemaker_boto_client=None,
        **kwargs
    ):
        """Yield typed results from the SageMaker Search API for one resource type."""
        client = sagemaker_boto_client or _utils.sagemaker_client()
        token = None
        try:
            while True:
                request_kwargs = _boto_functions.to_boto(kwargs, cls._custom_boto_names, cls._custom_boto_types)
                request_kwargs["Resource"] = search_resource
                if token:
                    request_kwargs[boto_next_token_name] = token
                response = getattr(client, "search")(**request_kwargs)
                page_items = response.get("Results", [])
                token = response.get(boto_next_token_name)
                for item in page_items:
                    # Each search result is keyed by resource type; yield only ours.
                    if cls.__name__ in item:
                        yield search_item_factory(item[cls.__name__])
                if not token:
                    break
        except StopIteration:
            return

    @classmethod
    def _construct(cls, boto_method_name, sagemaker_boto_client=None, **kwargs):
        """Build the record locally, then create/load it through the named boto call."""
        client = sagemaker_boto_client or _utils.sagemaker_client()
        instance = cls(client, **kwargs)
        return instance._invoke_api(boto_method_name, kwargs)

    def with_boto(self, boto_dict):
        """Update this ApiObject with a boto response.

        Args:
            boto_dict (dict): A dictionary of a boto response.
        """
        boto_to_member = {boto_name: member for member, boto_name in self._custom_boto_names.items()}
        self.__dict__.update(
            **_boto_functions.from_boto(boto_dict, boto_to_member, self._custom_boto_types)
        )
        return self

    def _invoke_api(self, boto_method, boto_method_members):
        """Call a boto method with this record's matching members and absorb the response."""
        api_values = {name: value for name, value in vars(self).items() if name in boto_method_members}
        api_kwargs = self.to_boto(api_values)
        api_method = getattr(self.sagemaker_boto_client, boto_method)
        return self.with_boto(api_method(**api_kwargs))
"""Metrics module"""
import datetime
import json
import logging
import os
import time
import dateutil.tz
# Directory where metric JSON files are written; SageMaker training containers
# set SAGEMAKER_METRICS_DIRECTORY, otherwise fall back to the current directory.
METRICS_DIR = os.environ.get("SAGEMAKER_METRICS_DIRECTORY", ".")
# NOTE(review): calling basicConfig at import time configures the root logger as a
# library side effect — confirm downstream consumers rely on this before removing.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class SageMakerFileMetricsWriter(object):
    """Writes metric data to file.

    Metrics are appended as one JSON object per line. The file is opened lazily
    on the first ``log_metric`` call.
    """

    def __init__(self, metrics_file_path=None):
        """Initialize the writer.

        Args:
            metrics_file_path (str): Optional explicit path for the metrics file.
                Defaults to a pid-derived file under ``METRICS_DIR``.
        """
        self._metrics_file_path = metrics_file_path
        self._file = None  # opened lazily on the first log_metric call
        self._closed = False

    def log_metric(self, metric_name, value, timestamp=None, iteration_number=None):
        """Write a metric to file.

        Args:
            metric_name (str): The name of the metric.
            value (str): The value of the metric.
            timestamp (datetime): Timestamp of the metric.
            iteration_number (int): Iteration number of the metric.

        Raises:
            SageMakerMetricsWriterException: If the metrics file is closed.
            AttributeError: If file has been initialized and the writer hasn't been closed.
        """
        raw_metric_data = _RawMetricData(
            metric_name=metric_name, value=value, timestamp=timestamp, iteration_number=iteration_number
        )
        try:
            # Use the module-level logger with lazy %-args (the original called the
            # root logger via logging.debug, bypassing this module's logger).
            logger.debug("Writing metric: %s", raw_metric_data)
            self._write_record(raw_metric_data)
        except AttributeError:
            # self._file is None: either the writer was closed, or never opened yet.
            if self._closed:
                raise SageMakerMetricsWriterException("log_metric called on a closed writer")
            elif not self._file:
                self._file = open(self._get_metrics_file_path(), "a", buffering=1)
                self._write_record(raw_metric_data)
            else:
                raise

    def _write_record(self, raw_metric_data):
        # One JSON object per line.
        self._file.write(json.dumps(raw_metric_data.to_record()))
        self._file.write("\n")

    def close(self):
        """Closes the metric file. Safe to call more than once."""
        # NOTE(review): if close() is called before any metric was written, _closed
        # stays False and a later log_metric will still open the file — confirm
        # whether that is intended.
        if not self._closed and self._file:
            self._file.close()
            self._file = None  # invalidate reference, causing subsequent log_metric to fail.
            self._closed = True

    def __enter__(self):
        """Return self."""
        return self

    def __exit__(self, type, value, traceback):
        """Execute self.close()."""
        self.close()

    def __del__(self):
        """Execute self.close()."""
        self.close()

    def _get_metrics_file_path(self):
        """Return the explicit path if provided, else a per-process file in METRICS_DIR."""
        pid_filename = "{}.json".format(str(os.getpid()))
        metrics_file_path = self._metrics_file_path or os.path.join(METRICS_DIR, pid_filename)
        logger.debug("metrics_file_path=%s", metrics_file_path)
        return metrics_file_path
class SageMakerMetricsWriterException(Exception):
    """Raised when metric data cannot be written, e.g. the writer was already closed."""

    def __init__(self, message, errors=None):
        """Store the message; attach ``errors`` only when a truthy value is given."""
        super().__init__(message)
        if errors:
            self.errors = errors
class _RawMetricData(object):
    """Internal value object for a single metric datum, using boto field casing."""

    MetricName = None
    Value = None
    Timestamp = None
    IterationNumber = None

    def __init__(self, metric_name, value, timestamp=None, iteration_number=None):
        """Normalize and validate one metric datum.

        Args:
            metric_name (str): Name of the metric.
            value: Metric value; coerced with ``float``.
            timestamp: ``None`` (now), a ``datetime``, or anything ``float`` accepts.
            iteration_number (int): Optional iteration number.

        Raises:
            ValueError: If the timestamp is outside [now - 2 weeks, now + 2 hours],
                if ``value`` is not coercible to float, or if ``iteration_number``
                is not an int. (Previously a bare ``assert`` guarded
                iteration_number; asserts are stripped under ``-O``.)
        """
        if timestamp is None:
            timestamp = time.time()
        elif isinstance(timestamp, datetime.datetime):
            # Convert to a POSIX timestamp. Assume a naive datetime is in local time.
            if not timestamp.tzinfo:
                timestamp = timestamp.replace(tzinfo=dateutil.tz.tzlocal())
            timestamp = (timestamp - timestamp.utcoffset()).replace(tzinfo=datetime.timezone.utc)
            timestamp = timestamp.timestamp()
        else:
            timestamp = float(timestamp)

        # Read the clock once so both bounds use the same reference point.
        now = time.time()
        if timestamp < (now - 1209600) or timestamp > (now + 7200):
            raise ValueError(
                "Supplied timestamp %f is invalid."
                " Timestamps must be between two weeks before and two hours from now." % timestamp
            )

        self.MetricName = metric_name
        self.Value = float(value)  # single conversion (was converted twice before)
        self.Timestamp = timestamp
        if iteration_number is not None:
            if not isinstance(iteration_number, int):
                raise ValueError("iteration_number must be an int, got %r" % (iteration_number,))
            self.IterationNumber = iteration_number

    def to_record(self):
        """Return the datum as a dict of boto-cased fields (the instance __dict__)."""
        return self.__dict__

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return "{}({})".format(
            type(self).__name__,
            ",".join(["{}={}".format(k, repr(v)) for k, v in vars(self).items()]),
        )
"""Contains API objects for SageMaker experiments."""
import numbers
from smexperiments import _base_types
class ExperimentSummary(_base_types.ApiObject):
    """Summary model of an experiment.

    Attributes:
        experiment_arn (str): ARN of the experiment.
        experiment_name (str): Name of the experiment.
        creation_time (datetime): When the experiment was created.
        last_modified_time (datetime): When the experiment was last modified.
    """

    experiment_arn = None
    experiment_name = None
    creation_time = None
    last_modified_time = None

    def __init__(self, experiment_name=None, experiment_arn=None, **kwargs):
        super().__init__(experiment_name=experiment_name, experiment_arn=experiment_arn, **kwargs)
class TrialComponentMetricSummary(_base_types.ApiObject):
    """Aggregated statistics for one metric of a trial component.

    Attributes:
        metric_name (str): The name of the metric.
        source_arn (str): The ARN of the source.
        time_stamp (datetime): When the metric was last updated.
        logical_time (datetime): Logical time of the metric.
        max (float): The max value of the metric.
        min (float): The min value of the metric.
        last (float): The last value of the metric.
        count (float): The number of samples used to generate the metric.
        avg (float): The average value of the metric.
        std_dev (float): The standard deviation of the metric.
    """

    metric_name = None
    source_arn = None
    time_stamp = None
    logical_time = None
    max = None
    min = None
    last = None
    count = None
    avg = None
    std_dev = None

    def __init__(self, metric_name=None, source_arn=None, **kwargs):
        super().__init__(metric_name=metric_name, source_arn=source_arn, **kwargs)
class TrialSummary(_base_types.ApiObject):
    """Summary model of a trial.

    Attributes:
        trial_arn (str): The ARN of the trial.
        trial_name (str): The name of the trial.
        creation_time (datetime): When the trial was created.
        last_modified_time (datetime): When the trial was last modified.
    """

    trial_arn = None
    trial_name = None
    creation_time = None
    last_modified_time = None

    def __init__(self, trial_name=None, trial_arn=None, **kwargs):
        super().__init__(trial_name=trial_name, trial_arn=trial_arn, **kwargs)
class TrialComponentParameterValue(_base_types.ApiObject):
    """Value of a trial component parameter.

    Exactly one of the two members is expected to be set.

    Attributes:
        string_value (str): The string form of the parameter value.
        number_value (float): The numeric form of the parameter value, if applicable.
    """

    string_value = None
    number_value = None

    def __init__(self, string_value=None, number_value=None, **kwargs):
        super().__init__(string_value=string_value, number_value=number_value, **kwargs)

    def __str__(self):
        """Render the value as a string, preferring the string form over the numeric one."""
        if self.string_value is not None:
            return self.string_value
        return "" if self.number_value is None else str(self.number_value)
class TrialComponentParameters(_base_types.ApiObject):
    """A dictionary of TrialComponentParameterValues"""

    @classmethod
    def from_boto(cls, boto_dict, **kwargs):
        """Converts a boto dict to a dictionary of TrialComponentParameterValues

        Args:
            boto_dict (dict): boto response dictionary.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            dict: Dictionary of parameter values.
        """
        return_map = {}
        for key, value in boto_dict.items():
            # Prefer the numeric form; fall back to the string form if absent.
            return_map[key] = value.get("NumberValue", value.get("StringValue", None))
        return return_map

    @classmethod
    def to_boto(cls, parameters):  # fixed: a classmethod's first parameter is `cls`, not `self`
        """Converts TrialComponentParameters to dict.

        Args:
            parameters (TrialComponentParameters): Dictionary to convert.

        Returns:
            dict: Dictionary of trial component parameters in boto format.
        """
        boto_map = {}
        for key, value in parameters.items():
            # Note: bool is a numbers.Number subclass, so booleans serialize as NumberValue.
            if isinstance(value, numbers.Number):
                boto_map[key] = {"NumberValue": value}
            else:
                boto_map[key] = {"StringValue": str(value)}
        return boto_map
class TrialComponentArtifact(_base_types.ApiObject):
    """An input or output artifact of a trial component.

    Attributes:
        media_type (str): The media type of the artifact.
        value (str): The artifact value (e.g. an S3 URI).
    """

    media_type = None
    value = None

    def __init__(self, value=None, media_type=None, **kwargs):
        super().__init__(value=value, media_type=media_type, **kwargs)
class TrialComponentStatus(_base_types.ApiObject):
    """Status of a trial component.

    Attributes:
        primary_status (str): The status of the trial component.
        message (str): A status message.
    """

    primary_status = None
    message = None

    def __init__(self, primary_status=None, message=None, **kwargs):
        super().__init__(primary_status=primary_status, message=message, **kwargs)
class TrialComponentSummary(_base_types.ApiObject):
    """Summary model of a trial component.

    Attributes:
        trial_component_name (str): Name of the trial component.
        trial_component_arn (str): ARN of the trial component.
        display_name (str): Friendly display name.
        source_arn (str): ARN of the trial component source.
        status (TrialComponentStatus): Status of the trial component.
        start_time (datetime): Start time.
        end_time (datetime): End time.
        creation_time (datetime): Creation time.
        created_by (str): Created by.
        last_modified_time (datetime): Date last modified.
        last_modified_by (datetime): User last modified.
    """

    # `status` deserializes into a nested TrialComponentStatus object.
    _custom_boto_types = {
        "status": (TrialComponentStatus, False),
    }

    trial_component_name = None
    trial_component_arn = None
    display_name = None
    source_arn = None
    status = None
    start_time = None
    end_time = None
    creation_time = None
    created_by = None
    last_modified_time = None
    last_modified_by = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
class ExperimentSource(_base_types.ApiObject):
    """Source of an experiment.

    Attributes:
        source_arn (str): The ARN of the source.
    """

    source_arn = None

    def __init__(self, source_arn=None, **kwargs):
        super().__init__(source_arn=source_arn, **kwargs)
class TrialSource(_base_types.ApiObject):
    """Source of a trial.

    Attributes:
        source_arn (str): The ARN of the source.
    """

    source_arn = None

    def __init__(self, source_arn=None, **kwargs):
        super().__init__(source_arn=source_arn, **kwargs)
class TrialComponentSource(_base_types.ApiObject):
    """Source of a trial component.

    Attributes:
        source_arn (str): The ARN of the source.
    """

    source_arn = None

    def __init__(self, source_arn=None, **kwargs):
        super().__init__(source_arn=source_arn, **kwargs)
class BatchPutMetricsError(_base_types.ApiObject):
    """One error entry from a BatchPutMetrics response.

    Attributes:
        code (str): The error code.
        message (str): The error message.
        metric_index (int): The index of the metric that failed.
    """

    code = None
    message = None
    metric_index = None

    def __init__(self, code=None, message=None, metric_index=None, **kwargs):
        super().__init__(code=code, message=message, metric_index=metric_index, **kwargs)
class TrainingJobSearchResult(_base_types.ApiObject):
    """Summary model of a Training Job search result.

    Attributes:
        training_job_name (str): The name of the training job.
        training_job_arn (str): The Amazon Resource Name (ARN) of the training job.
        tuning_job_arn (str): The ARN of the associated hyperparameter tuning job,
            if the training job was launched by one.
        labeling_job_arn (str): The ARN of the labeling job.
        autoML_job_arn (str): The ARN of the AutoML job.
        model_artifacts (dict): The Amazon S3 location configured for storing model artifacts.
        training_job_status (str): The status of the training job.
        hyper_parameters (dict): Algorithm-specific parameters.
        algorithm_specification (dict): Information about the training algorithm and its metadata.
        input_data_config (dict): An array of Channel objects describing each data input channel.
        output_data_config (dict): The S3 path where model artifacts are stored.
        resource_config (dict): ML compute instances and ML storage volumes configured for training.
        debug_hook_config (dict): Debug hook parameters, collection configuration, and storage paths.
        experiment_config (dict): The experiment configuration associated with the job.
        debug_rule_config (dict): Information about the debug rule configuration.
    """

    training_job_name = None
    training_job_arn = None
    tuning_job_arn = None
    labeling_job_arn = None
    autoML_job_arn = None
    model_artifacts = None
    training_job_status = None
    hyper_parameters = None
    algorithm_specification = None
    input_data_config = None
    output_data_config = None
    resource_config = None
    debug_hook_config = None
    experiment_config = None
    debug_rule_config = None

    def __init__(
        self,
        training_job_arn=None,
        training_job_name=None,
        tuning_job_arn=None,
        labeling_job_arn=None,
        autoML_job_arn=None,
        model_artifacts=None,
        training_job_status=None,
        hyper_parameters=None,
        algorithm_specification=None,
        input_data_config=None,
        output_data_config=None,
        resource_config=None,
        debug_hook_config=None,
        experiment_config=None,
        debug_rule_config=None,
        **kwargs
    ):
        super().__init__(
            training_job_arn=training_job_arn, training_job_name=training_job_name,
            tuning_job_arn=tuning_job_arn, labeling_job_arn=labeling_job_arn,
            autoML_job_arn=autoML_job_arn, model_artifacts=model_artifacts,
            training_job_status=training_job_status, hyper_parameters=hyper_parameters,
            algorithm_specification=algorithm_specification, input_data_config=input_data_config,
            output_data_config=output_data_config, resource_config=resource_config,
            debug_hook_config=debug_hook_config, experiment_config=experiment_config,
            debug_rule_config=debug_rule_config, **kwargs
        )
class ExperimentSearchResult(_base_types.ApiObject):
    """Summary model of an Experiment search result.

    Attributes:
        experiment_arn (str): ARN of the experiment.
        experiment_name (str): Name of the experiment.
        display_name (str): Display name of the experiment.
        source (dict): The source of the experiment.
        tags (list): The list of tags associated with the experiment.
    """

    experiment_arn = None
    experiment_name = None
    display_name = None
    source = None
    tags = None

    def __init__(self, experiment_arn=None, experiment_name=None, display_name=None, source=None, tags=None, **kwargs):
        super().__init__(
            experiment_arn=experiment_arn, experiment_name=experiment_name,
            display_name=display_name, source=source, tags=tags, **kwargs
        )
class TrialSearchResult(_base_types.ApiObject):
    """Summary model of a Trial search result.

    Attributes:
        trial_arn (str): ARN of the trial.
        trial_name (str): Name of the trial.
        display_name (str): Display name of the trial.
        source (dict): The source of the trial.
        tags (list): The list of tags associated with the trial.
        trial_component_summaries (dict): Summaries of the trial's components.
    """

    trial_arn = None
    trial_name = None
    display_name = None
    source = None
    tags = None
    trial_component_summaries = None

    def __init__(
        self,
        trial_arn=None,
        trial_name=None,
        display_name=None,
        source=None,
        tags=None,
        trial_component_summaries=None,
        **kwargs
    ):
        super().__init__(
            trial_arn=trial_arn, trial_name=trial_name, display_name=display_name,
            source=source, tags=tags, trial_component_summaries=trial_component_summaries,
            **kwargs
        )
class TrialComponentSearchResult(_base_types.ApiObject):
    """Summary model of a Trial Component search result.

    Attributes:
        trial_component_arn (str): ARN of the trial component.
        trial_component_name (str): Name of the trial component.
        display_name (str): Display name of the trial component.
        source (dict): The source of the trial component.
        status (dict): The status of the trial component.
        start_time (datetime): Start time.
        end_time (datetime): End time.
        creation_time (datetime): Creation time.
        created_by (str): Created by.
        last_modified_time (datetime): Date last modified.
        last_modified_by (datetime): User last modified.
        parameters (dict): The hyperparameters of the component.
        input_artifacts (dict): The input artifacts of the component.
        output_artifacts (dict): The output artifacts of the component.
        metrics (list): The metrics for the component.
        source_detail (dict): The source of the trial component.
        tags (list): The list of tags that are associated with the trial component.
        parents (dict): The parent of trial component.
    """

    trial_component_arn = None
    trial_component_name = None
    display_name = None
    source = None
    status = None
    # Fixed: start_time/end_time class attributes were missing even though the
    # docstring documents them and __init__ accepts them.
    start_time = None
    end_time = None
    parameters = None
    input_artifacts = None
    output_artifacts = None
    metrics = None
    source_detail = None
    tags = None
    parents = None

    def __init__(
        self,
        trial_component_arn=None,
        trial_component_name=None,
        start_time=None,
        end_time=None,
        display_name=None,
        source=None,
        status=None,
        parameters=None,
        input_artifacts=None,
        output_artifacts=None,
        metrics=None,
        source_detail=None,
        tags=None,
        parents=None,
        **kwargs
    ):
        super(TrialComponentSearchResult, self).__init__(
            trial_component_arn=trial_component_arn,
            trial_component_name=trial_component_name,
            # Fixed: start_time and end_time were accepted here but never forwarded
            # to the base class, so search results silently lost StartTime/EndTime.
            start_time=start_time,
            end_time=end_time,
            display_name=display_name,
            source=source,
            status=status,
            parameters=parameters,
            input_artifacts=input_artifacts,
            output_artifacts=output_artifacts,
            metrics=metrics,
            source_detail=source_detail,
            tags=tags,
            parents=parents,
            **kwargs
        )
"""Contains the TrialComponent class."""
from smexperiments import _base_types, api_types, trial
import time
class TrialComponent(_base_types.Record):
    """This class represents a SageMaker trial component object.

    A trial component is a stage in a trial.

    Trial components are created automatically within the SageMaker runtime and may not be created directly.
    To automatically associate trial components with a trial and experiment supply an experiment config when creating a
    job. For example: https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateTrainingJob.html

    Parameters:
        trial_component_name (str): The name of the trial component. Generated by SageMaker from the name of the
            source job with a suffix specific to the type of source job.
        trial_component_arn (str): The ARN of the trial component.
        display_name (str): The name of the trial component that will appear in UI, such as SageMaker Studio.
        source (obj): A TrialComponentSource object with a source_arn attribute.
        status (str): Status of the source job.
        start_time (datetime): When the source job started.
        end_time (datetime): When the source job ended.
        creation_time (datetime): When the source job was created.
        created_by (obj): Contextual info on which account created the trial component.
        last_modified_time (datetime): When the trial component was last modified.
        last_modified_by (obj): Contextual info on which account last modified the trial component.
        parameters (dict): Dictionary of parameters to the source job.
        input_artifacts (dict): Dictionary of input artifacts.
        output_artifacts (dict): Dictionary of output artifacts.
        metrics (obj): Aggregated metrics for the job.
        parameters_to_remove (list): The hyperparameters to remove from the component.
        input_artifacts_to_remove (list): The input artifacts to remove from the component.
        output_artifacts_to_remove (list): The output artifacts to remove from the component.
        tags (List[dict[str, str]]): A list of tags to associate with the trial component.
    """

    # Members mirrored from the DescribeTrialComponent response.
    trial_component_name = None
    trial_component_arn = None
    display_name = None
    source = None
    status = None
    start_time = None
    end_time = None
    creation_time = None
    created_by = None
    last_modified_time = None
    last_modified_by = None
    parameters = None
    input_artifacts = None
    output_artifacts = None
    metrics = None
    # Members sent only on update requests.
    parameters_to_remove = None
    input_artifacts_to_remove = None
    output_artifacts_to_remove = None
    tags = None

    # Boto client method names used by the base Record CRUD plumbing.
    _boto_load_method = "describe_trial_component"
    _boto_create_method = "create_trial_component"
    _boto_update_method = "update_trial_component"
    _boto_delete_method = "delete_trial_component"

    # Members that deserialize into nested api_types objects; the bool flag marks
    # collection-valued members.
    _custom_boto_types = {
        "source": (api_types.TrialComponentSource, False),
        "status": (api_types.TrialComponentStatus, False),
        "parameters": (api_types.TrialComponentParameters, False),
        "input_artifacts": (api_types.TrialComponentArtifact, True),
        "output_artifacts": (api_types.TrialComponentArtifact, True),
        "metrics": (api_types.TrialComponentMetricSummary, True),
    }

    # Members converted to boto form and sent with update / delete calls.
    _boto_update_members = [
        "trial_component_name",
        "display_name",
        "status",
        "start_time",
        "end_time",
        "parameters",
        "input_artifacts",
        "output_artifacts",
        "parameters_to_remove",
        "input_artifacts_to_remove",
        "output_artifacts_to_remove",
    ]

    _boto_delete_members = ["trial_component_name"]

    @classmethod
    def _boto_ignore(cls):
        # CreatedBy has no matching member conversion; drop it from responses.
        return super(TrialComponent, cls)._boto_ignore() + ["CreatedBy"]

    def save(self):
        """Save the state of this TrialComponent to SageMaker."""
        return self._invoke_api(self._boto_update_method, self._boto_update_members)

    def delete(self, force_disassociate=None):
        """Delete this TrialComponent from SageMaker.

        Args:
            force_disassociate (boolean): Indicates whether to force disassociate the trial component with the trials
                before deletion. If set to true, force disassociate the trial component with associated trials first, then
                delete the trial component. If it's not set or set to false, it will delete the trial component directory
                without disassociation.

        Returns:
            dict: Delete trial component response.
        """
        if force_disassociate:
            next_token = None
            # NOTE(review): disassociating while paginating with NextToken may skip
            # trials, since each disassociation shrinks the result set that the
            # stale token indexes into — confirm against the ListTrials API contract.
            while True:
                if next_token:
                    list_trials_response = self.sagemaker_boto_client.list_trials(
                        TrialComponentName=self.trial_component_name, NextToken=next_token
                    )
                else:
                    list_trials_response = self.sagemaker_boto_client.list_trials(
                        TrialComponentName=self.trial_component_name
                    )
                # Disassociate the trials and trial components
                for per_trial in list_trials_response["TrialSummaries"]:
                    # to prevent DisassociateTrialComponent throttling
                    time.sleep(1.2)
                    self.sagemaker_boto_client.disassociate_trial_component(
                        TrialName=per_trial["TrialName"], TrialComponentName=self.trial_component_name
                    )
                if "NextToken" in list_trials_response:
                    next_token = list_trials_response["NextToken"]
                else:
                    break
        return self._invoke_api(self._boto_delete_method, self._boto_delete_members)

    def list_trials(self):
        """
        Load a list of trials that contains the same trial component name

        Returns:
            A list of trials that contains the same trial component name
        """
        return trial.Trial.list(
            trial_component_name=self.trial_component_name, sagemaker_boto_client=self.sagemaker_boto_client
        )

    @classmethod
    def load(cls, trial_component_name, sagemaker_boto_client=None):
        """Load an existing trial component and return an ``TrialComponent`` object representing it.

        Args:
            trial_component_name (str): Name of the trial component
            sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker.
                If not supplied, a default boto3 client will be created and used.

        Returns:
            smexperiments.trial_component.TrialComponent: A SageMaker ``TrialComponent`` object
        """
        trial_component = cls._construct(
            cls._boto_load_method,
            trial_component_name=trial_component_name,
            sagemaker_boto_client=sagemaker_boto_client,
        )
        return trial_component

    @classmethod
    def create(cls, trial_component_name, display_name=None, tags=None, sagemaker_boto_client=None):
        """Create a trial component and return a ``TrialComponent`` object representing it.

        Args:
            trial_component_name (str): The name of the trial component.
            display_name (str): Display name of the trial component used by Studio. Defaults to
                None.
            tags (dict): Tags to add to the trial component. Defaults to None.
            sagemaker_boto_client (obj): SageMaker boto client. Defaults to None.

        Returns:
            smexperiments.trial_component.TrialComponent: A SageMaker ``TrialComponent``
            object.
        """
        return super(TrialComponent, cls)._construct(
            cls._boto_create_method,
            trial_component_name=trial_component_name,
            display_name=display_name,
            tags=tags,
            sagemaker_boto_client=sagemaker_boto_client,
        )

    @classmethod
    def list(
        cls,
        source_arn=None,
        created_before=None,
        created_after=None,
        sort_by=None,
        sort_order=None,
        sagemaker_boto_client=None,
        trial_name=None,
        experiment_name=None,
        max_results=None,
        next_token=None,
    ):
        """Return a list of trial component summaries.

        Args:
            source_arn (str, optional): A SageMaker Training or Processing Job ARN.
            created_before (datetime.datetime, optional): Return trial components created before this instant.
            created_after (datetime.datetime, optional): Return trial components created after this instant.
            sort_by (str, optional): Which property to sort results by. One of 'SourceArn', 'CreatedBefore',
                'CreatedAfter'
            sort_order (str, optional): One of 'Ascending', or 'Descending'.
            sagemaker_boto_client (SageMaker.Client, optional) : Boto3 client for SageMaker.
                If not supplied, a default boto3 client will be created and used.
            trial_name (str, optional): If provided only trial components related to the trial are returned.
            experiment_name (str, optional): If provided only trial components related to the experiment are returned.
            max_results (int, optional): maximum number of trial components to retrieve
            next_token (str, optional): token for next page of results

        Returns:
            collections.Iterator[smexperiments.api_types.TrialComponentSummary]: An iterator
            over ``TrialComponentSummary`` objects.
        """
        return super(TrialComponent, cls)._list(
            "list_trial_components",
            api_types.TrialComponentSummary.from_boto,
            "TrialComponentSummaries",
            source_arn=source_arn,
            created_before=created_before,
            created_after=created_after,
            sort_by=sort_by,
            sort_order=sort_order,
            sagemaker_boto_client=sagemaker_boto_client,
            trial_name=trial_name,
            experiment_name=experiment_name,
            max_results=max_results,
            next_token=next_token,
        )

    @classmethod
    def search(
        cls,
        search_expression=None,
        sort_by=None,
        sort_order=None,
        max_results=None,
        sagemaker_boto_client=None,
    ):
        """
        Search experiments. Returns SearchResults in the account matching the search criteria.

        Args:
            search_expression: (dict, optional): A Boolean conditional statement. Resource objects
                must satisfy this condition to be included in search results. You must provide at
                least one subexpression, filter, or nested filter.
            sort_by (str, optional): The name of the resource property used to sort the SearchResults.
                The default is LastModifiedTime
            sort_order (str, optional): How SearchResults are ordered. Valid values are Ascending or
                Descending . The default is Descending .
            max_results (int, optional): The maximum number of results to return in a SearchResponse.
            sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker. If not
                supplied, a default boto3 client will be used.

        Returns:
            collections.Iterator[SearchResult] : An iterator over search results matching the
            search criteria.
        """
        return super(TrialComponent, cls)._search(
            search_resource="ExperimentTrialComponent",
            search_item_factory=api_types.TrialComponentSearchResult.from_boto,
            # Serialize the expression to boto form only when one was supplied.
            search_expression=None if search_expression is None else search_expression.to_boto(),
            sort_by=sort_by,
            sort_order=sort_order,
            max_results=max_results,
            sagemaker_boto_client=sagemaker_boto_client,
        )
"""Contains the Trial class."""
from smexperiments import api_types, _base_types, trial_component, _utils, tracker
import time
class Trial(_base_types.Record):
    """
    An execution of a data-science workflow with an experiment.

    Consists of a list of trial component objects, which document individual activities within the workflow.

    Examples:
        .. code-block:: python

            from smexperiments import trial, experiment, tracker

            my_experiment = experiment.Experiment.create(experiment_name='AutoML')
            my_trial = trial.Trial.create('AutoML')

            # use `with` statement to ensure `my_tracker.close()` is called
            with tracker.Tracker.create() as my_tracker:
                # log hyper parameter of learning rate
                my_tracker.log_parameter('learning_rate', 0.01)

            # associate the trial component with the trial
            my_trial.add_trial_component(my_tracker)

            # list trial components within a trial
            for tc_summary in my_trial.list_trial_components():
                print(tc_summary)

            # cleanup trial
            my_trial.remove_trial_component(my_tracker)
            my_trial.delete()

    Parameters:
        trial_name (str): The name of the trial.
        experiment_name (str): The name of the trial's experiment.
        tags (List[dict[str, str]]): A list of tags to associate with the trial.
    """

    trial_name = None
    experiment_name = None
    tags = None

    # Boto API method names used by the generic _base_types.Record machinery.
    _boto_create_method = "create_trial"
    _boto_load_method = "describe_trial"
    _boto_delete_method = "delete_trial"
    _boto_update_method = "update_trial"

    _boto_update_members = ["trial_name", "display_name"]
    _boto_delete_members = ["trial_name"]

    # Number of times ``delete_all`` retries before giving up.
    MAX_DELETE_ALL_ATTEMPTS = 3

    @classmethod
    def _boto_ignore(cls):
        """Boto response fields to skip when hydrating this record."""
        return super(Trial, cls)._boto_ignore() + ["CreatedBy"]

    @staticmethod
    def _resolve_trial_component_name(tc):
        """Resolve a trial component reference to its trial component name.

        Shared by :meth:`add_trial_component` and :meth:`remove_trial_component`.

        Args:
            tc (str or Tracker or TrialComponent or TrialComponentSummary): The
                reference to resolve. Any other type is converted with ``str``.

        Returns:
            str: The trial component name.
        """
        if isinstance(tc, tracker.Tracker):
            return tc.trial_component.trial_component_name
        if isinstance(tc, (trial_component.TrialComponent, api_types.TrialComponentSummary)):
            return tc.trial_component_name
        return str(tc)

    def save(self):
        """Save the state of this Trial to SageMaker.

        Returns:
            dict: Update trial response.
        """
        return self._invoke_api(self._boto_update_method, self._boto_update_members)

    def delete(self):
        """Delete this Trial from SageMaker.

        Requires that this Trial contains no TrialComponents. Individual TrialComponents can be removed by
        calling :meth:`~smexperiments.trial.Trial.remove_trial_component`.

        Returns:
            dict: Delete trial response.
        """
        return self._invoke_api(self._boto_delete_method, self._boto_delete_members)

    @classmethod
    def load(cls, trial_name, sagemaker_boto_client=None):
        """Load an existing trial and return a ``Trial`` object.

        Args:
            trial_name: (str): Name of the Trial.
            sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker.
                If not supplied, a default boto3 client will be created and used.

        Returns:
            smexperiments.trial.Trial: A SageMaker ``Trial`` object
        """
        return super(Trial, cls)._construct(
            cls._boto_load_method, trial_name=trial_name, sagemaker_boto_client=sagemaker_boto_client
        )

    @classmethod
    def create(cls, experiment_name, trial_name=None, sagemaker_boto_client=None, trial_components=None, tags=None):
        """Create a new trial and return a ``Trial`` object.

        Args:
            experiment_name: (str): Name of the experiment to create this trial in.
            trial_name: (str, optional): Name of the Trial. If not specified, an auto-generated name will be used.
            sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker.
                If not supplied, a default boto3 client will be created and used.
            trial_components (list): A list of trial component names, trial components, or trial component trackers.
            tags (List[dict[str, str]]): A list of tags to associate with the trial.

        Returns:
            smexperiments.trial.Trial: A SageMaker ``Trial`` object
        """
        trial_name = trial_name or _utils.name("Trial")
        trial = super(Trial, cls)._construct(
            cls._boto_create_method,
            trial_name=trial_name,
            experiment_name=experiment_name,
            tags=tags,
            sagemaker_boto_client=sagemaker_boto_client,
        )
        if trial_components:
            for tc in trial_components:
                trial.add_trial_component(tc)
        return trial

    @classmethod
    def list(
        cls,
        experiment_name=None,
        trial_component_name=None,
        created_before=None,
        created_after=None,
        sort_by=None,
        sort_order=None,
        sagemaker_boto_client=None,
    ):
        """List all trials matching the specified criteria.

        Args:
            experiment_name (str, optional): Name of the experiment. If specified, only trials in
                the experiment will be returned.
            trial_component_name (str, optional): Name of the trial component. If specified, only
                trials with this trial component name will be returned.
            created_before (datetime.datetime, optional): Return trials created before this instant.
            created_after (datetime.datetime, optional): Return trials created after this instant.
            sort_by (str, optional): Which property to sort results by. One of 'Name',
                'CreationTime'.
            sort_order (str, optional): One of 'Ascending', or 'Descending'.
            sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker.
                If not supplied, a default boto3 client will be created and used.

        Returns:
            collections.Iterator[smexperiments.trial.TrialSummary]: An iterator over trials
            matching the specified criteria.
        """
        return super(Trial, cls)._list(
            "list_trials",
            api_types.TrialSummary.from_boto,
            "TrialSummaries",
            experiment_name=experiment_name,
            trial_component_name=trial_component_name,
            created_before=created_before,
            created_after=created_after,
            sort_by=sort_by,
            sort_order=sort_order,
            sagemaker_boto_client=sagemaker_boto_client,
        )

    @classmethod
    def search(
        cls,
        search_expression=None,
        sort_by=None,
        sort_order=None,
        max_results=None,
        sagemaker_boto_client=None,
    ):
        """Search trials. Returns SearchResults in the account matching the search criteria.

        Args:
            search_expression: (dict, optional): A Boolean conditional statement. Resource objects
                must satisfy this condition to be included in search results. You must provide at
                least one subexpression, filter, or nested filter.
            sort_by (str, optional): The name of the resource property used to sort the
                SearchResults. The default is LastModifiedTime.
            sort_order (str, optional): How SearchResults are ordered. Valid values are Ascending
                or Descending. The default is Descending.
            max_results (int, optional): The maximum number of results to return in a
                SearchResponse.
            sagemaker_boto_client (SageMaker.Client, optional): Boto3 client for SageMaker. If not
                supplied, a default boto3 client will be used.

        Returns:
            collections.Iterator[SearchResult]: An iterator over search results matching the
            search criteria.
        """
        return super(Trial, cls)._search(
            search_resource="ExperimentTrial",
            search_item_factory=api_types.TrialSearchResult.from_boto,
            search_expression=None if search_expression is None else search_expression.to_boto(),
            sort_by=sort_by,
            sort_order=sort_order,
            max_results=max_results,
            sagemaker_boto_client=sagemaker_boto_client,
        )

    def add_trial_component(self, tc):
        """Add the specified trial component to this ``Trial``.

        A trial component may belong to many trials and a trial may have many trial components.

        Args:
            tc (str or Tracker or TrialComponent or TrialComponentSummary): The trial component to
                add. Can be one of a Tracker instance, a TrialComponent instance, or a string
                containing the name of the trial component to add.
        """
        self.sagemaker_boto_client.associate_trial_component(
            TrialName=self.trial_name, TrialComponentName=self._resolve_trial_component_name(tc)
        )

    def remove_trial_component(self, tc):
        """Remove the specified trial component from this trial.

        Args:
            tc (str or Tracker or TrialComponent or TrialComponentSummary): The trial component to
                remove. Can be one of a Tracker instance, a TrialComponent instance, or a string
                containing the name of the trial component to remove.
        """
        self.sagemaker_boto_client.disassociate_trial_component(
            TrialName=self.trial_name, TrialComponentName=self._resolve_trial_component_name(tc)
        )

    def list_trial_components(
        self, created_before=None, created_after=None, sort_by=None, sort_order=None, max_results=None, next_token=None
    ):
        """List trial components in this trial matching the specified criteria.

        Args:
            created_before (datetime.datetime, optional): Return trials created before this instant.
            created_after (datetime.datetime, optional): Return trials created after this instant.
            sort_by (str, optional): Which property to sort results by. One of 'Name',
                'CreationTime'.
            sort_order (str, optional): One of 'Ascending', or 'Descending'.
            max_results (int, optional): maximum number of trial components to retrieve
            next_token (str, optional): token for next page of results

        Returns:
            collections.Iterator[smexperiments.api_types.TrialComponentSummary]: An iterator over
            trials matching the criteria.
        """
        return trial_component.TrialComponent.list(
            trial_name=self.trial_name,
            created_before=created_before,
            created_after=created_after,
            sort_by=sort_by,
            sort_order=sort_order,
            max_results=max_results,
            next_token=next_token,
            sagemaker_boto_client=self.sagemaker_boto_client,
        )

    def delete_all(self, action):
        """Force-delete the trial and all trial components associated with it.

        Args:
            action (str): pass in string '--force' to confirm deleting the trial and all
                associated trial components.

        Raises:
            ValueError: If ``action`` is not the literal string '--force'.
            Exception: If deletion still fails after ``MAX_DELETE_ALL_ATTEMPTS`` attempts;
                the last underlying error is chained as the cause.
        """
        if action != "--force":
            raise ValueError(
                "Must confirm with string '--force' in order to delete the trial and associated trial components."
            )

        delete_attempt_count = 0
        last_exception = None
        while True:
            if delete_attempt_count == self.MAX_DELETE_ALL_ATTEMPTS:
                raise Exception("Failed to delete, please try again.") from last_exception
            try:
                for trial_component_summary in self.list_trial_components():
                    tc = trial_component.TrialComponent.load(
                        sagemaker_boto_client=self.sagemaker_boto_client,
                        trial_component_name=trial_component_summary.trial_component_name,
                    )
                    tc.delete(force_disassociate=True)
                    # Sleep between deletes to avoid being throttled by the SageMaker API.
                    time.sleep(1.2)
                self.delete()
                break
            except Exception as ex:  # best-effort: retried up to MAX_DELETE_ALL_ATTEMPTS
                last_exception = ex
            finally:
                delete_attempt_count = delete_attempt_count + 1
"""Placeholder docstring"""
import re
def to_camel_case(snake_case):
    """Convert a snake_case string to UpperCamelCase.

    Args:
        snake_case (str): String to convert to camel case.

    Returns:
        str: String converted to camel case.
    """
    return "".join(part.title() for part in snake_case.split("_"))
def to_snake_case(name):
    """Convert a CamelCase string to snake_case.

    Args:
        name (str): String to convert to snake case.

    Returns:
        str: String converted to snake case.
    """
    # Two-pass regex approach, per the classic recipe:
    # https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
    partially_split = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name)
    return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", partially_split).lower()
def from_boto(boto_dict, boto_name_to_member_name, member_name_to_type):
    """Convert an UpperCamelCase boto response to a snake case representation.

    Args:
        boto_dict (dict[str, ?]): A boto response dictionary.
        boto_name_to_member_name (dict[str, str]): A map from boto name to snake_case name.
            If a given boto name is not in the map then a default mapping is applied.
        member_name_to_type (dict[str, (_base_types.ApiObject, bool)]): A map from snake case
            name to a type description tuple. The first element, a subclass of ApiObject, is
            the type of the mapped object. The second element indicates whether the mapped
            element is a collection or a singleton.

    Returns:
        dict: Boto response in snake case.
    """
    converted = {}
    for boto_name, boto_value in boto_dict.items():
        # Prefer an explicit name mapping; otherwise derive the snake_case name.
        member_name = boto_name_to_member_name.get(boto_name, to_snake_case(boto_name))
        if member_name not in member_name_to_type:
            # Simple value (number, string, list of strings, ...) - no further conversion.
            converted[member_name] = boto_value
            continue
        api_type, is_collection = member_name_to_type[member_name]
        if not is_collection:
            converted[member_name] = api_type.from_boto(boto_value)
        elif isinstance(boto_value, dict):
            converted[member_name] = {key: api_type.from_boto(value) for key, value in boto_value.items()}
        else:
            converted[member_name] = [api_type.from_boto(item) for item in boto_value]
    return converted
def to_boto(member_vars, member_name_to_boto_name, member_name_to_type):
    """Convert a dict of snake_case names/values into a boto UpperCamelCase representation.

    Entries whose value is None are dropped: None is treated as "not set", which is
    required because API operations have optional parameters that may not take a null value.

    Args:
        member_vars (dict[str, ?]): A map from snake case name to value.
        member_name_to_boto_name (dict[str, str]): A map from snake_case name to boto name.
        member_name_to_type (dict[str, (ApiObject, bool)]): A map from snake case name to a
            (type, is_collection) tuple used to recursively convert nested ApiObject values.

    Returns:
        dict: The boto (UpperCamelCase) representation.
    """
    boto_values = {}
    for member_name, member_value in member_vars.items():
        if member_value is None:
            continue
        # Prefer an explicit name mapping; otherwise derive the camel case name.
        boto_name = member_name_to_boto_name.get(member_name, to_camel_case(member_name))
        api_type, is_api_collection_type = member_name_to_type.get(member_name, (None, None))
        if is_api_collection_type and isinstance(member_value, dict):
            boto_value = {key: api_type.to_boto(val) if api_type else val for key, val in member_value.items()}
        elif is_api_collection_type and isinstance(member_value, list):
            boto_value = [api_type.to_boto(item) if api_type else item for item in member_value]
        else:
            boto_value = api_type.to_boto(member_value) if api_type else member_value
        boto_values[boto_name] = boto_value
    return boto_values
"""Simplify Search Expression by provide a simplified DSL"""
from smexperiments._base_types import ApiObject
from enum import Enum, unique
@unique
class Operator(Enum):
    """Comparison operators available for use in a search ``Filter``."""

    EQUALS = "Equals"
    NOT_EQUALS = "NotEquals"
    GREATER_THAN = "GreaterThan"
    GREATER_THAN_OR_EQUAL = "GreaterThanOrEqualTo"
    LESS_THAN = "LessThan"
    LESS_THAN_OR_EQUAL = "LessThanOrEqualTo"
    CONTAINS = "Contains"
    EXISTS = "Exists"
    NOT_EXISTS = "NotExists"
@unique
class BooleanOperator(Enum):
    """Boolean operators used to combine sub-conditions in a ``SearchExpression``."""

    AND = "And"
    OR = "Or"
class SearchObject(ApiObject):
    """Base class for search DSL objects that can serialize themselves to boto."""

    def to_boto(self):
        """Convert this search object to its boto (UpperCamelCase dict) representation."""
        return ApiObject.to_boto(self)
class Filter(SearchObject):
    """A single search filter: a property name, an operator, and a value."""

    name = None
    operator = None
    value = None

    def __init__(self, name, operator=None, value=None):
        """Construct a Filter.

        Args:
            name (str): Name of the property to filter on.
            operator (Operator, optional): One of the ``Operator`` enum members.
            value (str, optional): Value to compare the property against.
        """
        self.name = name
        self.operator = operator.value if operator is not None else None
        self.value = value
class NestedFilter(SearchObject):
    """A search filter applied to a nested property."""

    nested_property_name = None
    filters = None

    def __init__(self, property_name, filters):
        """Construct a NestedFilter.

        Args:
            property_name (str): Name of the nested property.
            filters (list): List of ``Filter`` objects applied to the nested property.
        """
        self.nested_property_name = property_name
        self.filters = [child.to_boto() for child in filters]
class SearchExpression(SearchObject):
    """A boolean combination of filters, nested filters, and sub-expressions.

    A sample search expression is defined here:
    https://boto3.amazonaws.com/v1/documentation/api/1.12.8/reference/services/sagemaker.html#SageMaker.Client.search
    """

    filters = None
    nested_filters = None
    operator = None
    sub_expressions = None

    def __init__(self, filters=None, nested_filters=None, sub_expressions=None, boolean_operator=BooleanOperator.AND):
        """Construct a SearchExpression.

        Args:
            filters (list, optional): List of ``Filter`` objects.
            nested_filters (list, optional): List of ``NestedFilter`` objects.
            sub_expressions (list, optional): List of ``SearchExpression`` objects.
            boolean_operator (BooleanOperator): How the sub-conditions are combined.

        Raises:
            ValueError: If no filter, nested filter, or sub-expression is provided.
        """
        if filters is None and nested_filters is None and sub_expressions is None:
            raise ValueError("You must specify at least one subexpression, filter, or nested filter")
        self.filters = [f.to_boto() for f in filters] if filters is not None else None
        self.nested_filters = [nf.to_boto() for nf in nested_filters] if nested_filters is not None else None
        self.sub_expressions = [se.to_boto() for se in sub_expressions] if sub_expressions is not None else None
        self.operator = boolean_operator.value
import os
import random
from datetime import datetime
import boto3
import botocore
import logging
from importlib import import_module
def sagemaker_client():
    """Instantiate a SageMaker client.

    Honors the ``SAGEMAKER_ENDPOINT`` environment variable if it is set to a
    non-blank value; otherwise the default endpoint is used.

    Returns:
        SageMaker.Client
    """
    endpoint = os.environ.get("SAGEMAKER_ENDPOINT", "")
    if endpoint.strip():
        return boto_session().client("sagemaker", endpoint_url=endpoint)
    return boto_session().client("sagemaker")
def boto_session():
    """Instantiate a boto session pinned to the region in ``AWS_REGION``.

    Returns:
        boto3.Session
    """
    region = os.environ.get("AWS_REGION")
    return boto3.Session(region_name=region)
def suffix():
    """Generate a unique suffix: a UTC timestamp followed by 4 random lowercase letters."""
    letters = "abcdefghijklmnopqrstuvwxyz"
    timestamp = datetime.utcnow().strftime("%Y-%m-%d-%H%M%S%f")
    return "-".join([timestamp, "".join(random.sample(letters, 4))])
def name(prefix):
    """Generate a unique name composed of ``prefix`` and a random suffix.

    Args:
        prefix (str): Prefix for the generated name.

    Returns:
        str: The generated name, ``"<prefix>-<suffix>"``.
    """
    return "{}-{}".format(prefix, suffix())
def get_or_create_default_bucket(boto_session, default_bucket_prefix="sagemaker"):
    """Creates a default bucket if not already exists. The bucket name is a combination of a prefix, the region, and
    account.
    Args:
        boto_session (boto3.Session): boto session
        default_bucket_prefix (str): prefix to the bucket name
    Returns:
        str: The default bucket name.
    """
    # Bucket name is derived deterministically: "<prefix>-<region>-<account-id>".
    account = boto_session.client("sts").get_caller_identity()["Account"]
    region = boto_session.region_name
    default_bucket = "{}-{}-{}".format(default_bucket_prefix, region, account)
    s3 = boto_session.resource("s3")
    try:
        # 'us-east-1' cannot be specified because it is the default region:
        if region == "us-east-1":
            s3.create_bucket(Bucket=default_bucket)
        else:
            s3.create_bucket(Bucket=default_bucket, CreateBucketConfiguration={"LocationConstraint": region})
    except botocore.exceptions.ClientError as e:
        error_code = e.response["Error"]["Code"]
        message = e.response["Error"]["Message"]
        logging.debug("Create Bucket failed. error code: {}, message: {}".format(error_code, message))
        if error_code == "BucketAlreadyOwnedByYou":
            # The bucket already exists in this account - treat as success.
            pass
        elif error_code == "OperationAborted" and "conflicting conditional operation" in message:
            # If this bucket is already being concurrently created, we don't need to create it again.
            pass
        elif error_code == "TooManyBuckets":
            # Succeed if the default bucket exists
            s3.meta.client.head_bucket(Bucket=default_bucket)
        else:
            raise
    return default_bucket
def get_module(module_name):
    """Import and return the module named ``module_name``.

    Args:
        module_name (str): Fully qualified name of the module to import.

    Returns:
        module: The imported module object.
    """
    return import_module(module_name)
import enum
import json
import os
import time
from smexperiments import trial_component
# Environment variable holding the ARN of the current training job.
TRAINING_JOB_ARN_ENV = "TRAINING_JOB_ARN"
# Path where the processing job configuration document is mounted.
PROCESSING_JOB_CONFIG_PATH = "/opt/ml/config/processingjobconfig.json"
class EnvironmentType(enum.Enum):
    """SageMaker jobs which data can be pulled from the environment."""

    SageMakerTrainingJob = 1
    SageMakerProcessingJob = 2
class TrialComponentEnvironment(object):
    """Retrieves job specific data from the environment.

    Attributes:
        environment_type (EnvironmentType): The environment type.
        source_arn (str): The ARN of the current job.
    """

    environment_type = None
    source_arn = None

    def __init__(self, environment_type, source_arn):
        """
        Args:
            environment_type (EnvironmentType): The environment type.
            source_arn (str): The ARN of the current job.
        """
        self.environment_type = environment_type
        self.source_arn = source_arn

    @classmethod
    def load(cls, training_job_arn_env=TRAINING_JOB_ARN_ENV, processing_job_config_path=PROCESSING_JOB_CONFIG_PATH):
        """Load the source ARN of the current job from the environment.

        Args:
            training_job_arn_env (str): The environment key for the training job ARN.
            processing_job_config_path (str): The processing job config path.

        Returns:
            TrialComponentEnvironment: Job data loaded from the environment. None if neither a
            training job nor a processing job environment is detected.
        """
        if training_job_arn_env in os.environ:
            return cls(EnvironmentType.SageMakerTrainingJob, os.environ.get(training_job_arn_env))
        if os.path.exists(processing_job_config_path):
            # Use a context manager so the config file handle is always closed
            # (previously the file object was opened and never closed).
            with open(processing_job_config_path) as config_file:
                config = json.load(config_file)
            return cls(EnvironmentType.SageMakerProcessingJob, config["ProcessingJobArn"])
        return None

    def get_trial_component(self, sagemaker_boto_client):
        """Retrieve the trial component created from the job in the environment.

        Polls for up to 5 minutes, since the trial component is created asynchronously
        after the job starts.

        Args:
            sagemaker_boto_client (SageMaker.Client): SageMaker boto client.

        Returns:
            TrialComponent: The trial component created from the job. None if not found
            within the polling window.
        """
        start = time.time()
        while time.time() - start < 300:
            summaries = list(
                trial_component.TrialComponent.list(
                    source_arn=self.source_arn, sagemaker_boto_client=sagemaker_boto_client
                )
            )
            if summaries:
                summary = summaries[0]
                return trial_component.TrialComponent.load(
                    trial_component_name=summary.trial_component_name, sagemaker_boto_client=sagemaker_boto_client
                )
            # Not created yet - back off briefly before polling again.
            time.sleep(2)
        return None
import string
from typing import List
from pyspark.sql import DataFrame
from feature_store_pyspark.wrapper import SageMakerFeatureStoreJavaWrapper
class FeatureStoreManager(SageMakerFeatureStoreJavaWrapper):
    """A central manager for the feature store data repository.

    ``ingest_data`` can be used to do batch data ingestion into the specified feature group. The input data should be
    in the format of a Spark DataFrame and ``feature_group_arn`` is the specified feature group's ARN. To selectively
    ingest to the offline/online store, specify the ``target_stores`` according to different use cases.
    """

    # Fully qualified name of the wrapped JVM class.
    _wrapped_class = "software.amazon.sagemaker.featurestore.sparksdk.FeatureStoreManager"

    def __init__(self, assume_role_arn: str = None):
        """Create a manager, optionally assuming the given IAM role.

        Args:
            assume_role_arn (str, optional): ARN of an IAM role to assume when calling
                SageMaker FeatureStore. If None, the default credentials are used.
        """
        super(FeatureStoreManager, self).__init__()
        self._java_obj = self._new_java_obj(FeatureStoreManager._wrapped_class, assume_role_arn)

    def ingest_data(self, input_data_frame: DataFrame, feature_group_arn: str, target_stores: List[str] = None):
        """Batch ingest data into SageMaker FeatureStore.

        Args:
            input_data_frame (DataFrame): The DataFrame to be ingested.
            feature_group_arn (str): Target feature group ARN.
            target_stores (List[str], optional): Target stores which the data should be
                ingested to. If None, the connector's default applies.

        Returns:
            The result of the underlying Java ``ingestDataInJava`` call.
        """
        return self._call_java("ingestDataInJava", input_data_frame, feature_group_arn, target_stores)

    def load_feature_definitions_from_schema(self, input_data_frame: DataFrame):
        """Load feature definitions according to the schema of the input DataFrame.

        Args:
            input_data_frame (DataFrame): Input Spark DataFrame to be loaded.

        Returns:
            list[dict]: One ``{"FeatureName": ..., "FeatureType": ...}`` dict per feature
            definition derived from the DataFrame schema.
        """
        java_feature_definitions = self._call_java("loadFeatureDefinitionsFromSchema", input_data_frame)
        return [
            {
                "FeatureName": definition.featureName(),
                "FeatureType": definition.featureType().toString(),
            }
            for definition in java_feature_definitions
        ]

    def get_failed_stream_ingestion_data_frame(self) -> DataFrame:
        """Retrieve a DataFrame of all records that failed to be ingested via ``ingest_data``.

        Returns:
            DataFrame: The records that failed to be ingested.
        """
        return self._call_java("getFailedStreamIngestionDataFrame")
import string
from typing import List
from pyspark.sql import DataFrame
from feature_store_pyspark.wrapper import SageMakerFeatureStoreJavaWrapper
class FeatureStoreManager(SageMakerFeatureStoreJavaWrapper):
    """A central manager for the feature store data repository.

    ``ingest_data`` can be used to do batch data ingestion into the specified feature group. The input data should be
    in the format of a Spark DataFrame and ``feature_group_arn`` is the specified feature group's ARN. To selectively
    ingest to the offline/online store, specify the ``target_stores`` according to different use cases.
    """

    # Fully qualified name of the wrapped JVM class.
    _wrapped_class = "software.amazon.sagemaker.featurestore.sparksdk.FeatureStoreManager"

    def __init__(self, assume_role_arn: str = None):
        """Create a manager, optionally assuming the given IAM role.

        Args:
            assume_role_arn (str, optional): ARN of an IAM role to assume when calling
                SageMaker FeatureStore. If None, the default credentials are used.
        """
        super(FeatureStoreManager, self).__init__()
        self._java_obj = self._new_java_obj(FeatureStoreManager._wrapped_class, assume_role_arn)

    def ingest_data(self, input_data_frame: DataFrame, feature_group_arn: str, target_stores: List[str] = None):
        """Batch ingest data into SageMaker FeatureStore.

        Args:
            input_data_frame (DataFrame): The DataFrame to be ingested.
            feature_group_arn (str): Target feature group ARN.
            target_stores (List[str], optional): Target stores which the data should be
                ingested to. If None, the connector's default applies.

        Returns:
            The result of the underlying Java ``ingestDataInJava`` call.
        """
        return self._call_java("ingestDataInJava", input_data_frame, feature_group_arn, target_stores)

    def load_feature_definitions_from_schema(self, input_data_frame: DataFrame):
        """Load feature definitions according to the schema of the input DataFrame.

        Args:
            input_data_frame (DataFrame): Input Spark DataFrame to be loaded.

        Returns:
            list[dict]: One ``{"FeatureName": ..., "FeatureType": ...}`` dict per feature
            definition derived from the DataFrame schema.
        """
        java_feature_definitions = self._call_java("loadFeatureDefinitionsFromSchema", input_data_frame)
        return [
            {
                "FeatureName": definition.featureName(),
                "FeatureType": definition.featureType().toString(),
            }
            for definition in java_feature_definitions
        ]

    def get_failed_stream_ingestion_data_frame(self) -> DataFrame:
        """Retrieve a DataFrame of all records that failed to be ingested via ``ingest_data``.

        Returns:
            DataFrame: The records that failed to be ingested.
        """
        return self._call_java("getFailedStreamIngestionDataFrame")
import string
from typing import List
from pyspark.sql import DataFrame
from feature_store_pyspark.wrapper import SageMakerFeatureStoreJavaWrapper
class FeatureStoreManager(SageMakerFeatureStoreJavaWrapper):
    """A central manager for the feature store data repository.

    ``ingest_data`` can be used to do batch data ingestion into the specified feature group. The input data should be
    in the format of a Spark DataFrame and ``feature_group_arn`` is the specified feature group's ARN. To selectively
    ingest to the offline/online store, specify the ``target_stores`` according to different use cases.
    """

    # Fully qualified name of the wrapped JVM class.
    _wrapped_class = "software.amazon.sagemaker.featurestore.sparksdk.FeatureStoreManager"

    def __init__(self, assume_role_arn: str = None):
        """Create a manager, optionally assuming the given IAM role.

        Args:
            assume_role_arn (str, optional): ARN of an IAM role to assume when calling
                SageMaker FeatureStore. If None, the default credentials are used.
        """
        super(FeatureStoreManager, self).__init__()
        self._java_obj = self._new_java_obj(FeatureStoreManager._wrapped_class, assume_role_arn)

    def ingest_data(self, input_data_frame: DataFrame, feature_group_arn: str, target_stores: List[str] = None):
        """Batch ingest data into SageMaker FeatureStore.

        Args:
            input_data_frame (DataFrame): The DataFrame to be ingested.
            feature_group_arn (str): Target feature group ARN.
            target_stores (List[str], optional): Target stores which the data should be
                ingested to. If None, the connector's default applies.

        Returns:
            The result of the underlying Java ``ingestDataInJava`` call.
        """
        return self._call_java("ingestDataInJava", input_data_frame, feature_group_arn, target_stores)

    def load_feature_definitions_from_schema(self, input_data_frame: DataFrame):
        """Load feature definitions according to the schema of the input DataFrame.

        Args:
            input_data_frame (DataFrame): Input Spark DataFrame to be loaded.

        Returns:
            list[dict]: One ``{"FeatureName": ..., "FeatureType": ...}`` dict per feature
            definition derived from the DataFrame schema.
        """
        java_feature_definitions = self._call_java("loadFeatureDefinitionsFromSchema", input_data_frame)
        return [
            {
                "FeatureName": definition.featureName(),
                "FeatureType": definition.featureType().toString(),
            }
            for definition in java_feature_definitions
        ]

    def get_failed_stream_ingestion_data_frame(self) -> DataFrame:
        """Retrieve a DataFrame of all records that failed to be ingested via ``ingest_data``.

        Returns:
            DataFrame: The records that failed to be ingested.
        """
        return self._call_java("getFailedStreamIngestionDataFrame")
import string
from typing import List
from pyspark.sql import DataFrame
from feature_store_pyspark.wrapper import SageMakerFeatureStoreJavaWrapper
class FeatureStoreManager(SageMakerFeatureStoreJavaWrapper):
    """A central manager for the feature store data repository.

    ``ingest_data`` can be used to do batch data ingestion into the specified feature group. The input data should be
    in the format of a Spark DataFrame and ``feature_group_arn`` is the specified feature group's ARN. To selectively
    ingest to the offline/online store, specify the ``target_stores`` according to different use cases.
    """

    # Fully qualified name of the wrapped JVM class.
    _wrapped_class = "software.amazon.sagemaker.featurestore.sparksdk.FeatureStoreManager"

    def __init__(self, assume_role_arn: str = None):
        """Create a manager, optionally assuming the given IAM role.

        Args:
            assume_role_arn (str, optional): ARN of an IAM role to assume when calling
                SageMaker FeatureStore. If None, the default credentials are used.
        """
        super(FeatureStoreManager, self).__init__()
        self._java_obj = self._new_java_obj(FeatureStoreManager._wrapped_class, assume_role_arn)

    def ingest_data(self, input_data_frame: DataFrame, feature_group_arn: str, target_stores: List[str] = None):
        """Batch ingest data into SageMaker FeatureStore.

        Args:
            input_data_frame (DataFrame): The DataFrame to be ingested.
            feature_group_arn (str): Target feature group ARN.
            target_stores (List[str], optional): Target stores which the data should be
                ingested to. If None, the connector's default applies.

        Returns:
            The result of the underlying Java ``ingestDataInJava`` call.
        """
        return self._call_java("ingestDataInJava", input_data_frame, feature_group_arn, target_stores)

    def load_feature_definitions_from_schema(self, input_data_frame: DataFrame):
        """Load feature definitions according to the schema of the input DataFrame.

        Args:
            input_data_frame (DataFrame): Input Spark DataFrame to be loaded.

        Returns:
            list[dict]: One ``{"FeatureName": ..., "FeatureType": ...}`` dict per feature
            definition derived from the DataFrame schema.
        """
        java_feature_definitions = self._call_java("loadFeatureDefinitionsFromSchema", input_data_frame)
        return [
            {
                "FeatureName": definition.featureName(),
                "FeatureType": definition.featureType().toString(),
            }
            for definition in java_feature_definitions
        ]

    def get_failed_stream_ingestion_data_frame(self) -> DataFrame:
        """Retrieve a DataFrame of all records that failed to be ingested via ``ingest_data``.

        Returns:
            DataFrame: The records that failed to be ingested.
        """
        return self._call_java("getFailedStreamIngestionDataFrame")
import string
from typing import List
from pyspark.sql import DataFrame
from feature_store_pyspark.wrapper import SageMakerFeatureStoreJavaWrapper
class FeatureStoreManager(SageMakerFeatureStoreJavaWrapper):
    """A central manager for Feature Store data repository operations.

    ``ingest_data`` performs batch ingestion of a Spark DataFrame into the
    feature group identified by ``feature_group_arn``.  To selectively ingest
    into the offline/online store, pass ``target_stores`` as appropriate for
    the use case.
    """

    # Fully-qualified name of the Scala-side manager class this wrapper delegates to.
    _wrapped_class = "software.amazon.sagemaker.featurestore.sparksdk.FeatureStoreManager"

    def __init__(self, assume_role_arn: str = None):
        """Create a manager backed by a new JVM FeatureStoreManager object.

        Args:
            assume_role_arn (str): optional IAM role ARN forwarded to the
                Java constructor.  (Annotation corrected from the stdlib
                ``string`` module to ``str``.)
        """
        super(FeatureStoreManager, self).__init__()
        self._java_obj = self._new_java_obj(FeatureStoreManager._wrapped_class, assume_role_arn)

    def ingest_data(self, input_data_frame: DataFrame, feature_group_arn: str, target_stores: List[str] = None):
        """Batch ingest data into SageMaker Feature Store.

        :param input_data_frame: the DataFrame to be ingested.
        :param feature_group_arn: ARN of the target feature group.
        :param target_stores: optional list of target stores the data should
            be ingested to.
        :return: the value returned by the underlying Java call.
        """
        return self._call_java("ingestDataInJava", input_data_frame, feature_group_arn, target_stores)

    def load_feature_definitions_from_schema(self, input_data_frame: DataFrame):
        """Load feature definitions according to the schema of the input DataFrame.

        :param input_data_frame: input Spark DataFrame whose schema is inspected.
        :return: list of ``{"FeatureName": ..., "FeatureType": ...}`` dicts.
        """
        java_feature_definitions = self._call_java("loadFeatureDefinitionsFromSchema", input_data_frame)
        # List comprehension instead of list(map(lambda ...)) for readability.
        return [
            {
                "FeatureName": definition.featureName(),
                "FeatureType": definition.featureType().toString(),
            }
            for definition in java_feature_definitions
        ]

    def get_failed_stream_ingestion_data_frame(self) -> DataFrame:
        """Return a DataFrame of all records that failed streaming ingestion.

        :return: the DataFrame of records that failed to be ingested via
            ``ingest_data``.
        """
        return self._call_java("getFailedStreamIngestionDataFrame")
import importlib.util
import json
import logging
import os
from pathlib import Path
from typing import Optional
from huggingface_hub import HfApi
from huggingface_hub.file_download import cached_download, hf_hub_url
from transformers import pipeline
from transformers.file_utils import is_tf_available, is_torch_available
from transformers.pipelines import Conversation, Pipeline
from sagemaker_huggingface_inference_toolkit.optimum_utils import is_optimum_neuron_available
# Optional deep-learning framework imports; a serving image usually ships
# with only one of TensorFlow / PyTorch installed.
if is_tf_available():
    import tensorflow as tf
if is_torch_available():
    import torch

# True when the AWS Neuron (Inferentia) PyTorch plugin is importable.
_aws_neuron_available = importlib.util.find_spec("torch_neuron") is not None


def is_aws_neuron_available():
    """Return True if the ``torch_neuron`` package is importable."""
    return _aws_neuron_available
logger = logging.getLogger(__name__)

# Default weight-file name per framework; used to decide which weight file
# to download from the Hub.
PYTORCH_WEIGHTS_NAME = "pytorch_model.bin"
TF2_WEIGHTS_NAME = "tf_model.h5"
FRAMEWORK_MAPPING = {"pytorch": PYTORCH_WEIGHTS_NAME, "tensorflow": TF2_WEIGHTS_NAME}

# Auxiliary (non-weight) files downloaded alongside the model when fetching
# a repository from the Hugging Face Hub.
FILE_LIST_NAMES = [
    "config.json",
    "special_tokens_map.json",
    "tokenizer_config.json",
    "tokenizer.json",
    "vocab.json",
    "vocab.txt",
    "merges.txt",
    "dict.txt",
    "preprocessor_config.json",
    "added_tokens.json",
    "README.md",
    "spiece.model",
    "sentencepiece.bpe.model",
    "sentencepiece.bpe.vocab",
    "sentence.bpe.model",
    "bpe.codes",
    "source.spm",
    "target.spm",
    "spm.model",
    "sentence_bert_config.json",
    "sentence_roberta_config.json",
    "sentence_distilbert_config.json",
    "added_tokens.json",  # NOTE(review): duplicate of the entry above; harmless for the membership filter below.
    "model_args.json",
    "entity_vocab.json",
    "pooling_config.json",
]

if is_optimum_neuron_available():
    FILE_LIST_NAMES.append("model.neuron")

# Separator used to flatten "org/model" repo ids into a single directory name.
REPO_ID_SEPARATOR = "__"

# Maps model-architecture name suffixes (from config.json "architectures")
# to pipeline task names; more specific suffixes appear before generic ones.
ARCHITECTURES_2_TASK = {
    "TapasForQuestionAnswering": "table-question-answering",
    "ForQuestionAnswering": "question-answering",
    "ForTokenClassification": "token-classification",
    "ForSequenceClassification": "text-classification",
    "ForMultipleChoice": "multiple-choice",
    "ForMaskedLM": "fill-mask",
    "ForCausalLM": "text-generation",
    "ForConditionalGeneration": "text2text-generation",
    "MTModel": "text2text-generation",
    "EncoderDecoderModel": "text2text-generation",
    # Model specific task for backward comp
    "GPT2LMHeadModel": "text-generation",
    "T5WithLMHeadModel": "text2text-generation",
}

# Optional Hub credentials / revision pinning, taken from the environment.
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", None)
HF_MODEL_REVISION = os.environ.get("HF_MODEL_REVISION", None)
def wrap_conversation_pipeline(pipeline):
    """Wrap a ``conversational`` pipeline to accept and return plain dicts.

    The wrapper builds a ``Conversation`` object from the request dict, runs
    the wrapped pipeline, and flattens the result into a JSON-serializable
    dict holding the latest generated text plus the full conversation state.
    """

    def wrapped_pipeline(inputs, *args, **kwargs):
        conversation = Conversation(
            inputs["text"],
            past_user_inputs=inputs.get("past_user_inputs", []),
            generated_responses=inputs.get("generated_responses", []),
        )
        result = pipeline(conversation, *args, **kwargs)
        return {
            "generated_text": result.generated_responses[-1],
            "conversation": {
                "past_user_inputs": result.past_user_inputs,
                "generated_responses": result.generated_responses,
            },
        }

    return wrapped_pipeline
def _is_gpu_available():
    """Return True if the installed DL framework can see a GPU.

    Returns:
        bool: whether a GPU device is visible to TF (preferred) or PyTorch.

    Raises:
        RuntimeError: if neither TensorFlow 2.x nor PyTorch is installed.
    """
    if is_tf_available():
        # Return the boolean expression directly instead of
        # ``True if ... else False``.
        return len(tf.config.list_physical_devices("GPU")) > 0
    if is_torch_available():
        return torch.cuda.is_available()
    raise RuntimeError(
        "At least one of TensorFlow 2.0 or PyTorch should be installed. "
        "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
        "To install PyTorch, read the instructions at https://pytorch.org/."
    )
def _get_framework():
    """Return the DL framework used for inference ("pytorch" wins over TF).

    Raises:
        RuntimeError: if neither TensorFlow 2.x nor PyTorch is installed.
    """
    if is_torch_available():
        return "pytorch"
    if is_tf_available():
        return "tensorflow"
    raise RuntimeError(
        "At least one of TensorFlow 2.0 or PyTorch should be installed. "
        "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
        "To install PyTorch, read the instructions at https://pytorch.org/."
    )
def _build_storage_path(model_id: str, model_dir: Path, revision: Optional[str] = None):
    """Create the local storage path for a Hub model based on id and revision.

    Repo ids of the form ``org/model`` are flattened using
    ``REPO_ID_SEPARATOR``; when a revision is given it is appended after a dot
    (``org__model.revision``).

    Args:
        model_id: Hub repository id, e.g. ``"org/model"``.
        model_dir: base directory where models are stored.
        revision: optional git revision to pin.

    Returns:
        str: the joined storage path.
    """
    # Fix: the original conditions tested the constant truthy string "/"
    # (``if "/" and revision is None``) instead of ``"/" in model_id``,
    # leaving two branches unreachable.
    folder_name = model_id.replace("/", REPO_ID_SEPARATOR) if "/" in model_id else model_id
    if revision is not None:
        folder_name = folder_name + "." + revision
    return os.path.join(model_dir, folder_name)
def _load_model_from_hub(
    model_id: str, model_dir: Path, revision: Optional[str] = None, use_auth_token: Optional[str] = None
):
    """Download a model repository at the given revision from the HF Hub.

    All files are nested inside a folder in order to keep their actual
    filename relative to that folder (``org__model.revision``).

    Args:
        model_id: Hub repository id.
        model_dir: base directory to store the downloaded repository in.
        revision: optional git revision to pin.
        use_auth_token: optional token for private repositories.

    Returns:
        str: path of the folder the repository was downloaded into.
    """
    # Fix: ``logger.warn`` is a deprecated alias of ``logger.warning``.
    logger.warning(
        "This is an experimental beta features, which allows downloading model from the Hugging Face Hub on start up. "
        "It loads the model defined in the env var `HF_MODEL_ID`"
    )
    # get all files from repository
    _api = HfApi()
    model_info = _api.model_info(repo_id=model_id, revision=revision, token=use_auth_token)
    os.makedirs(model_dir, exist_ok=True)

    # extracts base framework
    framework = _get_framework()

    # creates directory for saved model based on revision and model
    storage_folder = _build_storage_path(model_id, model_dir, revision)
    os.makedirs(storage_folder, exist_ok=True)

    # filters files to download: auxiliary files plus the framework's weight file
    download_file_list = [
        file.rfilename
        for file in model_info.siblings
        if file.rfilename in FILE_LIST_NAMES + [FRAMEWORK_MAPPING[framework]]
    ]
    # download files to storage_folder and remove the per-file lock left by the cache
    for file in download_file_list:
        url = hf_hub_url(model_id, filename=file, revision=revision)
        path = cached_download(url, cache_dir=storage_folder, force_filename=file, use_auth_token=use_auth_token)
        if os.path.exists(path + ".lock"):
            os.remove(path + ".lock")
    return storage_folder
def infer_task_from_model_architecture(model_config_path: str, architecture_index=0) -> str:
    """Infer the pipeline task from ``architectures`` in a model's config.json.

    Detection is not guaranteed: some models implement multiple architectures
    or were trained on different tasks
    (https://huggingface.co/facebook/bart-large/blob/main/config.json).
    It should work for every model fine-tuned on Amazon SageMaker.  It is
    always recommended to set the task explicitly via the ``TASK`` env var.

    Raises:
        ValueError: if no known architecture suffix matches.
    """
    with open(model_config_path, "r") as config_file:
        config = json.loads(config_file.read())
    architecture = config.get("architectures", [None])[architecture_index]

    task = None
    for arch_options in ARCHITECTURES_2_TASK:
        # Guard against a missing "architectures" entry (architecture is None).
        if architecture is not None and architecture.endswith(arch_options):
            task = ARCHITECTURES_2_TASK[arch_options]
            # Fix: stop at the first (most specific) match.  Without the break,
            # "TapasForQuestionAnswering" was first mapped correctly and then
            # overwritten by the generic "ForQuestionAnswering" entry.
            break

    if task is None:
        raise ValueError(
            f"Task couldn't be inferenced from {architecture}."
            f"Inference Toolkit can only inference tasks from architectures ending with {list(ARCHITECTURES_2_TASK.keys())}."
            "Use env `HF_TASK` to define your task."
        )
    # set env to work with
    os.environ["HF_TASK"] = task
    return task
def infer_task_from_hub(model_id: str, revision: Optional[str] = None, use_auth_token: Optional[str] = None) -> str:
    """Infer the task from the Hub by reading the model's ``pipeline_tag``.

    Also exports the detected task via the ``HF_TASK`` environment variable.

    Raises:
        ValueError: if the Hub reports no ``pipeline_tag`` for the model.
    """
    _api = HfApi()
    model_info = _api.model_info(repo_id=model_id, revision=revision, token=use_auth_token)
    if model_info.pipeline_tag is None:
        raise ValueError(
            f"Task couldn't be inferenced from {model_info.pipeline_tag}." "Use env `HF_TASK` to define your task."
        )
    # set env to work with
    os.environ["HF_TASK"] = model_info.pipeline_tag
    return model_info.pipeline_tag
def get_pipeline(task: str, device: int, model_dir: Path, **kwargs) -> Pipeline:
    """Create a transformers pipeline for ``task`` from a locally saved model.

    Raises:
        EnvironmentError: if ``task`` is None.
    """
    if task is None:
        raise EnvironmentError(
            "The task for this model is not set: Please set one: https://huggingface.co/docs#how-is-a-models-type-of-inference-api-and-widget-determined"
        )

    # Audio/image tasks load a feature extractor instead of a tokenizer;
    # point whichever one applies at the model directory.
    feature_extractor_tasks = {
        "automatic-speech-recognition",
        "image-segmentation",
        "image-classification",
        "audio-classification",
        "object-detection",
        "zero-shot-image-classification",
    }
    if task in feature_extractor_tasks:
        kwargs["feature_extractor"] = model_dir
    else:
        kwargs["tokenizer"] = model_dir

    # load pipeline
    hf_pipeline = pipeline(task=task, model=model_dir, device=device, **kwargs)

    # Conversational pipelines get a dict-in/dict-out wrapper for a better UX.
    if task == "conversational":
        hf_pipeline = wrap_conversation_pipeline(hf_pipeline)
    return hf_pipeline
import base64
import csv
import datetime
import json
from io import BytesIO, StringIO
import numpy as np
from sagemaker_inference import errors
from sagemaker_inference.decoder import _npy_to_numpy
from sagemaker_inference.encoder import _array_to_npy
from mms.service import PredictionException
from PIL import Image
from sagemaker_huggingface_inference_toolkit import content_types
def decode_json(content):
    """Deserialize a JSON payload into a Python object."""
    return json.loads(content)
def decode_csv(string_like):  # type: (str) -> dict
    """Convert a CSV payload to a request dictionary.

    The CSV must carry a header row containing ``inputs`` or the
    ``question``/``context`` columns.

    Args:
        string_like (str): CSV string.

    Returns:
        (dict): request dictionary built from the rows.

    Raises:
        PredictionException: if the expected header row is missing.
    """
    # Detect whether the incoming CSV has a usable header row.
    first_line = string_like.splitlines()[0].lower()
    if not any(header in first_line for header in ["question", "context", "inputs"]):
        raise PredictionException(
            "You need to provide the correct CSV with Header columns to use it with the inference toolkit default handler.",
            400,
        )
    # Read the CSV rows as dictionaries keyed by the header columns.
    rows = list(csv.DictReader(StringIO(string_like)))
    if "inputs" in rows[0].keys():
        return {"inputs": [row["inputs"] for row in rows]}
    return {"inputs": rows}
def decode_image(bpayload: bytearray):
    """Convert a .jpeg / .png / .tiff... object to a proper inputs dict.

    Args:
        bpayload (bytes): byte stream of an encoded image.

    Returns:
        (dict): ``{"inputs": PIL.Image}`` with the image converted to RGB.
    """
    image = Image.open(BytesIO(bpayload)).convert("RGB")
    return {"inputs": image}
def decode_audio(bpayload: bytearray):
    """Convert a .wav / .flac / .mp3 payload to a proper inputs dict.

    Args:
        bpayload (bytes): raw audio byte stream.

    Returns:
        (dict): ``{"inputs": bytes}`` with the payload passed through unchanged.
    """
    return {"inputs": bytes(bpayload)}
# https://github.com/automl/SMAC3/issues/453
class _JSONEncoder(json.JSONEncoder):
    """Custom ``JSONEncoder`` for numpy scalars/arrays, datetimes and images.

    Branch order matters: the numpy scalar checks run before the generic
    ``tolist`` check so scalars become plain ints/floats.
    """

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif hasattr(obj, "tolist"):
            return obj.tolist()
        elif isinstance(obj, datetime.datetime):
            # Idiomatic ``str(obj)`` instead of calling ``obj.__str__()``.
            return str(obj)
        elif isinstance(obj, Image.Image):
            # Serialize images as base64-encoded PNG strings.
            with BytesIO() as out:
                obj.save(out, format="PNG")
                return base64.b64encode(out.getvalue()).decode("utf-8")
        else:
            return super(_JSONEncoder, self).default(obj)
def encode_json(content):
    """Serialize ``content`` to compact JSON using the custom ``_JSONEncoder``."""
    return json.dumps(
        content,
        cls=_JSONEncoder,
        ensure_ascii=False,
        allow_nan=False,
        indent=None,
        separators=(",", ":"),
    )
def encode_csv(content):  # type: (dict or list) -> str
    """Convert the result of a transformers pipeline to CSV.

    Args:
        content (dict | list): result of a transformers pipeline; a single
            dict is treated as a one-row result.

    Returns:
        (str): object serialized to CSV (header row first).
    """
    if not isinstance(content, list):
        # Fix: wrap a single dict in a list.  The previous ``list(content)``
        # turned a dict into a list of its keys (strings), which then broke
        # the ``content[0].keys()`` header extraction below.
        content = [content]
    stream = StringIO()
    column_header = content[0].keys()
    writer = csv.DictWriter(stream, column_header)
    writer.writeheader()
    writer.writerows(content)
    return stream.getvalue()
# Serializers keyed by response content type.
_encoder_map = {
    content_types.NPY: _array_to_npy,
    content_types.CSV: encode_csv,
    content_types.JSON: encode_json,
}
# Deserializers keyed by request content type.
_decoder_map = {
    content_types.NPY: _npy_to_numpy,
    content_types.CSV: decode_csv,
    content_types.JSON: decode_json,
    # image mime-types
    content_types.JPEG: decode_image,
    content_types.PNG: decode_image,
    content_types.TIFF: decode_image,
    content_types.BMP: decode_image,
    content_types.GIF: decode_image,
    content_types.WEBP: decode_image,
    content_types.X_IMAGE: decode_image,
    # audio mime-types
    content_types.FLAC: decode_audio,
    content_types.MP3: decode_audio,
    content_types.WAV: decode_audio,
    content_types.OGG: decode_audio,
    content_types.X_AUDIO: decode_audio,
}
def decode(content, content_type=content_types.JSON):
    """Decode a payload of the given content type into a Transformers input.

    Raises:
        UnsupportedFormatError: for content types with no registered decoder.
        PredictionException: re-raised unchanged from the decoder itself.
    """
    try:
        return _decoder_map[content_type](content)
    except KeyError:
        raise errors.UnsupportedFormatError(content_type)
    except PredictionException as pred_err:
        raise pred_err
def encode(content, content_type=content_types.JSON):
    """Encode a Transformers result as the given content type.

    Raises:
        UnsupportedFormatError: for content types with no registered encoder.
    """
    try:
        return _encoder_map[content_type](content)
    except KeyError:
        raise errors.UnsupportedFormatError(content_type)
"""This module contains functionality for converting array-like objects
to various types of objects and files."""
from __future__ import absolute_import
import json
import numpy as np
from six import BytesIO, StringIO
from sagemaker_inference import content_types, errors
def _array_to_json(array_like):
    """Convert an array-like object to JSON.

    To understand better what an array-like object is see:
    https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays

    Args:
        array_like (np.array or Iterable or int or float): array-like object
            to be converted to JSON.

    Returns:
        (str): object serialized to JSON
    """

    def _fallback(obj):
        # numpy arrays/scalars expose tolist(); anything else defers to the
        # stock encoder, which raises TypeError for unserializable objects.
        if hasattr(obj, "tolist"):
            return obj.tolist()
        return json.JSONEncoder().default(obj)

    return json.dumps(array_like, default=_fallback)
def _array_to_npy(array_like):
    """Convert an array-like object to the NPY format.

    To understand better what an array-like object is see:
    https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays

    Args:
        array_like (np.array or Iterable or int or float): array-like object
            to be converted to NPY.

    Returns:
        (obj): NPY-serialized bytes.
    """
    with BytesIO() as buffer:
        np.save(buffer, array_like)
        return buffer.getvalue()
def _array_to_csv(array_like):
    """Convert an array-like object to CSV.

    To understand better what an array-like object is see:
    https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays

    Args:
        array_like (np.array or Iterable or int or float): array-like object
            to be converted to CSV.

    Returns:
        (str): object serialized to CSV
    """
    with StringIO() as stream:
        np.savetxt(stream, array_like, delimiter=",", fmt="%s")
        return stream.getvalue()
# Serializers keyed by content type.
_encoder_map = {
    content_types.NPY: _array_to_npy,
    content_types.CSV: _array_to_csv,
    content_types.JSON: _array_to_json,
}
# Content types the default encoder is able to produce.
SUPPORTED_CONTENT_TYPES = set(_encoder_map.keys())
def encode(array_like, content_type):
    """Encode an array-like object into the requested content type.

    To understand better what an array-like object is see:
    https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays

    Args:
        array_like (np.array or Iterable or int or float): to be encoded.
        content_type (str): content type to be used.

    Returns:
        the serialized payload in the requested format.

    Raises:
        UnsupportedFormatError: if no encoder is registered for
            ``content_type``.
    """
    try:
        return _encoder_map[content_type](array_like)
    except KeyError:
        raise errors.UnsupportedFormatError(content_type)
"""This module contains custom exceptions."""
from __future__ import absolute_import
import textwrap
class UnsupportedFormatError(Exception):
    """Exception used to indicate that an unsupported content type was provided."""

    def __init__(self, content_type, **kwargs):
        # Fix: the message previously read "to to deserialize".
        self._message = textwrap.dedent(
            """Content type %s is not supported by this framework.

            Please implement input_fn to deserialize the request data or an output_fn to
            serialize the response. For more information, see the SageMaker Python SDK README."""
            % content_type
        )
        super(UnsupportedFormatError, self).__init__(self._message, **kwargs)
class BaseInferenceToolkitError(Exception):
    """Exception indicating a problem that occurred during inference.

    Meant to be extended so inference servers can surface structured errors
    (status code, message, response body) back to the client.
    """

    def __init__(self, status_code, message, phrase):
        """Store the pieces of the HTTP error response.

        Args:
            status_code (int): HTTP Error Status Code to send to client
            message (str): Response message to send to client
            phrase (str): Response body to send to client
        """
        self.status_code = status_code
        self.message = message
        self.phrase = phrase
        super(BaseInferenceToolkitError, self).__init__(status_code, message, phrase)
class GenericInferenceToolkitError(BaseInferenceToolkitError):
    """Generic ``BaseInferenceToolkitError`` for unexpected exceptions.

    Used to re-raise arbitrary failures in a way that can be sent back to
    the client.
    """

    def __init__(self, status_code, message=None, phrase=None):
        """Default the message to "Invalid Request" and the phrase to the message.

        Args:
            status_code (int): HTTP Error Status Code to send to client
            message (str): Response message to send to client
            phrase (str): Response body to send to client
        """
        message = message or "Invalid Request"
        super(GenericInferenceToolkitError, self).__init__(status_code, message, phrase or message)
from __future__ import absolute_import
import re
# Matches "Content-Type" header keys in the common spellings: with or
# without the dash, each word optionally capitalized.
CONTENT_TYPE_REGEX = re.compile("^[Cc]ontent-?[Tt]ype")
def read_file(path, mode="r"):
    """Read and return the entire contents of a file.

    Args:
        path (str): path to the file.
        mode (str): mode which the file will be opened in.

    Returns:
        (str): contents of the file.
    """
    with open(path, mode) as handle:
        return handle.read()
def write_file(path, data, mode="w"):  # type: (str, str, str) -> None
    """Write ``data`` to the file at ``path``.

    Args:
        path (str): path to the file.
        data (str): data to be written to the file.
        mode (str): mode which the file will be opened in.
    """
    with open(path, mode) as handle:
        handle.write(data)
def retrieve_content_type_header(request_property):
    """Retrieve the Content-Type header from incoming request metadata.

    Handles multiple spellings of Content-Type based on the presence of the
    dash and initial capitalization in each respective word.

    Args:
        request_property (dict): incoming request metadata

    Returns:
        (str): the request content type, or None when no key matches.
    """
    for key, value in request_property.items():
        if CONTENT_TYPE_REGEX.match(key):
            return value
    return None
def parse_accept(accept):
    """Parse the Accept header sent with a request.

    Args:
        accept (str): the value of an Accept header.

    Returns:
        (list): the MIME types the client is able to understand, with any
            spaces removed.
    """
    without_spaces = accept.replace(" ", "")
    return without_spaces.split(",")
def remove_crlf(illegal_string):
    """Replace characters prohibited by the MMS dependency Netty with spaces.

    https://github.com/netty/netty/issues/8312

    Args:
        illegal_string: The string containing prohibited characters.

    Returns:
        str: The input string with each CR and LF replaced by a space.
    """
    return illegal_string.replace("\r", " ").replace("\n", " ")
import textwrap
from sagemaker_inference import decoder, encoder, errors, utils
class DefaultInferenceHandler(object):
    """Bare-bones implementation of default inference functions."""

    def default_model_fn(self, model_dir, context=None):
        """Load the model.

        Args:
            model_dir (str): The directory where model files are stored.
            context (obj): the request context (default: None).

        Returns:
            obj: the loaded model.
        """
        raise NotImplementedError(
            textwrap.dedent(
                """
            Please provide a model_fn implementation.
            See documentation for model_fn at https://sagemaker.readthedocs.io/en/stable/
            """
            )
        )

    def default_input_fn(self, input_data, content_type, context=None):
        # pylint: disable=unused-argument, no-self-use
        """Deserialize the request data into an object for prediction.

        Args:
            input_data (obj): the request data.
            content_type (str): the request content type.
            context (obj): the request context (default: None).

        Returns:
            obj: data ready for prediction.
        """
        return decoder.decode(input_data, content_type)

    def default_predict_fn(self, data, model, context=None):
        """Run the model on the deserialized input.

        Args:
            model (obj): model loaded by the model_fn.
            data: deserialized data returned by the input_fn.
            context (obj): the request context (default: None).

        Returns:
            obj: prediction result.
        """
        raise NotImplementedError(
            textwrap.dedent(
                """
            Please provide a predict_fn implementation.
            See documentation for predict_fn at https://sagemaker.readthedocs.io/en/stable/
            """
            )
        )

    def default_output_fn(self, prediction, accept, context=None):  # pylint: disable=no-self-use
        """Serialize the prediction result to the first supported accept type.

        Args:
            prediction (obj): prediction result returned by the predict_fn.
            accept (str): accept header expected by the client.
            context (obj): the request context (default: None).

        Returns:
            obj: tuple of serialized prediction data and chosen content type.
        """
        supported = [ct for ct in utils.parse_accept(accept) if ct in encoder.SUPPORTED_CONTENT_TYPES]
        if supported:
            chosen = supported[0]
            return encoder.encode(prediction, chosen), chosen
        raise errors.UnsupportedFormatError(accept)
"""This module contains functionality for converting various types of
files and objects to NumPy arrays."""
from __future__ import absolute_import
import json
import numpy as np
import scipy.sparse
from six import BytesIO, StringIO
from sagemaker_inference import content_types, errors
def _json_to_numpy(string_like, dtype=None):  # type: (str) -> np.array
    """Convert a JSON string to a numpy array.

    Args:
        string_like (str): JSON string.
        dtype (dtype, optional): Data type of the resulting array.
            If None, the dtypes will be determined by the contents
            of each column, individually. This argument can only be
            used to 'upcast' the array. For downcasting, use the
            .astype(t) method.

    Returns:
        (np.array): numpy array
    """
    return np.array(json.loads(string_like), dtype=dtype)
def _csv_to_numpy(string_like, dtype=None):  # type: (str) -> np.array
    """Convert a CSV string to a numpy array.

    Args:
        string_like (str): CSV string.
        dtype (dtype, optional): Data type of the resulting array. If None,
            the dtypes will be determined by the contents of each column,
            individually. This argument can only be used to 'upcast' the array.
            For downcasting, use the .astype(t) method.

    Returns:
        (np.array): numpy array
    """
    return np.genfromtxt(StringIO(string_like), dtype=dtype, delimiter=",")
def _npy_to_numpy(npy_array):  # type: (object) -> np.array
    """Convert NPY-serialized bytes into a numpy array.

    Args:
        npy_array (npy array): to be converted to numpy array

    Returns:
        (np.array): converted numpy array.
    """
    return np.load(BytesIO(npy_array), allow_pickle=True)
def _npz_to_sparse(npz_bytes):  # type: (object) -> scipy.sparse.spmatrix
    """Convert .npz-formatted bytes to a scipy sparse matrix.

    Args:
        npz_bytes (object): Bytes encoding a sparse matrix in the .npz format.

    Returns:
        (scipy.sparse.spmatrix): A sparse matrix.
    """
    return scipy.sparse.load_npz(BytesIO(npz_bytes))
# Deserializers keyed by request content type.
_decoder_map = {
    content_types.NPY: _npy_to_numpy,
    content_types.CSV: _csv_to_numpy,
    content_types.JSON: _json_to_numpy,
    content_types.NPZ: _npz_to_sparse,
}
def decode(obj, content_type):
    """Decode an object that is encoded as one of the default content types.

    Args:
        obj (object): to be decoded.
        content_type (str): content type to be used.

    Returns:
        object: decoded object for prediction.

    Raises:
        UnsupportedFormatError: if no decoder is registered for
            ``content_type``.
    """
    try:
        return _decoder_map[content_type](obj)
    except KeyError:
        raise errors.UnsupportedFormatError(content_type)
"""This module contains functionality for the default handler service."""
from __future__ import absolute_import
import os
from sagemaker_inference.transformer import Transformer
# Environment variable that is extended with the model's code directory.
PYTHON_PATH_ENV = "PYTHONPATH"
class DefaultHandlerService(object):
    """Default handler service that is executed by the model server.

    The handler service is responsible for defining an ``initialize`` and
    ``handle`` method.
        - ``handle`` is invoked for all incoming inference requests to the model server.
        - ``initialize`` is invoked at model server start up.

    Implementation of:
    https://github.com/awslabs/multi-model-server/blob/master/docs/custom_service.md
    """

    def __init__(self, transformer=None):
        # Fall back to a stock Transformer when none is supplied.
        self._service = transformer if transformer else Transformer()

    def handle(self, data, context):
        """Handle an inference request: run the transform and make a prediction.

        Args:
            data (obj): the request data.
            context (obj): metadata on the incoming request data.

        Returns:
            list[obj]: The return value from the Transformer.transform method,
                which is a serialized prediction result wrapped in a list if
                inference is successful. Otherwise returns an error message
                with the context set appropriately.
        """
        return self._service.transform(data, context)

    def initialize(self, context):
        """Validate the user module against the SageMaker inference contract.

        Also prepends ``<model_dir>/code`` to PYTHONPATH so the user's
        inference module can be imported.
        """
        model_dir = context.system_properties.get("model_dir")

        # add model_dir/code to python path
        code_dir_path = "{}:".format(model_dir + "/code")
        os.environ[PYTHON_PATH_ENV] = code_dir_path + os.environ.get(PYTHON_PATH_ENV, "")

        self._service.validate_and_initialize(model_dir=model_dir, context=context)
from __future__ import absolute_import
import os
import mxnet as mx
from sagemaker_inference import (
content_types,
decoder,
default_inference_handler,
encoder,
errors,
)
from sagemaker_mxnet_serving_container.utils import (
get_default_context,
parse_accept,
read_data_shapes,
)
# Env var that can override the first (batch) dimension used to bind the model.
PREFERRED_BATCH_SIZE_PARAM = 'SAGEMAKER_DEFAULT_MODEL_FIRST_DIMENSION_SIZE'
# Set to 'true' when an inference accelerator is attached to the endpoint.
INFERENCE_ACCELERATOR_PRESENT_ENV = 'SAGEMAKER_INFERENCE_ACCELERATOR_PRESENT'
DEFAULT_MODEL_NAME = 'model'
# File names produced by the default save function for MXNet training.
DEFAULT_MODEL_FILENAMES = {
    'symbol': 'model-symbol.json',
    'params': 'model-0000.params',
    'shapes': 'model-shapes.json',
}
class DefaultMXNetInferenceHandler(default_inference_handler.DefaultInferenceHandler):
    """Default inference handler that loads and serves an MXNet Module model."""

    # Content types the default input/output functions accept and produce.
    VALID_CONTENT_TYPES = (content_types.JSON, content_types.NPY)

    def default_model_fn(self, model_dir, preferred_batch_size=1):
        """Function responsible for loading the model. This implementation is designed to work with
        the default save function provided for MXNet training.

        Args:
            model_dir (str): The directory where model files are stored
            preferred_batch_size (int): preferred batch size of the model's data shape.
                Defaults to 1.

        Returns:
            mxnet.mod.Module: the loaded model.

        Raises:
            ValueError: if any of the expected model files is missing.
        """
        for f in DEFAULT_MODEL_FILENAMES.values():
            path = os.path.join(model_dir, f)
            if not os.path.exists(path):
                raise ValueError('Failed to load model with default model_fn: missing file {}.'
                                 'Expected files: {}'.format(f, [file_name for _, file_name
                                                                 in DEFAULT_MODEL_FILENAMES.items()]))

        shapes_file = os.path.join(model_dir, DEFAULT_MODEL_FILENAMES['shapes'])
        # NOTE(review): with the default preferred_batch_size=1 (truthy), the
        # env-var fallback below is never consulted; it only applies when a
        # caller explicitly passes a falsy value -- confirm intent.
        preferred_batch_size = preferred_batch_size or os.environ.get(PREFERRED_BATCH_SIZE_PARAM)
        data_names, data_shapes = read_data_shapes(shapes_file, preferred_batch_size)

        sym, args, aux = mx.model.load_checkpoint(os.path.join(model_dir, DEFAULT_MODEL_NAME), 0)

        # Use the Elastic Inference context when an accelerator is attached.
        ctx = mx.eia() if os.environ.get(INFERENCE_ACCELERATOR_PRESENT_ENV) == 'true' else get_default_context()

        mod = mx.mod.Module(symbol=sym, context=ctx, data_names=data_names, label_names=None)
        mod.bind(for_training=False, data_shapes=data_shapes)
        mod.set_params(args, aux, allow_missing=True)

        return mod

    def default_input_fn(self, input_data, content_type):
        """Take request data and deserialize it into an MXNet NDArray for prediction.

        When an InvokeEndpoint operation is made against an Endpoint running SageMaker model server,
        the model server receives two pieces of information:

            - The request's content type, for example "application/json"
            - The request data

        The ``input_fn`` is responsible for preprocessing request data before prediction.

        Args:
            input_data (obj): the request data
            content_type (str): the request's content type

        Returns:
            mxnet.nd.array: an MXNet NDArray

        Raises:
            sagemaker_inference.errors.UnsupportedFormatError: if an unsupported content type is used.
        """
        if content_type in self.VALID_CONTENT_TYPES:
            np_array = decoder.decode(input_data, content_type)
            return mx.nd.array(np_array).as_in_context(get_default_context())
        else:
            raise errors.UnsupportedFormatError(content_type)

    def default_output_fn(self, prediction, accept):
        """Serialize the prediction into a response.

        Args:
            prediction (mxnet.nd.array): an MXNet NDArray that is the result of a prediction
            accept (str): the accept content type expected by the client

        Returns:
            obj: prediction data.

        Raises:
            sagemaker_inference.errors.UnsupportedFormatError: if an unsupported content type is used.
        """
        for content_type in parse_accept(accept):
            if content_type in self.VALID_CONTENT_TYPES:
                return encoder.encode(prediction.asnumpy().tolist(), content_type)
        raise errors.UnsupportedFormatError(accept)
class DefaultModuleInferenceHandler(DefaultMXNetInferenceHandler):
    """Default inference handler for MXNet ``Module`` models."""

    VALID_CONTENT_TYPES = (content_types.JSON, content_types.CSV, content_types.NPY)

    def default_input_fn(self, input_data, content_type, model=None):
        """Take request data and deserialize it into an object for prediction.

        When an InvokeEndpoint operation is made against an Endpoint running
        a SageMaker model server, the model server receives two pieces of
        information:

            - The request's content type, for example "application/json"
            - The request data

        The ``input_fn`` is responsible for preprocessing request data before
        prediction.

        Args:
            input_data (obj): the request data
            content_type (str): the request's content type
            model (obj): an MXNet Module. Required here despite the ``None``
                default; its single input shape drives reshaping and batching.

        Returns:
            mxnet.io.NDArrayIter: data ready for prediction.

        Raises:
            sagemaker_inference.errors.UnsupportedFormatError: if an
                unsupported content type is used.
        """
        if content_type not in self.VALID_CONTENT_TYPES:
            raise errors.UnsupportedFormatError(content_type)

        np_array = decoder.decode(input_data, content_type)
        ndarray = mx.nd.array(np_array).as_in_context(get_default_context())

        # We require model to only have one input
        [data_shape] = model.data_shapes

        # Reshape flattened CSV as specified by the model
        if content_type == content_types.CSV:
            _, data = data_shape
            # infer batch dimension from input ndarray
            if isinstance(data, tuple):
                target_shape = (-1,) + data[1:]
            elif isinstance(data, list):
                target_shape = [-1] + data[1:]
            else:
                raise TypeError("Input shape has to be list or tuple.")
            ndarray = ndarray.reshape(target_shape)

        # Batch size is the first dimension of model input
        model_batch_size = data_shape[1][0]
        # Rows needed to round the input up to a whole number of batches.
        # (-n) % b is 0 when n is already an exact multiple of b. The previous
        # formula, b - n % b, wrongly evaluated to b in that case, so the
        # patched getpad() below made MXNet discard one full batch of valid
        # rows during Module predict.
        pad_rows = 0 if model_batch_size == 1 else (-ndarray.shape[0]) % model_batch_size

        model_input = mx.io.NDArrayIter(ndarray, batch_size=model_batch_size,
                                        last_batch_handle='pad')

        if pad_rows:
            # Update the getpad method on the model_input data iterator to return the amount of
            # padding. MXNet will ignore the last getpad() rows during Module predict.
            def _getpad():
                return pad_rows

            model_input.getpad = _getpad

        return model_input

    def default_predict_fn(self, data, model):
        """Use the model to create a prediction for the data.

        Args:
            data (mxnet.io.NDArrayIter): input data for prediction
            model (mxnet.module.BaseModule): an MXNet Module

        Returns:
            list: the prediction result. This will be either a list of
                ``mxnet.nd.array`` or a list of lists of ``mxnet.nd.array``
        """
        return model.predict(data)
class DefaultGluonBlockInferenceHandler(DefaultMXNetInferenceHandler):
    """Default inference handler for Gluon ``Block`` models."""

    def default_predict_fn(self, data, block):
        """Use the model to create a prediction for the data.

        Args:
            data (mxnet.nd.array): input data for prediction (deserialized by
                ``input_fn``)
            block (mxnet.gluon.block.Block): a Gluon neural network

        Returns:
            mxnet.nd.array: the prediction result
        """
        # A Gluon Block is callable; invoking it runs the forward pass.
        # (The original final line was corrupted by a non-Python artifact
        # appended after the return expression; it is removed here.)
        return block(data)
from abc import ABCMeta, abstractmethod
from pyspark import keyword_only
from pyspark.ml.util import Identifiable
from pyspark.ml.wrapper import JavaEstimator
from sagemaker_pyspark import SageMakerJavaWrapper, RandomNamePolicyFactory, SageMakerClients, \
IAMRoleFromConfig, S3AutoCreatePath, Option
_sagemaker_spark_sdk_package = "com.amazonaws.services.sagemaker.sparksdk"
class EndpointCreationPolicy(object):
    """Determines whether and when to create the Endpoint and other Hosting resources.

    Attributes:
        CREATE_ON_CONSTRUCT: create the Endpoint upon creation of the
            SageMakerModel, at the end of fit()
        CREATE_ON_TRANSFORM: create the Endpoint upon invocation of
            SageMakerModel.transform().
        DO_NOT_CREATE: do not create the Endpoint.
    """

    class _CreateOnConstruct(SageMakerJavaWrapper):
        # Wraps the Scala case object of the same name.
        _wrapped_class = (
            _sagemaker_spark_sdk_package + ".EndpointCreationPolicy.CREATE_ON_CONSTRUCT"
        )

    class _CreateOnTransform(SageMakerJavaWrapper):
        _wrapped_class = (
            _sagemaker_spark_sdk_package + ".EndpointCreationPolicy.CREATE_ON_TRANSFORM"
        )

    class _DoNotCreate(SageMakerJavaWrapper):
        _wrapped_class = (
            _sagemaker_spark_sdk_package + ".EndpointCreationPolicy.DO_NOT_CREATE"
        )

    # Singletons mirroring the Scala enumeration values.
    CREATE_ON_CONSTRUCT = _CreateOnConstruct()
    CREATE_ON_TRANSFORM = _CreateOnTransform()
    DO_NOT_CREATE = _DoNotCreate()
class SageMakerEstimatorBase(SageMakerJavaWrapper, JavaEstimator):
    """Adapts a SageMaker learning Algorithm to a Spark Estimator.

    Fits a :class:`~sagemaker_pyspark.SageMakerModel` by running a SageMaker Training Job on a Spark
    Dataset. Each call to :meth:`.fit` submits a new SageMaker Training Job, creates a new
    SageMaker Model, and creates a new SageMaker Endpoint Config. A new Endpoint is either
    created by or the returned SageMakerModel is configured to generate an Endpoint on
    SageMakerModel transform.

    On fit, the input :class:`~pyspark.sql.Dataset` is serialized with the specified
    trainingSparkDataFormat using the specified trainingSparkDataFormatOptions and uploaded
    to an S3 location specified by ``trainingInputS3DataPath``. The serialized Dataset
    is compressed with ``trainingCompressionCodec``, if not None.

    ``trainingProjectedColumns`` can be used to control which columns on the input Dataset are
    transmitted to SageMaker. If not None, then only those column names will be serialized as input
    to the SageMaker Training Job.

    A Training Job is created with the uploaded Dataset being input to the specified
    ``trainingChannelName``, with the specified ``trainingInputMode``. The algorithm is
    specified ``trainingImage``, a Docker image URI reference. The Training Job is created with
    trainingInstanceCount instances of type ``trainingInstanceType``. The Training Job will
    time-out after attr:`trainingMaxRuntimeInSeconds`, if not None.

    SageMaker Training Job hyperparameters are built from the :class:`~pyspark.ml.param.Param`s
    set on this Estimator. Param objects set on this Estimator are retrieved during fit and
    converted to a SageMaker Training Job hyperparameter Map. Param objects are iterated over by
    invoking :meth:`pyspark.ml.param.Params.params` on this Estimator.
    Param objects with neither a default value nor a set value are ignored. If a Param is not set
    but has a default value, the default value will be used. Param values are converted to SageMaker
    hyperparameter String values.

    SageMaker uses the IAM Role with ARN ``sagemakerRole`` to access the input and output S3
    buckets and trainingImage if the image is hosted in ECR. SageMaker Training Job output is
    stored in a Training Job specific sub-prefix of ``trainingOutputS3DataPath``. This contains
    the SageMaker Training Job output file as well as the SageMaker Training Job model file.

    After the Training Job is created, this Estimator will poll for success. Upon success a
    SageMakerModel is created and returned from fit. The SageMakerModel is created with a
    modelImage Docker image URI, defining the SageMaker model primary container and with
    ``modelEnvironmentVariables`` environment variables. Each SageMakerModel has a corresponding
    SageMaker hosting Endpoint. This Endpoint runs on at least endpointInitialInstanceCount
    instances of type endpointInstanceType. The Endpoint is created either during construction of
    the SageMakerModel or on the first call to
    :class:`~sagemaker_pyspark.JavaSageMakerModel.transform`, controlled by
    ``endpointCreationPolicy``. Each Endpoint instance runs with sagemakerRole IAMRole.

    The transform method on SageMakerModel uses ``requestRowSerializer`` to serialize Rows from
    the Dataset undergoing transformation, to requests on the hosted SageMaker Endpoint. The
    ``responseRowDeserializer`` is used to convert the response from the Endpoint to a series of
    Rows, forming the transformed Dataset. If ``modelPrependInputRowsToTransformationRows`` is
    true, then each transformed Row is also prepended with its corresponding input Row.
    """

    __metaclass__ = ABCMeta

    @keyword_only
    def __init__(self, **kwargs):
        super(SageMakerEstimatorBase, self).__init__()
        # Subclasses construct the wrapped Scala estimator from the keyword
        # arguments; the JVM object is the source of truth for all state.
        self._java_obj = self._get_java_obj(**kwargs)
        # Adopt the uid generated on the Scala side so that the Python and
        # JVM pipeline stages agree on their identity.
        self._resetUid(self._call_java("uid"))

    @abstractmethod
    def _get_java_obj(self, **kwargs):
        """Create and return the wrapped Scala estimator object."""
        raise NotImplementedError()

    # Read-only views over the wrapped Scala estimator's fields; each call
    # delegates to the JVM object.
    @property
    def latestTrainingJob(self):
        return self._call_java("latestTrainingJob")

    @property
    def trainingImage(self):
        return self._call_java("trainingImage")

    @property
    def modelImage(self):
        return self._call_java("modelImage")

    @property
    def requestRowSerializer(self):
        return self._call_java("requestRowSerializer")

    @property
    def responseRowDeserializer(self):
        return self._call_java("responseRowDeserializer")

    @property
    def sagemakerRole(self):
        return self._call_java("sagemakerRole")

    @property
    def trainingInputS3DataPath(self):
        return self._call_java("trainingInputS3DataPath")

    @property
    def trainingOutputS3DataPath(self):
        return self._call_java("trainingOutputS3DataPath")

    @property
    def trainingInstanceType(self):
        return self._call_java("trainingInstanceType")

    @property
    def trainingInstanceCount(self):
        return self._call_java("trainingInstanceCount")

    @property
    def trainingInstanceVolumeSizeInGB(self):
        return self._call_java("trainingInstanceVolumeSizeInGB")

    @property
    def trainingProjectedColumns(self):
        return self._call_java("trainingProjectedColumns")

    @property
    def trainingChannelName(self):
        return self._call_java("trainingChannelName")

    @property
    def trainingContentType(self):
        return self._call_java("trainingContentType")

    @property
    def trainingS3DataDistribution(self):
        return self._call_java("trainingS3DataDistribution")

    @property
    def trainingSparkDataFormat(self):
        return self._call_java("trainingSparkDataFormat")

    @property
    def trainingSparkDataFormatOptions(self):
        return self._call_java("trainingSparkDataFormatOptions")

    @property
    def trainingInputMode(self):
        return self._call_java("trainingInputMode")

    @property
    def trainingCompressionCodec(self):
        return self._call_java("trainingCompressionCodec")

    @property
    def trainingMaxRuntimeInSeconds(self):
        return self._call_java("trainingMaxRuntimeInSeconds")

    @property
    def trainingKmsKeyId(self):
        return self._call_java("trainingKmsKeyId")

    @property
    def modelEnvironmentVariables(self):
        return self._call_java("modelEnvironmentVariables")

    @property
    def endpointInstanceType(self):
        return self._call_java("endpointInstanceType")

    @property
    def endpointInitialInstanceCount(self):
        return self._call_java("endpointInitialInstanceCount")

    @property
    def endpointCreationPolicy(self):
        return self._call_java("endpointCreationPolicy")

    @property
    def sagemakerClient(self):
        return self._call_java("sagemakerClient")

    @property
    def s3Client(self):
        return self._call_java("s3Client")

    @property
    def stsClient(self):
        return self._call_java("stsClient")

    @property
    def modelPrependInputRowsToTransformationRows(self):
        return self._call_java("modelPrependInputRowsToTransformationRows")

    @property
    def deleteStagingDataAfterTraining(self):
        return self._call_java("deleteStagingDataAfterTraining")

    @property
    def namePolicyFactory(self):
        return self._call_java("namePolicyFactory")

    @property
    def hyperParameters(self):
        return self._call_java("hyperParameters")

    def fit(self, dataset):
        """Fits a SageMakerModel on dataset by running a SageMaker training job.

        Args:
            dataset (Dataset): the dataset to use for the training job.

        Returns:
            JavaSageMakerModel: The Model created by the training job.
        """
        self._transfer_params_to_java()
        return self._call_java("fit", dataset)

    def copy(self, extra):
        """Copying is not supported for SageMaker estimators."""
        raise NotImplementedError()

    # Create model is a no-op for us since we override fit().
    def _create_model(self, java_model):
        pass

    def _to_java(self):
        self._transfer_params_to_java()
        # Bug fix: this previously returned the non-existent attribute
        # ``self._java_object`` and raised AttributeError; the wrapped JVM
        # object is stored in ``self._java_obj`` (set in __init__).
        return self._java_obj
class SageMakerEstimator(SageMakerEstimatorBase):
    """Adapts a SageMaker learning Algorithm to a Spark Estimator.

    Fits a :class:`~sagemaker_pyspark.SageMakerModel` by running a SageMaker Training Job on a Spark
    Dataset. Each call to :meth:`.fit` submits a new SageMaker Training Job, creates a new
    SageMaker Model, and creates a new SageMaker Endpoint Config. A new Endpoint is either
    created by or the returned SageMakerModel is configured to generate an Endpoint on
    SageMakerModel transform.

    On fit, the input :class:`~pyspark.sql.Dataset` is serialized with the specified
    trainingSparkDataFormat using the specified trainingSparkDataFormatOptions and uploaded
    to an S3 location specified by ``trainingInputS3DataPath``. The serialized Dataset
    is compressed with ``trainingCompressionCodec``, if not None.

    ``trainingProjectedColumns`` can be used to control which columns on the input Dataset are
    transmitted to SageMaker. If not None, then only those column names will be serialized as input
    to the SageMaker Training Job.

    A Training Job is created with the uploaded Dataset being input to the specified
    ``trainingChannelName``, with the specified ``trainingInputMode``. The algorithm is
    specified ``trainingImage``, a Docker image URI reference. The Training Job is created with
    trainingInstanceCount instances of type ``trainingInstanceType``. The Training Job will
    time-out after attr:`trainingMaxRuntimeInSeconds`, if not None.

    SageMaker Training Job hyperparameters are built from the :class:`~pyspark.ml.param.Param`s
    set on this Estimator. Param objects set on this Estimator are retrieved during fit and
    converted to a SageMaker Training Job hyperparameter Map. Param objects are iterated over by
    invoking :meth:`pyspark.ml.param.Params.params` on this Estimator.
    Param objects with neither a default value nor a set value are ignored. If a Param is not set
    but has a default value, the default value will be used. Param values are converted to SageMaker
    hyperparameter String values.

    SageMaker uses the IAM Role with ARN ``sagemakerRole`` to access the input and output S3
    buckets and trainingImage if the image is hosted in ECR. SageMaker Training Job output is
    stored in a Training Job specific sub-prefix of ``trainingOutputS3DataPath``. This contains
    the SageMaker Training Job output file as well as the SageMaker Training Job model file.

    After the Training Job is created, this Estimator will poll for success. Upon success a
    SageMakerModel is created and returned from fit. The SageMakerModel is created with a
    modelImage Docker image URI, defining the SageMaker model primary container and with
    ``modelEnvironmentVariables`` environment variables. Each SageMakerModel has a corresponding
    SageMaker hosting Endpoint. This Endpoint runs on at least endpointInitialInstanceCount
    instances of type endpointInstanceType. The Endpoint is created either during construction of
    the SageMakerModel or on the first call to
    :class:`~sagemaker_pyspark.JavaSageMakerModel.transform`, controlled by
    ``endpointCreationPolicy``. Each Endpoint instance runs with sagemakerRole IAMRole.

    The transform method on SageMakerModel uses ``requestRowSerializer`` to serialize Rows from
    the Dataset undergoing transformation, to requests on the hosted SageMaker Endpoint. The
    ``responseRowDeserializer`` is used to convert the response from the Endpoint to a series of
    Rows, forming the transformed Dataset. If ``modelPrependInputRowsToTransformationRows`` is
    true, then each transformed Row is also prepended with its corresponding input Row.

    Args:
        trainingImage (String): A SageMaker Training Job Algorithm Specification Training Image
            Docker image URI.
        modelImage (String): A SageMaker Model hosting Docker image URI.
        sageMakerRole (IAMRole): The SageMaker TrainingJob and Hosting IAM Role. Used by
            SageMaker to access S3 and ECR Resources. SageMaker hosted Endpoint instances
            launched by this Estimator run with this role.
        trainingInstanceType (str): The SageMaker TrainingJob Instance Type to use.
        trainingInstanceCount (int): The number of instances of instanceType to run an
            SageMaker Training Job with.
        endpointInstanceType (str): The SageMaker Endpoint Config instance type.
        endpointInitialInstanceCount (int): The SageMaker Endpoint Config minimum number of
            instances that can be used to host modelImage.
        requestRowSerializer (RequestRowSerializer): Serializes Spark DataFrame Rows for
            transformation by Models built from this Estimator.
        responseRowDeserializer (ResponseRowDeserializer): Deserializes an Endpoint response into a
            series of Rows.
        hyperParameters (dict): A dict from hyperParameter names to their respective values for
            training.
        trainingInputS3DataPath (S3Resource): An S3 location to upload SageMaker Training Job input
            data to.
        trainingOutputS3DataPath (S3Resource): An S3 location for SageMaker to store Training Job
            output data to.
        trainingInstanceVolumeSizeInGB (int): The EBS volume size in gigabytes of each instance.
        trainingProjectedColumns (List): The columns to project from the Dataset being fit before
            training. If an Optional.empty is passed then no specific projection will occur and
            all columns will be serialized.
        trainingChannelName (str): The SageMaker Channel name to input serialized Dataset fit
            input to.
        trainingContentType (str): The MIME type of the training data.
        trainingS3DataDistribution (str): The SageMaker Training Job S3 data distribution scheme.
        trainingSparkDataFormat (str): The Spark Data Format name used to serialize the Dataset
            being fit for input to SageMaker.
        trainingSparkDataFormatOptions (dict): The Spark Data Format Options used during
            serialization of the Dataset being fit.
        trainingInputMode (str): The SageMaker Training Job Channel input mode.
        trainingCompressionCodec (str): The type of compression to use when serializing the
            Dataset being fit for input to SageMaker.
        trainingMaxRuntimeInSeconds (int): A SageMaker Training Job Termination Condition
            MaxRuntimeInSeconds.
        trainingKmsKeyId (str): A KMS key ID for the Output Data Source.
        modelEnvironmentVariables (dict): The environment variables that SageMaker will set on the
            model container during execution.
        endpointCreationPolicy (EndpointCreationPolicy): Defines how a SageMaker Endpoint
            referenced by a SageMakerModel is created.
        sagemakerClient (AmazonSageMaker): Amazon SageMaker client. Used to send CreateTrainingJob,
            CreateModel, and CreateEndpoint requests.
        s3Client (AmazonS3): Used to create a bucket for staging SageMaker Training Job
            input and/or output if either are set to S3AutoCreatePath.
        stsClient (AmazonSTS): Used to resolve the account number when creating staging
            input / output buckets.
        modelPrependInputRowsToTransformationRows (bool): Whether the transformation result on
            Models built by this Estimator should also include the input Rows. If true,
            each output Row is formed by a concatenation of the input Row with the corresponding
            Row produced by SageMaker Endpoint invocation, produced by responseRowDeserializer.
            If false, each output Row is just taken from responseRowDeserializer.
        deleteStagingDataAfterTraining (bool): Whether to remove the training data on s3 after
            training is complete or failed.
        namePolicyFactory (NamePolicyFactory): The NamePolicyFactory to use when naming SageMaker
            entities created during fit.
        uid (str): The unique identifier of this Estimator. Used to represent this stage in Spark
            ML pipelines.
    """

    _wrapped_class = "com.amazonaws.services.sagemaker.sparksdk.SageMakerEstimator"

    def __init__(self,
                 trainingImage,
                 modelImage,
                 trainingInstanceType,
                 trainingInstanceCount,
                 endpointInstanceType,
                 endpointInitialInstanceCount,
                 requestRowSerializer,
                 responseRowDeserializer,
                 hyperParameters=None,
                 trainingInputS3DataPath=S3AutoCreatePath(),
                 trainingOutputS3DataPath=S3AutoCreatePath(),
                 trainingInstanceVolumeSizeInGB=1024,
                 trainingProjectedColumns=None,
                 trainingChannelName="train",
                 trainingContentType=None,
                 trainingS3DataDistribution="ShardedByS3Key",
                 trainingSparkDataFormat="sagemaker",
                 trainingSparkDataFormatOptions=None,
                 trainingInputMode="File",
                 trainingCompressionCodec=None,
                 trainingMaxRuntimeInSeconds=24*60*60,
                 trainingKmsKeyId=None,
                 modelEnvironmentVariables=None,
                 endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
                 sagemakerClient=SageMakerClients.create_sagemaker_client(),
                 sagemakerRole=IAMRoleFromConfig(),
                 s3Client=SageMakerClients.create_s3_default_client(),
                 stsClient=SageMakerClients.create_sts_default_client(),
                 modelPrependInputRowsToTransformationRows=True,
                 deleteStagingDataAfterTraining=True,
                 namePolicyFactory=RandomNamePolicyFactory(),
                 uid=None):
        # Replace mutable-default placeholders with fresh dicts per instance.
        if trainingSparkDataFormatOptions is None:
            trainingSparkDataFormatOptions = {}
        if modelEnvironmentVariables is None:
            modelEnvironmentVariables = {}
        if hyperParameters is None:
            hyperParameters = {}
        if uid is None:
            uid = Identifiable._randomUID()
        # Capture every constructor argument by name; _get_java_obj reads the
        # values it needs from this mapping. This must happen before any new
        # local variables are introduced, or they would leak into kwargs.
        kwargs = locals().copy()
        del kwargs['self']
        super(SageMakerEstimator, self).__init__(**kwargs)

    def _get_java_obj(self, **kwargs):
        """Instantiate the wrapped Scala SageMakerEstimator.

        Nullable arguments are wrapped in ``Option`` to map Python ``None``
        onto Scala ``Option`` values. The argument order must match the Scala
        constructor exactly.
        """
        return self._new_java_obj(
            SageMakerEstimator._wrapped_class,
            kwargs['trainingImage'],
            kwargs['modelImage'],
            kwargs['sagemakerRole'],
            kwargs['trainingInstanceType'],
            kwargs['trainingInstanceCount'],
            kwargs['endpointInstanceType'],
            kwargs['endpointInitialInstanceCount'],
            kwargs['requestRowSerializer'],
            kwargs['responseRowDeserializer'],
            kwargs['trainingInputS3DataPath'],
            kwargs['trainingOutputS3DataPath'],
            kwargs['trainingInstanceVolumeSizeInGB'],
            Option(kwargs['trainingProjectedColumns']),
            kwargs['trainingChannelName'],
            Option(kwargs['trainingContentType']),
            kwargs['trainingS3DataDistribution'],
            kwargs['trainingSparkDataFormat'],
            kwargs['trainingSparkDataFormatOptions'],
            kwargs['trainingInputMode'],
            Option(kwargs['trainingCompressionCodec']),
            kwargs['trainingMaxRuntimeInSeconds'],
            Option(kwargs['trainingKmsKeyId']),
            kwargs['modelEnvironmentVariables'],
            kwargs['endpointCreationPolicy'],
            kwargs['sagemakerClient'],
            kwargs['s3Client'],
            kwargs['stsClient'],
            kwargs['modelPrependInputRowsToTransformationRows'],
            kwargs['deleteStagingDataAfterTraining'],
            kwargs['namePolicyFactory'],
            kwargs['uid'],
            kwargs['hyperParameters']
        )
from pyspark import keyword_only
from pyspark.ml.util import Identifiable
from pyspark.ml.wrapper import JavaModel
from sagemaker_pyspark import (SageMakerJavaWrapper, Option, EndpointCreationPolicy,
RandomNamePolicy, SageMakerClients)
class SageMakerModel(SageMakerJavaWrapper, JavaModel):
"""
A Model implementation which transforms a DataFrame by making requests to a SageMaker Endpoint.
Manages life cycle of all necessary SageMaker entities, including Model, EndpointConfig,
and Endpoint.
This Model transforms one DataFrame to another by repeated, distributed SageMaker Endpoint
invocation.
Each invocation request body is formed by concatenating input DataFrame Rows serialized to
Byte Arrays by the specified
:class:`~sagemaker_pyspark.transformation.serializers.RequestRowSerializer`. The
invocation request content-type property is set from
:attr:`RequestRowSerializer.contentType`. The invocation request accepts property is set
from :attr:`ResponseRowDeserializer.accepts`.
The transformed DataFrame is produced by deserializing each invocation response body into a
series of Rows. Row deserialization is delegated to the specified
:class:`~sagemaker_pyspark.transformation.deserializers.ResponseRowDeserializer`. If
prependInputRows is false, the transformed DataFrame
will contain just these Rows. If prependInputRows is true, then each transformed Row is a
concatenation of the input Row with its corresponding SageMaker invocation deserialized Row.
Each invocation of :meth:`~sagemaker_pyspark.JavaSageMakerModel.transform` passes the
:attr:`Dataset.schema` of the input DataFrame to requestRowSerialize by invoking
:meth:`RequestRowSerializer.setSchema`.
The specified RequestRowSerializer also controls the validity of input Row Schemas for
this Model. Schema validation is carried out on each call to
:meth:`~sagemaker_pyspark.JavaSageMakerModel.transformSchema`, which invokes
:meth:`RequestRowSerializer.validateSchema`.
Adapting this SageMaker model to the data format and type of a specific Endpoint is achieved by
sub-classing RequestRowSerializer and ResponseRowDeserializer.
Examples of a Serializer and Deseralizer are
:class:`~sagemaker_pyspark.transformation.serializers.LibSVMRequestRowSerializer` and
:class:`~sagemaker_pyspark.transformation.deserializers.LibSVMResponseRowDeserializer`
respectively.
Args:
endpointInstanceType (str): The instance type used to run the model container
endpointInitialInstanceCount (int): The initial number of instances used to host the model
requestRowSerializer (RequestRowSerializer): Serializes a Row to an Array of Bytes
responseRowDeserializer (ResponseRowDeserializer): Deserializes an Array of Bytes to a
series of Rows
existingEndpointName (str): An endpoint name
modelImage (str): A Docker image URI
modelPath (str): An S3 location that a successfully completed SageMaker Training Job has
stored its model output to.
modelEnvironmentVariables (dict): The environment variables that SageMaker will set on the
model container during execution.
modelExecutionRoleARN (str): The IAM Role used by SageMaker when running the hosted Model
and to download model data from S3
endpointCreationPolicy (EndpointCreationPolicy): Whether the endpoint is created upon
SageMakerModel construction, transformation, or not at all.
        sagemakerClient (AmazonSageMaker): Amazon SageMaker client. Used to send CreateTrainingJob,
CreateModel, and CreateEndpoint requests.
prependResultRows (bool): Whether the transformation result should also include the input
Rows. If true, each output Row is formed by a concatenation of the input Row with the
corresponding Row produced by SageMaker invocation, produced by responseRowDeserializer.
If false, each output Row is just taken from responseRowDeserializer.
namePolicy (NamePolicy): The NamePolicy to use when naming SageMaker entities created during
usage of this Model.
uid (str): The unique identifier of this Estimator. Used to represent this stage in Spark ML
pipelines.
"""
_wrapped_class = "com.amazonaws.services.sagemaker.sparksdk.SageMakerModel"
@keyword_only
def __init__(self,
endpointInstanceType,
endpointInitialInstanceCount,
requestRowSerializer,
responseRowDeserializer,
existingEndpointName=None,
modelImage=None,
modelPath=None,
modelEnvironmentVariables=None,
modelExecutionRoleARN=None,
endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
sagemakerClient=SageMakerClients.create_sagemaker_client(),
prependResultRows=True,
namePolicy=RandomNamePolicy(),
uid=None,
javaObject=None):
super(SageMakerModel, self).__init__()
if modelEnvironmentVariables is None:
modelEnvironmentVariables = {}
if javaObject:
self._java_obj = javaObject
else:
if uid is None:
uid = Identifiable._randomUID()
self._java_obj = self._new_java_obj(
SageMakerModel._wrapped_class,
Option(endpointInstanceType),
Option(endpointInitialInstanceCount),
requestRowSerializer,
responseRowDeserializer,
Option(existingEndpointName),
Option(modelImage),
Option(modelPath),
modelEnvironmentVariables,
Option(modelExecutionRoleARN),
endpointCreationPolicy,
sagemakerClient,
prependResultRows,
namePolicy,
uid
)
self._resetUid(self._call_java("uid"))
@classmethod
def fromTrainingJob(cls,
trainingJobName,
modelImage,
modelExecutionRoleARN,
endpointInstanceType,
endpointInitialInstanceCount,
requestRowSerializer,
responseRowDeserializer,
modelEnvironmentVariables=None,
endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
sagemakerClient=SageMakerClients.create_sagemaker_client(),
prependResultRows=True,
namePolicy=RandomNamePolicy(),
uid="sagemaker"):
""" Creates a JavaSageMakerModel from a successfully completed training job name.
The returned JavaSageMakerModel can be used to transform DataFrames.
Args:
trainingJobName (str): Name of the successfully completed training job.
modelImage (str): URI of the image that will serve model inferences.
modelExecutionRoleARN (str): The IAM Role used by SageMaker when running the hosted
Model and to download model data from S3.
endpointInstanceType (str): The instance type used to run the model container.
endpointInitialInstanceCount (int): The initial number of instances used to host the
model.
requestRowSerializer (RequestRowSerializer): Serializes a row to an array of bytes.
responseRowDeserializer (ResponseRowDeserializer): Deserializes an array of bytes to a
series of rows.
modelEnvironmentVariables: The environment variables that SageMaker will set on the
model container during execution.
endpointCreationPolicy (EndpointCreationPolicy): Whether the endpoint is created upon
SageMakerModel construction, transformation, or not at all.
sagemakerClient (AmazonSageMaker) Amazon SageMaker client. Used to send
CreateTrainingJob, CreateModel, and CreateEndpoint requests.
prependResultRows (bool): Whether the transformation result should also include the
input Rows. If true, each output Row is formed by a concatenation of the input Row
with the corresponding Row produced by SageMaker invocation, produced by
responseRowDeserializer. If false, each output Row is just taken from
responseRowDeserializer.
namePolicy (NamePolicy): The NamePolicy to use when naming SageMaker entities created
during usage of the returned model.
uid (String): The unique identifier of the SageMakerModel. Used to represent the stage
in Spark ML pipelines.
Returns:
JavaSageMakerModel: a JavaSageMakerModel that sends InvokeEndpoint requests to an
endpoint hosting the training job's model.
"""
scala_function = "%s.fromTrainingJob" % SageMakerModel._wrapped_class
if modelEnvironmentVariables is None:
modelEnvironmentVariables = {}
model_java_obj = SageMakerJavaWrapper()._new_java_obj(
scala_function,
trainingJobName,
modelImage,
modelExecutionRoleARN,
endpointInstanceType,
endpointInitialInstanceCount,
requestRowSerializer,
responseRowDeserializer,
modelEnvironmentVariables,
endpointCreationPolicy,
sagemakerClient,
prependResultRows,
namePolicy,
uid)
return SageMakerModel(
endpointInstanceType=endpointInstanceType,
endpointInitialInstanceCount=endpointInitialInstanceCount,
requestRowSerializer=requestRowSerializer,
responseRowDeserializer=responseRowDeserializer,
javaObject=model_java_obj)
@classmethod
def fromEndpoint(cls,
endpointName,
requestRowSerializer,
responseRowDeserializer,
modelEnvironmentVariables=None,
sagemakerClient=SageMakerClients.create_sagemaker_client(),
prependResultRows=True,
namePolicy=RandomNamePolicy(),
uid="sagemaker"):
""" Creates a JavaSageMakerModel from existing model data in S3.
The returned JavaSageMakerModel can be used to transform Dataframes.
Args:
endpointName (str): The name of an endpoint that is currently in service.
requestRowSerializer (RequestRowSerializer): Serializes a row to an array of bytes.
responseRowDeserializer (ResponseRowDeserializer): Deserializes an array of bytes to a
series of rows.
modelEnvironmentVariables: The environment variables that SageMaker will set on the
model container during execution.
sagemakerClient (AmazonSageMaker) Amazon SageMaker client. Used to send
CreateTrainingJob, CreateModel, and CreateEndpoint requests.
prependResultRows (bool): Whether the transformation result should also include the
input Rows. If true, each output Row is formed by a concatenation of the input Row
with the corresponding Row produced by SageMaker invocation, produced by
responseRowDeserializer. If false, each output Row is just taken from
responseRowDeserializer.
namePolicy (NamePolicy): The NamePolicy to use when naming SageMaker entities created
during usage of the returned model.
uid (String): The unique identifier of the SageMakerModel. Used to represent the stage
in Spark ML pipelines.
Returns:
JavaSageMakerModel:
A JavaSageMakerModel that sends InvokeEndpoint requests to an endpoint hosting
the training job's model.
"""
scala_function = "%s.fromEndpoint" % SageMakerModel._wrapped_class
if modelEnvironmentVariables is None:
modelEnvironmentVariables = {}
model_java_obj = SageMakerJavaWrapper()._new_java_obj(
scala_function,
endpointName,
requestRowSerializer,
responseRowDeserializer,
modelEnvironmentVariables,
sagemakerClient,
prependResultRows,
namePolicy,
uid)
return SageMakerModel(
endpointInstanceType=None,
endpointInitialInstanceCount=None,
requestRowSerializer=requestRowSerializer,
responseRowDeserializer=responseRowDeserializer,
javaObject=model_java_obj)
@classmethod
def fromModelS3Path(cls,
modelPath,
modelImage,
modelExecutionRoleARN,
endpointInstanceType,
endpointInitialInstanceCount,
requestRowSerializer,
responseRowDeserializer,
modelEnvironmentVariables=None,
endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
sagemakerClient=SageMakerClients.create_sagemaker_client(),
prependResultRows=True,
namePolicy=RandomNamePolicy(),
uid="sagemaker"):
""" Creates a JavaSageMakerModel from existing model data in S3.
The returned JavaSageMakerModel can be used to transform Dataframes.
Args:
modelPath (str): The S3 URI to the model data to host.
modelImage (str): The URI of the image that will serve model inferences.
modelExecutionRoleARN (str): The IAM Role used by SageMaker when running the hosted
Model and to download model data from S3.
endpointInstanceType (str): The instance type used to run the model container.
endpointInitialInstanceCount (int): The initial number of instances used to host the
model.
requestRowSerializer (RequestRowSerializer): Serializes a row to an array of bytes.
responseRowDeserializer (ResponseRowDeserializer): Deserializes an array of bytes to a
series of rows.
modelEnvironmentVariables: The environment variables that SageMaker will set on the
model container during execution.
endpointCreationPolicy (EndpointCreationPolicy): Whether the endpoint is created upon
SageMakerModel construction, transformation, or not at all.
sagemakerClient (AmazonSageMaker) Amazon SageMaker client. Used to send
CreateTrainingJob, CreateModel, and CreateEndpoint requests.
prependResultRows (bool): Whether the transformation result should also include the
input Rows. If true, each output Row is formed by a concatenation of the input Row
with the corresponding Row produced by SageMaker invocation, produced by
responseRowDeserializer. If false, each output Row is just taken from
responseRowDeserializer.
namePolicy (NamePolicy): The NamePolicy to use when naming SageMaker entities created
during usage of the returned model.
uid (String): The unique identifier of the SageMakerModel. Used to represent the stage
in Spark ML pipelines.
Returns:
JavaSageMakerModel:
A JavaSageMakerModel that sends InvokeEndpoint requests to an endpoint hosting
the training job's model.
"""
scala_function = "%s.fromModelS3Path" % SageMakerModel._wrapped_class
if modelEnvironmentVariables is None:
modelEnvironmentVariables = {}
model_java_obj = SageMakerJavaWrapper()._new_java_obj(
scala_function,
modelPath,
modelImage,
modelExecutionRoleARN,
endpointInstanceType,
endpointInitialInstanceCount,
requestRowSerializer,
responseRowDeserializer,
modelEnvironmentVariables,
endpointCreationPolicy,
sagemakerClient,
prependResultRows,
namePolicy,
uid)
return SageMakerModel(
endpointInstanceType=endpointInstanceType,
endpointInitialInstanceCount=endpointInitialInstanceCount,
requestRowSerializer=requestRowSerializer,
responseRowDeserializer=responseRowDeserializer,
javaObject=model_java_obj)
    @property
    def endpointInstanceType(self):
        """Endpoint instance type; read from the wrapped Java object."""
        return self._call_java("endpointInstanceType")
    @property
    def endpointInitialInstanceCount(self):
        """Initial endpoint instance count; read from the wrapped Java object."""
        return self._call_java("endpointInitialInstanceCount")
    @property
    def requestRowSerializer(self):
        """RequestRowSerializer used for InvokeEndpoint requests; from the Java object."""
        return self._call_java("requestRowSerializer")
    @property
    def responseRowDeserializer(self):
        """ResponseRowDeserializer used for InvokeEndpoint responses; from the Java object."""
        return self._call_java("responseRowDeserializer")
    @property
    def existingEndpointName(self):
        """Name of a pre-existing endpoint, if this model wraps one; from the Java object."""
        return self._call_java("existingEndpointName")
    @property
    def modelImage(self):
        """URI of the inference image; read from the wrapped Java object."""
        return self._call_java("modelImage")
    @property
    def modelPath(self):
        """S3 URI of the hosted model data; read from the wrapped Java object."""
        return self._call_java("modelPath")
    @property
    def modelEnvironmentVariables(self):
        """Environment variables set on the model container; from the Java object."""
        return self._call_java("modelEnvironmentVariables")
    @property
    def modelExecutionRoleARN(self):
        """IAM role ARN used by the hosted model; read from the wrapped Java object."""
        return self._call_java("modelExecutionRoleARN")
    @property
    def sagemakerClient(self):
        """Amazon SageMaker client held by the wrapped Java object."""
        return self._call_java("sagemakerClient")
    @property
    def endpointCreationPolicy(self):
        """EndpointCreationPolicy governing endpoint creation; from the Java object."""
        return self._call_java("endpointCreationPolicy")
    @property
    def prependResultRows(self):
        """Whether input Rows are prepended to transformation results; from the Java object."""
        return self._call_java("prependResultRows")
    @property
    def namePolicy(self):
        """NamePolicy used when naming SageMaker entities; from the Java object."""
        return self._call_java("namePolicy")
    @property
    def endpointName(self):
        """Name of the endpoint this model invokes; read from the wrapped Java object."""
        return self._call_java("endpointName")
    @property
    def resourceCleanup(self):
        """Resource-cleanup helper held by the wrapped Java object."""
        return self._call_java("resourceCleanup")
    def getCreatedResources(self):
        """Returns the SageMaker resources created by this model (delegated to Java)."""
        return self._call_java("getCreatedResources")
    def transform(self, dataset):
        """Transforms the dataset by invoking the SageMaker endpoint (delegated to Java)."""
        return self._call_java("transform", dataset)
    def transformSchema(self, schema):
        """Returns the output schema for the given input schema (delegated to Java)."""
        return self._call_java("transformSchema", schema)
    def _to_java(self):
        # The wrapped Java object was constructed by __init__ / the factory classmethods.
        return self._java_obj
    @classmethod
    def _from_java(cls, JavaObject):
        """Wraps an existing Java SageMakerModel; endpoint/serializer fields are left unset
        on the Python side and resolved through the Java object's accessors."""
        return SageMakerModel(endpointInstanceType=None,
                              endpointInitialInstanceCount=None,
                              requestRowSerializer=None,
                              responseRowDeserializer=None,
                              javaObject=JavaObject)
def _transform(self, dataset):
pass | /sagemaker_pyspark-1.4.5.tar.gz/sagemaker_pyspark-1.4.5/src/sagemaker_pyspark/SageMakerModel.py | 0.927573 | 0.343755 | SageMakerModel.py | pypi |
from abc import ABCMeta
from sagemaker_pyspark import SageMakerJavaWrapper, Option
class RequestRowSerializer(SageMakerJavaWrapper):
    """Abstract base class for serializers that convert Spark Rows into request bytes."""
    # Python 2-style metaclass declaration; on Python 3 this attribute is inert
    # (the class behaves as a plain SageMakerJavaWrapper subclass there).
    __metaclass__ = ABCMeta
    def setSchema(self, schema):
        """
        Sets the rowSchema for this RequestRowSerializer.
        Args:
            schema (StructType): the schema that this RequestRowSerializer will use.
        """
        # Delegates to the wrapped Java serializer object.
        self._call_java("setSchema", schema)
class UnlabeledCSVRequestRowSerializer(RequestRowSerializer):
    """
    Serializes the features column of each Row as an unlabeled CSV record, matching the
    current implementation of the scoring service.
    Args:
        schema (StructType): The schema of Rows being serialized. Optional, as the schema
            may not be known when this serializer is constructed.
        featuresColumnName (str): name of the features column.
    """
    _wrapped_class = ("com.amazonaws.services.sagemaker.sparksdk.transformation."
                      "serializers.UnlabeledCSVRequestRowSerializer")
    def __init__(self, schema=None, featuresColumnName="features"):
        self._java_obj = None
        self.schema = schema
        self.featuresColumnName = featuresColumnName
    def _to_java(self):
        # Lazily build and cache the wrapped Java serializer.
        if self._java_obj is not None:
            return self._java_obj
        self._java_obj = self._new_java_obj(
            UnlabeledCSVRequestRowSerializer._wrapped_class,
            Option(self.schema),
            self.featuresColumnName)
        return self._java_obj
    @classmethod
    def _from_java(cls, JavaObject):
        raise NotImplementedError()
class ProtobufRequestRowSerializer(RequestRowSerializer):
    """
    A RequestRowSerializer for converting labeled rows to SageMaker Protobuf-in-recordio request
    data.
    Args:
        schema (StructType): The schema of Rows being serialized. This parameter is optional as
            the schema may not be known when this serializer is constructed.
        featuresColumnName (str): The name of the features column.
    """
    _wrapped_class = ("com.amazonaws.services.sagemaker.sparksdk.transformation."
                      "serializers.ProtobufRequestRowSerializer")
    def __init__(self, schema=None, featuresColumnName="features"):
        self._java_obj = None
        self.schema = schema
        self.featuresColumnName = featuresColumnName
    def _to_java(self):
        # Lazily build and cache the wrapped Java serializer.
        if self._java_obj is not None:
            return self._java_obj
        self._java_obj = self._new_java_obj(
            ProtobufRequestRowSerializer._wrapped_class,
            Option(self.schema),
            self.featuresColumnName)
        return self._java_obj
    @classmethod
    def _from_java(cls, JavaObject):
        raise NotImplementedError()
class LibSVMRequestRowSerializer(RequestRowSerializer):
    """
    Extracts a label column and features column from a Row and serializes as a LibSVM record.
    Each Row must contain a Double column and a Vector column containing the label and features
    respectively. Row field indexes for the label and features are obtained by looking up the
    index of labelColumnName and featuresColumnName respectively in the specified schema.
    A schema must be specified before this RequestRowSerializer can be used by a client. The
    schema is set either on instantiation of this RequestRowSerializer or by
    :meth:`RequestRowSerializer.setSchema`.
    Args:
        schema (StructType): The schema of Rows being serialized. This parameter is optional as
            the schema may not be known when this serializer is constructed.
        labelColumnName (str): The name of the label column.
        featuresColumnName (str): The name of the features column.
    """
    _wrapped_class = ("com.amazonaws.services.sagemaker.sparksdk.transformation."
                      "serializers.LibSVMRequestRowSerializer")
    def __init__(self, schema=None, labelColumnName="label", featuresColumnName="features"):
        self._java_obj = None
        self.schema = schema
        self.labelColumnName = labelColumnName
        self.featuresColumnName = featuresColumnName
    def _to_java(self):
        # Lazily build and cache the wrapped Java serializer.
        if self._java_obj is not None:
            return self._java_obj
        self._java_obj = self._new_java_obj(
            LibSVMRequestRowSerializer._wrapped_class,
            Option(self.schema),
            self.labelColumnName,
            self.featuresColumnName)
        return self._java_obj
    @classmethod
    def _from_java(cls, JavaObject):
        raise NotImplementedError()
from pyspark.ml.param import Params, Param, TypeConverters
from pyspark.ml.util import Identifiable
from sagemaker_pyspark import (SageMakerEstimatorBase, S3AutoCreatePath, Option, IAMRoleFromConfig,
EndpointCreationPolicy, SageMakerClients, RandomNamePolicyFactory)
from sagemaker_pyspark.transformation.serializers import ProtobufRequestRowSerializer
from sagemaker_pyspark.transformation.deserializers import PCAProtobufResponseRowDeserializer
class PCASageMakerEstimator(SageMakerEstimatorBase):
    """
    A :class:`~sagemaker_pyspark.SageMakerEstimator` that runs a PCA training job in SageMaker and
    returns a :class:`~sagemaker_pyspark.SageMakerModel` that can be used to transform a DataFrame
    using the hosted PCA model. PCA, or Principal Component Analysis, is useful for reducing the
    dimensionality of data before training with another algorithm.
    Amazon SageMaker PCA trains on RecordIO-encoded Amazon Record protobuf data.
    SageMaker pyspark writes a DataFrame to S3 by selecting a column of Vectors named "features"
    and, if present, a column of Doubles named "label". These names are configurable by passing a
    dictionary with entries in trainingSparkDataFormatOptions with key "labelColumnName" or
    "featuresColumnName", with values corresponding to the desired label and features columns.
    PCASageMakerEstimator uses
    :class:`~sagemaker_pyspark.transformation.serializers.ProtobufRequestRowSerializer` to serialize
    Rows into RecordIO-encoded Amazon Record protobuf messages for inference, by default selecting
    the column named "features" expected to contain a Vector of Doubles.
    Inferences made against an Endpoint hosting a PCA model contain a "projection" field appended
    to the input DataFrame as a Dense Vector of Doubles.
    Args:
        sageMakerRole (IAMRole): The SageMaker TrainingJob and Hosting IAM Role. Used by
            SageMaker to access S3 and ECR Resources. SageMaker hosted Endpoint instances
            launched by this Estimator run with this role.
        trainingInstanceType (str): The SageMaker TrainingJob Instance Type to use.
        trainingInstanceCount (int): The number of instances of instanceType to run an
            SageMaker Training Job with.
        endpointInstanceType (str): The SageMaker Endpoint Config instance type.
        endpointInitialInstanceCount (int): The SageMaker Endpoint Config minimum number of
            instances that can be used to host modelImage.
        requestRowSerializer (RequestRowSerializer): Serializes Spark DataFrame Rows for
            transformation by Models built from this Estimator.
        responseRowDeserializer (ResponseRowDeserializer): Deserializes an Endpoint response into a
            series of Rows.
        trainingInputS3DataPath (S3Resource): An S3 location to upload SageMaker Training Job input
            data to.
        trainingOutputS3DataPath (S3Resource): An S3 location for SageMaker to store Training Job
            output data to.
        trainingInstanceVolumeSizeInGB (int): The EBS volume size in gigabytes of each instance.
        trainingProjectedColumns (List): The columns to project from the Dataset being fit before
            training. If an Optional.empty is passed then no specific projection will occur and
            all columns will be serialized.
        trainingChannelName (str): The SageMaker Channel name to input serialized Dataset fit
            input to.
        trainingContentType (str): The MIME type of the training data.
        trainingS3DataDistribution (str): The SageMaker Training Job S3 data distribution scheme.
        trainingSparkDataFormat (str): The Spark Data Format name used to serialize the Dataset
            being fit for input to SageMaker.
        trainingSparkDataFormatOptions (dict): The Spark Data Format Options used during
            serialization of the Dataset being fit.
        trainingInputMode (str): The SageMaker Training Job Channel input mode.
        trainingCompressionCodec (str): The type of compression to use when serializing the
            Dataset being fit for input to SageMaker.
        trainingMaxRuntimeInSeconds (int): A SageMaker Training Job Termination Condition
            MaxRuntimeInHours.
        trainingKmsKeyId (str): A KMS key ID for the Output Data Source.
        modelEnvironmentVariables (dict): The environment variables that SageMaker will set on the
            model container during execution.
        endpointCreationPolicy (EndpointCreationPolicy): Defines how a SageMaker Endpoint
            referenced by a SageMakerModel is created.
        sagemakerClient (AmazonSageMaker): Amazon SageMaker client. Used to send CreateTrainingJob,
            CreateModel, and CreateEndpoint requests.
        region (str): The region in which to run the algorithm. If not specified, gets the region
            from the DefaultAwsRegionProviderChain.
        s3Client (AmazonS3): Used to create a bucket for staging SageMaker Training Job
            input and/or output if either are set to S3AutoCreatePath.
        stsClient (AmazonSTS): Used to resolve the account number when creating staging
            input / output buckets.
        modelPrependInputRowsToTransformationRows (bool): Whether the transformation result on
            Models built by this Estimator should also include the input Rows. If true,
            each output Row is formed by a concatenation of the input Row with the corresponding
            Row produced by SageMaker Endpoint invocation, produced by responseRowDeserializer.
            If false, each output Row is just taken from responseRowDeserializer.
        deleteStagingDataAfterTraining (bool): Whether to remove the training data on s3 after
            training is complete or failed.
        namePolicyFactory (NamePolicyFactory): The NamePolicyFactory to use when naming SageMaker
            entities created during fit.
        uid (str): The unique identifier of this Estimator. Used to represent this stage in Spark
            ML pipelines.
    """
    _wrapped_class = "com.amazonaws.services.sagemaker.sparksdk.algorithms.PCASageMakerEstimator"
    # SageMaker PCA hyper-parameters, mirrored from the wrapped Scala estimator.
    # Note: subtract_mean is modelled as the strings 'True'/'False' (not bool) to
    # match the wrapped implementation's string-typed parameter.
    num_components = Param(Params._dummy(), "num_components",
                           "Number of principal components we wish to compute. Must be > 0",
                           typeConverter=TypeConverters.toInt)
    algorithm_mode = Param(Params._dummy(), "algorithm_mode",
                           # Fixed: a ". " separator was missing between the two fragments.
                           "Determines the algorithm computing the principal components. " +
                           "Supported options: 'regular', 'stable' and 'randomized'.",
                           typeConverter=TypeConverters.toString)
    subtract_mean = Param(Params._dummy(), "subtract_mean",
                          "If true, the data will be unbiased both during training and " +
                          "inference",
                          typeConverter=TypeConverters.toString)
    extra_components = Param(Params._dummy(), "extra_components",
                             # Fixed: a ". " separator was missing between the two fragments.
                             "Number of extra components to compute. " +
                             "Valid for 'randomized' mode. Ignored by other modes."
                             " Must be -1 or > 0",
                             typeConverter=TypeConverters.toInt)
    mini_batch_size = Param(Params._dummy(), "mini_batch_size",
                            "The number of examples in a mini-batch. Must be > 0",
                            typeConverter=TypeConverters.toInt)
    feature_dim = Param(Params._dummy(), "feature_dim",
                        "The dimension of the input vectors. Must be > 0",
                        typeConverter=TypeConverters.toInt)
    def __init__(self,
                 trainingInstanceType,
                 trainingInstanceCount,
                 endpointInstanceType,
                 endpointInitialInstanceCount,
                 sagemakerRole=IAMRoleFromConfig(),
                 requestRowSerializer=ProtobufRequestRowSerializer(),
                 responseRowDeserializer=PCAProtobufResponseRowDeserializer(),
                 trainingInputS3DataPath=S3AutoCreatePath(),
                 trainingOutputS3DataPath=S3AutoCreatePath(),
                 trainingInstanceVolumeSizeInGB=1024,
                 trainingProjectedColumns=None,
                 trainingChannelName="train",
                 trainingContentType=None,
                 trainingS3DataDistribution="ShardedByS3Key",
                 trainingSparkDataFormat="sagemaker",
                 trainingSparkDataFormatOptions=None,
                 trainingInputMode="File",
                 trainingCompressionCodec=None,
                 trainingMaxRuntimeInSeconds=24*60*60,
                 trainingKmsKeyId=None,
                 modelEnvironmentVariables=None,
                 endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
                 sagemakerClient=SageMakerClients.create_sagemaker_client(),
                 region=None,
                 s3Client=SageMakerClients.create_s3_default_client(),
                 stsClient=SageMakerClients.create_sts_default_client(),
                 modelPrependInputRowsToTransformationRows=True,
                 deleteStagingDataAfterTraining=True,
                 namePolicyFactory=RandomNamePolicyFactory(),
                 uid=None):
        if trainingSparkDataFormatOptions is None:
            trainingSparkDataFormatOptions = {}
        if modelEnvironmentVariables is None:
            modelEnvironmentVariables = {}
        if uid is None:
            uid = Identifiable._randomUID()
        # locals() snapshots every parameter (with the normalized defaults above)
        # for forwarding to the base class; no new local variables may be
        # introduced before this point.
        kwargs = locals().copy()
        del kwargs['self']
        super(PCASageMakerEstimator, self).__init__(**kwargs)
        self._setDefault(subtract_mean='True')
    def _get_java_obj(self, **kwargs):
        """Constructs the wrapped Scala PCASageMakerEstimator; argument order must
        match the Scala constructor. Optional values are wrapped in Option."""
        return self._new_java_obj(
            PCASageMakerEstimator._wrapped_class,
            kwargs['sagemakerRole'],
            kwargs['trainingInstanceType'],
            kwargs['trainingInstanceCount'],
            kwargs['endpointInstanceType'],
            kwargs['endpointInitialInstanceCount'],
            kwargs['requestRowSerializer'],
            kwargs['responseRowDeserializer'],
            kwargs['trainingInputS3DataPath'],
            kwargs['trainingOutputS3DataPath'],
            kwargs['trainingInstanceVolumeSizeInGB'],
            Option(kwargs['trainingProjectedColumns']),
            kwargs['trainingChannelName'],
            Option(kwargs['trainingContentType']),
            kwargs['trainingS3DataDistribution'],
            kwargs['trainingSparkDataFormat'],
            kwargs['trainingSparkDataFormatOptions'],
            kwargs['trainingInputMode'],
            Option(kwargs['trainingCompressionCodec']),
            kwargs['trainingMaxRuntimeInSeconds'],
            Option(kwargs['trainingKmsKeyId']),
            kwargs['modelEnvironmentVariables'],
            kwargs['endpointCreationPolicy'],
            kwargs['sagemakerClient'],
            Option(kwargs['region']),
            kwargs['s3Client'],
            kwargs['stsClient'],
            kwargs['modelPrependInputRowsToTransformationRows'],
            kwargs['deleteStagingDataAfterTraining'],
            kwargs['namePolicyFactory'],
            kwargs['uid']
        )
    def getNumComponents(self):
        """Gets the number of principal components to compute."""
        return self.getOrDefault(self.num_components)
    def setNumComponents(self, value):
        """Sets num_components. Must be > 0."""
        if value < 1:
            raise ValueError("num_components must be > 0, got: %s" % value)
        self._set(num_components=value)
    def getAlgorithmMode(self):
        """Gets the algorithm mode ('regular', 'stable' or 'randomized')."""
        return self.getOrDefault(self.algorithm_mode)
    def setAlgorithmMode(self, value):
        """Sets algorithm_mode. Must be 'regular', 'stable' or 'randomized'."""
        if value not in ('regular', 'stable', 'randomized'):
            # Fixed: the message previously said 'random' although the value
            # accepted by the check above is 'regular'.
            raise ValueError("AlgorithmMode must be 'regular', 'stable' or 'randomized',"
                             " got %s" % value)
        self._set(algorithm_mode=value)
    def getSubtractMean(self):
        """Returns subtract_mean as a bool (stored internally as 'True'/'False')."""
        return self.getOrDefault(self.subtract_mean) == 'True'
    def setSubtractMean(self, value):
        """Sets subtract_mean. Accepts 'True'/'False' or a real bool."""
        # Accept real booleans as a convenience; the underlying Param is a string.
        if isinstance(value, bool):
            value = str(value)
        if value not in ('True', 'False'):
            raise ValueError("SubtractMean must be 'True' or 'False', got %s" % value)
        self._set(subtract_mean=value)
    def getExtraComponents(self):
        """Gets the number of extra components ('randomized' mode only)."""
        return self.getOrDefault(self.extra_components)
    def setExtraComponents(self, value):
        """Sets extra_components. Must be -1 or > 0."""
        if value != -1 and value < 1:
            raise ValueError("ExtraComponents must be > 0 or -1, got : %s" % value)
        self._set(extra_components=value)
    def getMiniBatchSize(self):
        """Gets the mini-batch size."""
        return self.getOrDefault(self.mini_batch_size)
    def setMiniBatchSize(self, size):
        """Sets mini_batch_size. Must be > 0."""
        if size <= 0:
            raise ValueError("mini_batch_size must be > 0. Got %s" % size)
        self._set(mini_batch_size=size)
    def getFeatureDim(self):
        """Gets the input vector dimension."""
        return self.getOrDefault(self.feature_dim)
    def setFeatureDim(self, value):
        """Sets feature_dim. Must be > 0."""
        if value <= 0:
            raise ValueError("feature_dim must be > 0. Got %s" % value)
        self._set(feature_dim=value)
    @classmethod
    def _from_java(cls, javaObject):
        # NOTE(review): __init__ neither accepts a `javaObject` keyword nor allows
        # omitting the required trainingInstanceType/... positional arguments, so
        # this call looks unable to succeed as written — confirm against
        # SageMakerEstimatorBase before relying on ML persistence round-trips.
        return PCASageMakerEstimator(sagemakerRole=None, javaObject=javaObject)
from pyspark.ml.param import Params, Param, TypeConverters
from pyspark.ml.util import Identifiable
from sagemaker_pyspark import (SageMakerEstimatorBase, S3AutoCreatePath, Option, IAMRoleFromConfig,
EndpointCreationPolicy, SageMakerClients, RandomNamePolicyFactory)
from sagemaker_pyspark.transformation.serializers import ProtobufRequestRowSerializer
from sagemaker_pyspark.transformation.deserializers import LDAProtobufResponseRowDeserializer
class LDASageMakerEstimator(SageMakerEstimatorBase):
    """
    A :class:`~sagemaker_pyspark.SageMakerEstimator` runs a LDA training job on Amazon SageMaker
    upon a call to :meth:`.fit` on a DataFrame and returns a
    :class:`~sagemaker_pyspark.SageMakerModel`. LDA is unsupervised learning algorithm that
    attempts to describe a set of observations as a mixture of
    distinct categories. LDA is most commonly used to discover a user-specified number of topics
    shared by documents within a text corpus.
    Amazon SageMaker LDA trains on RecordIO-encoded Amazon Record protobuf data.
    SageMaker pyspark writes a DataFrame to S3 by selecting a column of Vectors named "features".
    These names are configurable by passing
    a map with entries in trainingSparkDataFormatOptions with key "featuresColumnName",
    with values corresponding to the desired feature column.
    For inference, the SageMakerModel returned by :meth:`fit()` by the LDASageMakerEstimator uses
    :class:`~sagemaker_pyspark.transformation.serializers.ProtobufRequestRowSerializer` to
    serialize Rows into RecordIO-encoded Amazon Record protobuf messages for inference, by default
    selecting the column named "features" expected to contain a Vector of Doubles.
    Inferences made against an Endpoint hosting a LDA model contain a "topic_mixture" field
    that holds a vector of Double values.
    Args:
        sageMakerRole (IAMRole): The SageMaker TrainingJob and Hosting IAM Role. Used by
            SageMaker to access S3 and ECR Resources. SageMaker hosted Endpoint instances
            launched by this Estimator run with this role.
        trainingInstanceType (str): The SageMaker TrainingJob Instance Type to use.
        trainingInstanceCount (int): The number of instances of instanceType to run an
            SageMaker Training Job with.
        endpointInstanceType (str): The SageMaker Endpoint Config instance type.
        endpointInitialInstanceCount (int): The SageMaker Endpoint Config minimum number of
            instances that can be used to host modelImage.
        requestRowSerializer (RequestRowSerializer): Serializes Spark DataFrame Rows for
            transformation by Models built from this Estimator.
        responseRowDeserializer (ResponseRowDeserializer): Deserializes an Endpoint response into a
            series of Rows.
        trainingInputS3DataPath (S3Resource): An S3 location to upload SageMaker Training Job input
            data to.
        trainingOutputS3DataPath (S3Resource): An S3 location for SageMaker to store Training Job
            output data to.
        trainingInstanceVolumeSizeInGB (int): The EBS volume size in gigabytes of each instance.
        trainingProjectedColumns (List): The columns to project from the Dataset being fit before
            training. If an Optional.empty is passed then no specific projection will occur and
            all columns will be serialized.
        trainingChannelName (str): The SageMaker Channel name to input serialized Dataset fit
            input to.
        trainingContentType (str): The MIME type of the training data.
        trainingS3DataDistribution (str): The SageMaker Training Job S3 data distribution scheme.
        trainingSparkDataFormat (str): The Spark Data Format name used to serialize the Dataset
            being fit for input to SageMaker.
        trainingSparkDataFormatOptions (dict): The Spark Data Format Options used during
            serialization of the Dataset being fit.
        trainingInputMode (str): The SageMaker Training Job Channel input mode.
        trainingCompressionCodec (str): The type of compression to use when serializing the
            Dataset being fit for input to SageMaker.
        trainingMaxRuntimeInSeconds (int): A SageMaker Training Job Termination Condition
            MaxRuntimeInHours.
        trainingKmsKeyId (str): A KMS key ID for the Output Data Source.
        modelEnvironmentVariables (dict): The environment variables that SageMaker will set on the
            model container during execution.
        endpointCreationPolicy (EndpointCreationPolicy): Defines how a SageMaker Endpoint
            referenced by a SageMakerModel is created.
        sagemakerClient (AmazonSageMaker) Amazon SageMaker client. Used to send CreateTrainingJob,
            CreateModel, and CreateEndpoint requests.
        region (str): The region in which to run the algorithm. If not specified, gets the region
            from the DefaultAwsRegionProviderChain.
        s3Client (AmazonS3): Used to create a bucket for staging SageMaker Training Job
            input and/or output if either are set to S3AutoCreatePath.
        stsClient (AmazonSTS): Used to resolve the account number when creating staging
            input / output buckets.
        modelPrependInputRowsToTransformationRows (bool): Whether the transformation result on
            Models built by this Estimator should also include the input Rows. If true,
            each output Row is formed by a concatenation of the input Row with the corresponding
            Row produced by SageMaker Endpoint invocation, produced by responseRowDeserializer.
            If false, each output Row is just taken from responseRowDeserializer.
        deleteStagingDataAfterTraining (bool): Whether to remove the training data on s3 after
            training is complete or failed.
        namePolicyFactory (NamePolicyFactory): The NamePolicyFactory to use when naming SageMaker
            entities created during fit.
        uid (str): The unique identifier of this Estimator. Used to represent this stage in Spark
            ML pipelines.
    """
    _wrapped_class = "com.amazonaws.services.sagemaker.sparksdk.algorithms.LDASageMakerEstimator"
    # SageMaker LDA hyper-parameters, mirrored from the wrapped Scala estimator.
    num_topics = Param(Params._dummy(),
                       "num_topics",
                       "The number of topics for LDA to find within the data. Must be > 0.",
                       typeConverter=TypeConverters.toInt)
    alpha0 = Param(Params._dummy(),
                   "alpha0",
                   "Initial guess for the concentration parameter: the sum of the "
                   "elements of the Dirichlet prior. "
                   "Must be > 0.",
                   typeConverter=TypeConverters.toFloat)
    max_restarts = Param(Params._dummy(),
                         "max_restarts",
                         "The number of restarts during decomposition phase. Must be > 0.",
                         typeConverter=TypeConverters.toInt)
    max_iterations = Param(Params._dummy(),
                           "max_iterations",
                           "The maximum number of iterations to perform during the ALS phase "
                           "of the algorithm. Must be > 0.",
                           typeConverter=TypeConverters.toInt)
    tol = Param(Params._dummy(),
                "tol",
                "Target error tolerance for the ALS phase of the algorithm. Must be > 0.",
                typeConverter=TypeConverters.toFloat)
    mini_batch_size = Param(Params._dummy(), "mini_batch_size",
                            "The total number of documents in the input document corpus. "
                            "Must be > 0",
                            typeConverter=TypeConverters.toInt)
    feature_dim = Param(Params._dummy(), "feature_dim",
                        "The size of the vocabulary of the input document corpus. Must be > 0",
                        typeConverter=TypeConverters.toInt)
    def __init__(self,
                 trainingInstanceType,
                 trainingInstanceCount,
                 endpointInstanceType,
                 endpointInitialInstanceCount,
                 sagemakerRole=IAMRoleFromConfig(),
                 requestRowSerializer=ProtobufRequestRowSerializer(),
                 responseRowDeserializer=LDAProtobufResponseRowDeserializer(),
                 trainingInputS3DataPath=S3AutoCreatePath(),
                 trainingOutputS3DataPath=S3AutoCreatePath(),
                 trainingInstanceVolumeSizeInGB=1024,
                 trainingProjectedColumns=None,
                 trainingChannelName="train",
                 trainingContentType=None,
                 trainingS3DataDistribution="ShardedByS3Key",
                 trainingSparkDataFormat="sagemaker",
                 trainingSparkDataFormatOptions=None,
                 trainingInputMode="File",
                 trainingCompressionCodec=None,
                 trainingMaxRuntimeInSeconds=24*60*60,
                 trainingKmsKeyId=None,
                 modelEnvironmentVariables=None,
                 endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
                 sagemakerClient=SageMakerClients.create_sagemaker_client(),
                 region=None,
                 s3Client=SageMakerClients.create_s3_default_client(),
                 stsClient=SageMakerClients.create_sts_default_client(),
                 modelPrependInputRowsToTransformationRows=True,
                 deleteStagingDataAfterTraining=True,
                 namePolicyFactory=RandomNamePolicyFactory(),
                 uid=None):
        if trainingSparkDataFormatOptions is None:
            trainingSparkDataFormatOptions = {}
        if modelEnvironmentVariables is None:
            modelEnvironmentVariables = {}
        if uid is None:
            uid = Identifiable._randomUID()
        # locals() snapshots every parameter (with the normalized defaults above)
        # for forwarding to the base class; no new local variables may be
        # introduced before this point.
        kwargs = locals().copy()
        del kwargs['self']
        super(LDASageMakerEstimator, self).__init__(**kwargs)
    def _get_java_obj(self, **kwargs):
        # Constructs the wrapped Scala LDASageMakerEstimator; argument order must
        # match the Scala constructor. Optional values are wrapped in Option.
        return self._new_java_obj(
            LDASageMakerEstimator._wrapped_class,
            kwargs['sagemakerRole'],
            kwargs['trainingInstanceType'],
            kwargs['trainingInstanceCount'],
            kwargs['endpointInstanceType'],
            kwargs['endpointInitialInstanceCount'],
            kwargs['requestRowSerializer'],
            kwargs['responseRowDeserializer'],
            kwargs['trainingInputS3DataPath'],
            kwargs['trainingOutputS3DataPath'],
            kwargs['trainingInstanceVolumeSizeInGB'],
            Option(kwargs['trainingProjectedColumns']),
            kwargs['trainingChannelName'],
            Option(kwargs['trainingContentType']),
            kwargs['trainingS3DataDistribution'],
            kwargs['trainingSparkDataFormat'],
            kwargs['trainingSparkDataFormatOptions'],
            kwargs['trainingInputMode'],
            Option(kwargs['trainingCompressionCodec']),
            kwargs['trainingMaxRuntimeInSeconds'],
            Option(kwargs['trainingKmsKeyId']),
            kwargs['modelEnvironmentVariables'],
            kwargs['endpointCreationPolicy'],
            kwargs['sagemakerClient'],
            Option(kwargs['region']),
            kwargs['s3Client'],
            kwargs['stsClient'],
            kwargs['modelPrependInputRowsToTransformationRows'],
            kwargs['deleteStagingDataAfterTraining'],
            kwargs['namePolicyFactory'],
            kwargs['uid']
        )
    def getNumTopics(self):
        """Gets the number of topics to find."""
        return self.getOrDefault(self.num_topics)
    def setNumTopics(self, value):
        """Sets num_topics. Must be > 0."""
        if value < 1:
            raise ValueError("num_topics must be > 0, got: %s" % value)
        self._set(num_topics=value)
    def getAlpha0(self):
        """Gets the Dirichlet concentration parameter."""
        return self.getOrDefault(self.alpha0)
    def setAlpha0(self, value):
        """Sets alpha0. Must be > 0."""
        if value <= 0:
            raise ValueError("alpha0 must be > 0, got: %s" % value)
        self._set(alpha0=value)
    def getMaxRestarts(self):
        """Gets the number of restarts during the decomposition phase."""
        return self.getOrDefault(self.max_restarts)
    def setMaxRestarts(self, value):
        """Sets max_restarts. Must be > 0."""
        if value < 1:
            raise ValueError("max_restarts must be > 0, got: %s" % value)
        self._set(max_restarts=value)
    def getMaxIterations(self):
        """Gets the maximum number of ALS iterations."""
        return self.getOrDefault(self.max_iterations)
    def setMaxIterations(self, value):
        """Sets max_iterations. Must be > 0."""
        if value < 1:
            raise ValueError("max_iterations must be > 0, got: %s" % value)
        self._set(max_iterations=value)
    def getTol(self):
        """Gets the ALS error tolerance."""
        return self.getOrDefault(self.tol)
    def setTol(self, value):
        """Sets tol. Must be > 0."""
        if value <= 0:
            raise ValueError("tol must be > 0, got: %s" % value)
        self._set(tol=value)
    def getMiniBatchSize(self):
        """Gets the total number of documents in the input corpus."""
        return self.getOrDefault(self.mini_batch_size)
    def setMiniBatchSize(self, size):
        """Sets mini_batch_size. Must be > 0."""
        if size <= 0:
            raise ValueError("mini_batch_size must be > 0. Got %s" % size)
        self._set(mini_batch_size=size)
    def getFeatureDim(self):
        """Gets the vocabulary size of the input corpus."""
        return self.getOrDefault(self.feature_dim)
    def setFeatureDim(self, value):
        """Sets feature_dim. Must be > 0."""
        if value <= 0:
            raise ValueError("feature_dim must be > 0. Got %s" % value)
        self._set(feature_dim=value)
@classmethod
def _from_java(cls, javaObject):
return LDASageMakerEstimator(sagemakerRole=None, javaObject=javaObject) | /sagemaker_pyspark-1.4.5.tar.gz/sagemaker_pyspark-1.4.5/src/sagemaker_pyspark/algorithms/LDASageMakerEstimator.py | 0.911906 | 0.573738 | LDASageMakerEstimator.py | pypi |
import numbers
from pyspark.ml.param import Params, Param, TypeConverters
from pyspark.ml.util import Identifiable
from sagemaker_pyspark import (SageMakerEstimatorBase, S3AutoCreatePath, Option, IAMRoleFromConfig,
EndpointCreationPolicy, SageMakerClients, RandomNamePolicyFactory)
from sagemaker_pyspark.transformation.serializers import ProtobufRequestRowSerializer
from sagemaker_pyspark.transformation.deserializers import KMeansProtobufResponseRowDeserializer
class KMeansSageMakerEstimator(SageMakerEstimatorBase):
    """
    A :class:`~sagemaker_pyspark.SageMakerEstimator` that runs a KMeans training job on
    Amazon SageMaker upon a call to :meth:`.fit` and returns a
    :class:`~sagemaker_pyspark.SageMakerModel` that can be used to transform a DataFrame using
    the hosted K-Means model. K-Means Clustering is useful for grouping similar examples in your
    dataset.

    Amazon SageMaker K-Means clustering trains on RecordIO-encoded Amazon Record protobuf data.
    SageMaker pyspark writes a DataFrame to S3 by selecting a column of Vectors named "features"
    and, if present, a column of Doubles named "label". These names are configurable by passing a
    dictionary with entries in `trainingSparkDataFormatOptions` with key "labelColumnName" or
    "featuresColumnName", with values corresponding to the desired label and features columns.

    For inference, the SageMakerModel returned by :meth:`fit()` by the KMeansSageMakerEstimator uses
    :class:`~sagemaker_pyspark.transformation.serializers.ProtobufRequestRowSerializer` to
    serialize Rows into RecordIO-encoded Amazon Record protobuf messages for inference, by default
    selecting the column named "features" expected to contain a Vector of Doubles.

    Inferences made against an Endpoint hosting a K-Means model contain a "closest_cluster" field
    and a "distance_to_cluster" field, both appended to the input DataFrame as columns of Double.

    Args:
        sagemakerRole (IAMRole): The SageMaker TrainingJob and Hosting IAM Role. Used by
            SageMaker to access S3 and ECR Resources. SageMaker hosted Endpoint instances
            launched by this Estimator run with this role.
        trainingInstanceType (str): The SageMaker TrainingJob Instance Type to use.
        trainingInstanceCount (int): The number of instances of instanceType to run an
            SageMaker Training Job with.
        endpointInstanceType (str): The SageMaker Endpoint Config instance type.
        endpointInitialInstanceCount (int): The SageMaker Endpoint Config minimum number of
            instances that can be used to host modelImage.
        requestRowSerializer (RequestRowSerializer): Serializes Spark DataFrame Rows for
            transformation by Models built from this Estimator.
        responseRowDeserializer (ResponseRowDeserializer): Deserializes an Endpoint response into a
            series of Rows.
        trainingInputS3DataPath (S3Resource): An S3 location to upload SageMaker Training Job input
            data to.
        trainingOutputS3DataPath (S3Resource): An S3 location for SageMaker to store Training Job
            output data to.
        trainingInstanceVolumeSizeInGB (int): The EBS volume size in gigabytes of each instance.
        trainingProjectedColumns (List): The columns to project from the Dataset being fit before
            training. If an Optional.empty is passed then no specific projection will occur and
            all columns will be serialized.
        trainingChannelName (str): The SageMaker Channel name to input serialized Dataset fit
            input to.
        trainingContentType (str): The MIME type of the training data.
        trainingS3DataDistribution (str): The SageMaker Training Job S3 data distribution scheme.
        trainingSparkDataFormat (str): The Spark Data Format name used to serialize the Dataset
            being fit for input to SageMaker.
        trainingSparkDataFormatOptions (dict): The Spark Data Format Options used during
            serialization of the Dataset being fit.
        trainingInputMode (str): The SageMaker Training Job Channel input mode.
        trainingCompressionCodec (str): The type of compression to use when serializing the
            Dataset being fit for input to SageMaker.
        trainingMaxRuntimeInSeconds (int): A SageMaker Training Job Termination Condition
            MaxRuntimeInHours.
        trainingKmsKeyId (str): A KMS key ID for the Output Data Source.
        modelEnvironmentVariables (dict): The environment variables that SageMaker will set on the
            model container during execution.
        endpointCreationPolicy (EndpointCreationPolicy): Defines how a SageMaker Endpoint
            referenced by a SageMakerModel is created.
        sagemakerClient (AmazonSageMaker): Amazon SageMaker client. Used to send CreateTrainingJob,
            CreateModel, and CreateEndpoint requests.
        region (str): The region in which to run the algorithm. If not specified, gets the region
            from the DefaultAwsRegionProviderChain.
        s3Client (AmazonS3): Used to create a bucket for staging SageMaker Training Job
            input and/or output if either are set to S3AutoCreatePath.
        stsClient (AmazonSTS): Used to resolve the account number when creating staging
            input / output buckets.
        modelPrependInputRowsToTransformationRows (bool): Whether the transformation result on
            Models built by this Estimator should also include the input Rows. If true,
            each output Row is formed by a concatenation of the input Row with the corresponding
            Row produced by SageMaker Endpoint invocation, produced by responseRowDeserializer.
            If false, each output Row is just taken from responseRowDeserializer.
        deleteStagingDataAfterTraining (bool): Whether to remove the training data on s3 after
            training is complete or failed.
        namePolicyFactory (NamePolicyFactory): The NamePolicyFactory to use when naming SageMaker
            entities created during fit.
        uid (str): The unique identifier of this Estimator. Used to represent this stage in Spark
            ML pipelines.
    """

    # Fully-qualified name of the Scala class this Python wrapper delegates to.
    _wrapped_class = "com.amazonaws.services.sagemaker.sparksdk.algorithms.KMeansSageMakerEstimator"

    # --- SageMaker K-Means hyper-parameters (names/help text mirror the Scala SDK) ---
    k = Param(Params._dummy(),
              "k",
              "The number of clusters to create. Must be > 1",
              typeConverter=TypeConverters.toInt)

    init_method = Param(Params._dummy(),
                        "init_method",
                        "The initialization algorithm to choose centroids. "
                        "Supported options: 'random' and 'kmeans++'.",
                        typeConverter=TypeConverters.toString)

    local_lloyd_max_iter = Param(Params._dummy(),
                                 "local_lloyd_max_iter",
                                 "Maximum iterations for LLoyds EM procedure "
                                 "in the local kmeans used in finalized stage. Must be > 0",
                                 typeConverter=TypeConverters.toInt)

    local_lloyd_tol = Param(Params._dummy(),
                            "local_lloyd_tol",
                            "Tolerance for change in ssd for early stopping "
                            "in the local kmeans. Must be in range [0, 1].",
                            typeConverter=TypeConverters.toFloat)

    # Stored as a string because the service accepts either a number or 'auto'.
    local_lloyd_num_trials = Param(Params._dummy(),
                                   "local_lloyd_num_trials",
                                   "The number of trials of the local kmeans "
                                   "algorithm. Must be > 0 or 'auto'",
                                   typeConverter=TypeConverters.toString)

    local_lloyd_init_method = Param(Params._dummy(),
                                    "local_lloyd_init_method",
                                    "The local initialization algorithm to choose centroids. "
                                    "Supported options: 'random' and 'kmeans++'",
                                    typeConverter=TypeConverters.toString)

    half_life_time_size = Param(Params._dummy(), "half_life_time_size",
                                "The weight decaying rate of each point. Must be >= 0",
                                typeConverter=TypeConverters.toInt)

    epochs = Param(Params._dummy(), "epochs",
                   "The number of passes done over the training data. Must be > 0",
                   typeConverter=TypeConverters.toInt)

    # Stored as a string because the service accepts either a number or 'auto'.
    extra_center_factor = Param(Params._dummy(), "extra_center_factor",
                                "The factor of extra centroids to create. "
                                "Must be > 0 or 'auto'",
                                typeConverter=TypeConverters.toString)

    mini_batch_size = Param(Params._dummy(), "mini_batch_size",
                            "The number of examples in a mini-batch. Must be > 0",
                            typeConverter=TypeConverters.toInt)

    feature_dim = Param(Params._dummy(), "feature_dim",
                        "The dimension of the input vectors. Must be > 0",
                        typeConverter=TypeConverters.toInt)

    # Stored internally in '[...]' bracketed form; see get/setEvalMetrics.
    eval_metrics = Param(Params._dummy(), "eval_metrics",
                         "Metric to be used for scoring the model. String of comma separated"
                         " metrics. Supported metrics are 'msd' and 'ssd'."
                         " 'msd' Means Square Error, 'ssd': Sum of square distance",
                         typeConverter=TypeConverters.toString)

    def __init__(self,
                 trainingInstanceType,
                 trainingInstanceCount,
                 endpointInstanceType,
                 endpointInitialInstanceCount,
                 sagemakerRole=IAMRoleFromConfig(),
                 requestRowSerializer=ProtobufRequestRowSerializer(),
                 responseRowDeserializer=KMeansProtobufResponseRowDeserializer(),
                 trainingInputS3DataPath=S3AutoCreatePath(),
                 trainingOutputS3DataPath=S3AutoCreatePath(),
                 trainingInstanceVolumeSizeInGB=1024,
                 trainingProjectedColumns=None,
                 trainingChannelName="train",
                 trainingContentType=None,
                 trainingS3DataDistribution="ShardedByS3Key",
                 trainingSparkDataFormat="sagemaker",
                 trainingSparkDataFormatOptions=None,
                 trainingInputMode="File",
                 trainingCompressionCodec=None,
                 trainingMaxRuntimeInSeconds=24*60*60,
                 trainingKmsKeyId=None,
                 modelEnvironmentVariables=None,
                 endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
                 sagemakerClient=SageMakerClients.create_sagemaker_client(),
                 region=None,
                 s3Client=SageMakerClients.create_s3_default_client(),
                 stsClient=SageMakerClients.create_sts_default_client(),
                 modelPrependInputRowsToTransformationRows=True,
                 deleteStagingDataAfterTraining=True,
                 namePolicyFactory=RandomNamePolicyFactory(),
                 uid=None):
        # Mutable containers are created per-call rather than used as defaults,
        # so instances never share state.
        if trainingSparkDataFormatOptions is None:
            trainingSparkDataFormatOptions = {}
        if modelEnvironmentVariables is None:
            modelEnvironmentVariables = {}
        if uid is None:
            uid = Identifiable._randomUID()
        # Forward every constructor argument, by name, to the base class
        # (which builds the wrapped JVM estimator). ``locals()`` must be
        # captured before any other local variable is introduced.
        kwargs = locals().copy()
        del kwargs['self']
        super(KMeansSageMakerEstimator, self).__init__(**kwargs)
        default_params = {
            'k': 2
        }
        self._setDefault(**default_params)

    def _get_java_obj(self, **kwargs):
        # Instantiate the wrapped Scala estimator. The positional argument
        # order must match the Scala constructor exactly; values that are
        # optional on the Scala side are wrapped in Option().
        return self._new_java_obj(
            KMeansSageMakerEstimator._wrapped_class,
            kwargs['sagemakerRole'],
            kwargs['trainingInstanceType'],
            kwargs['trainingInstanceCount'],
            kwargs['endpointInstanceType'],
            kwargs['endpointInitialInstanceCount'],
            kwargs['requestRowSerializer'],
            kwargs['responseRowDeserializer'],
            kwargs['trainingInputS3DataPath'],
            kwargs['trainingOutputS3DataPath'],
            kwargs['trainingInstanceVolumeSizeInGB'],
            Option(kwargs['trainingProjectedColumns']),
            kwargs['trainingChannelName'],
            Option(kwargs['trainingContentType']),
            kwargs['trainingS3DataDistribution'],
            kwargs['trainingSparkDataFormat'],
            kwargs['trainingSparkDataFormatOptions'],
            kwargs['trainingInputMode'],
            Option(kwargs['trainingCompressionCodec']),
            kwargs['trainingMaxRuntimeInSeconds'],
            Option(kwargs['trainingKmsKeyId']),
            kwargs['modelEnvironmentVariables'],
            kwargs['endpointCreationPolicy'],
            kwargs['sagemakerClient'],
            Option(kwargs['region']),
            kwargs['s3Client'],
            kwargs['stsClient'],
            kwargs['modelPrependInputRowsToTransformationRows'],
            kwargs['deleteStagingDataAfterTraining'],
            kwargs['namePolicyFactory'],
            kwargs['uid']
        )

    def getK(self):
        """Return the number of clusters ``k``."""
        return self.getOrDefault(self.k)

    def setK(self, value):
        """Set ``k``; SageMaker K-Means requires at least two clusters."""
        if value < 2:
            raise ValueError("K must be >= 2, got: %s" % value)
        self._set(k=value)

    def getMaxIter(self):
        """Return ``local_lloyd_max_iter``."""
        return self.getOrDefault(self.local_lloyd_max_iter)

    def setMaxIter(self, value):
        """Set ``local_lloyd_max_iter``; must be a positive integer."""
        if value < 1:
            raise ValueError("MaxIter must be > 0, got: %s" % value)
        self._set(local_lloyd_max_iter=value)

    def getTol(self):
        """Return ``local_lloyd_tol``."""
        return self.getOrDefault(self.local_lloyd_tol)

    def setTol(self, value):
        """Set ``local_lloyd_tol``; must lie in [0, 1]."""
        if value > 1 or value < 0:
            raise ValueError("Tol must be within [0, 1], got: %s" % value)
        self._set(local_lloyd_tol=value)

    def getLocalInitMethod(self):
        """Return ``local_lloyd_init_method``."""
        return self.getOrDefault(self.local_lloyd_init_method)

    def setLocalInitMethod(self, value):
        """Set ``local_lloyd_init_method``; 'random' or 'kmeans++' only."""
        if value not in ('random', 'kmeans++'):
            raise ValueError("LocalInitMethod must be 'random' or 'kmeans++', got %s" % value)
        self._set(local_lloyd_init_method=value)

    def getHalflifeTime(self):
        """Return ``half_life_time_size``."""
        return self.getOrDefault(self.half_life_time_size)

    def setHalflifeTime(self, value):
        """Set ``half_life_time_size``; must be non-negative."""
        if value < 0:
            raise ValueError("HalflifeTime must be >=0, got: %s" % value)
        self._set(half_life_time_size=value)

    def getEpochs(self):
        """Return ``epochs``."""
        return self.getOrDefault(self.epochs)

    def setEpochs(self, value):
        """Set ``epochs``; must be a positive integer."""
        if value < 1:
            raise ValueError("Epochs must be > 0, got: %s" % value)
        self._set(epochs=value)

    def getInitMethod(self):
        """Return ``init_method``."""
        return self.getOrDefault(self.init_method)

    def setInitMethod(self, value):
        """Set ``init_method``; 'random' or 'kmeans++' only."""
        if value not in ('random', 'kmeans++'):
            raise ValueError("InitMethod must be 'random' or 'kmeans++', got: %s" % value)
        self._set(init_method=value)

    def getCenterFactor(self):
        """Return ``extra_center_factor`` (stored as a string)."""
        return self.getOrDefault(self.extra_center_factor)

    def setCenterFactor(self, value):
        """Set ``extra_center_factor``; a positive number or the string 'auto'.

        The value is always persisted as a string because the service accepts
        either form. A non-numeric, non-'auto' string will raise from the
        ``int(value)`` conversion below.
        """
        if isinstance(value, numbers.Real) and value < 1:
            raise ValueError("CenterFactor must be 'auto' or > 0, got: %s" % value)
        if value != 'auto' and int(value) < 1:
            raise ValueError("CenterFactor must be 'auto' or > 0, got: %s" % value)
        self._set(extra_center_factor=str(value))

    def getTrialNum(self):
        """Return ``local_lloyd_num_trials`` (stored as a string)."""
        return self.getOrDefault(self.local_lloyd_num_trials)

    def setTrialNum(self, value):
        """Set ``local_lloyd_num_trials``; a positive number or 'auto'.

        Persisted as a string for the same reason as ``extra_center_factor``.
        """
        if isinstance(value, numbers.Real) and value < 1:
            raise ValueError("TrialNum must be 'auto' or > 0, got: %s" % value)
        if value != 'auto' and int(value) < 1:
            raise ValueError("TrialNum must be 'auto' or > 0, got: %s" % value)
        self._set(local_lloyd_num_trials=str(value))

    def getMiniBatchSize(self):
        """Return ``mini_batch_size``."""
        return self.getOrDefault(self.mini_batch_size)

    def setMiniBatchSize(self, size):
        """Set ``mini_batch_size``; must be strictly positive."""
        if size <= 0:
            raise ValueError("mini_batch_size must be > 0. Got %s" % size)
        self._set(mini_batch_size=size)

    def getFeatureDim(self):
        """Return ``feature_dim``."""
        return self.getOrDefault(self.feature_dim)

    def setFeatureDim(self, value):
        """Set ``feature_dim``; must be strictly positive."""
        if value <= 0:
            raise ValueError("feature_dim must be > 0. Got %s" % value)
        self._set(feature_dim=value)

    def getEvalMetrics(self):
        """Return ``eval_metrics`` with the stored '[' and ']' wrappers removed."""
        return self.getOrDefault(self.eval_metrics).strip('[').strip(']')

    def setEvalMetrics(self, value):
        """Set ``eval_metrics`` from a comma-separated string of 'msd'/'ssd'.

        The value is stored in the '[...]' bracketed form the service expects;
        ``getEvalMetrics`` strips the brackets again.
        """
        valid_tokens = ("msd", "ssd")
        tokens = value.split(",")
        for token in tokens:
            if token.strip() not in valid_tokens:
                raise ValueError("values allowed in eval_metrics are: %s, found: %s " %
                                 (','.join(valid_tokens), token))
        self._set(eval_metrics='[' + value + ']')

    @classmethod
    def _from_java(cls, javaObject):
        # Rebuild the Python wrapper around an existing JVM estimator.
        # NOTE(review): the line below carries dataset-extraction residue after
        # the closing parenthesis; the code proper ends at ``javaObject)``.
        return KMeansSageMakerEstimator(sagemakerRole=None, javaObject=javaObject) | /sagemaker_pyspark-1.4.5.tar.gz/sagemaker_pyspark-1.4.5/src/sagemaker_pyspark/algorithms/KMeansSageMakerEstimator.py | 0.866345 | 0.557002 | KMeansSageMakerEstimator.py | pypi
from pyspark.ml.param import Params, Param, TypeConverters
from pyspark.ml.util import Identifiable
from sagemaker_pyspark import (SageMakerEstimatorBase, S3AutoCreatePath, Option, IAMRoleFromConfig,
EndpointCreationPolicy, SageMakerClients, RandomNamePolicyFactory)
from sagemaker_pyspark.transformation.serializers import LibSVMRequestRowSerializer
from sagemaker_pyspark.transformation.deserializers import XGBoostCSVRowDeserializer
class XGBoostSageMakerEstimator(SageMakerEstimatorBase):
    """
    A :class:`~sagemaker_pyspark.SageMakerEstimator` that runs an XGBoost training job in
    Amazon SageMaker and returns a :class:`~sagemaker_pyspark.SageMakerModel` that can be used to
    transform a DataFrame using the hosted XGBoost model. XGBoost is an open-source distributed
    gradient boosting library that Amazon SageMaker has adapted to run on Amazon SageMaker.

    XGBoost trains and infers on LibSVM-formatted data. XGBoostSageMakerEstimator uses Spark's
    LibSVMFileFormat to write the training DataFrame to S3, and serializes Rows to LibSVM for
    inference, selecting the column named "features" by default, expected to contain a Vector of
    Doubles.

    Inferences made against an Endpoint hosting an XGBoost model contain a "prediction" field
    appended to the input DataFrame as a column of Doubles, containing the prediction corresponding
    to the given Vector of features.

    See `XGBoost github <https://github.com/dmlc/xgboost>`__ for more on XGBoost

    Args:
        sagemakerRole (IAMRole): The SageMaker TrainingJob and Hosting IAM Role. Used by
            SageMaker to access S3 and ECR Resources. SageMaker hosted Endpoint instances
            launched by this Estimator run with this role.
        trainingInstanceType (str): The SageMaker TrainingJob Instance Type to use.
        trainingInstanceCount (int): The number of instances of instanceType to run an
            SageMaker Training Job with.
        endpointInstanceType (str): The SageMaker Endpoint Config instance type.
        endpointInitialInstanceCount (int): The SageMaker Endpoint Config minimum number of
            instances that can be used to host modelImage.
        requestRowSerializer (RequestRowSerializer): Serializes Spark DataFrame Rows for
            transformation by Models built from this Estimator.
        responseRowDeserializer (ResponseRowDeserializer): Deserializes an Endpoint response into a
            series of Rows.
        trainingInputS3DataPath (S3Resource): An S3 location to upload SageMaker Training Job input
            data to.
        trainingOutputS3DataPath (S3Resource): An S3 location for SageMaker to store Training Job
            output data to.
        trainingInstanceVolumeSizeInGB (int): The EBS volume size in gigabytes of each instance.
        trainingProjectedColumns (List): The columns to project from the Dataset being fit before
            training. If an Optional.empty is passed then no specific projection will occur and
            all columns will be serialized.
        trainingChannelName (str): The SageMaker Channel name to input serialized Dataset fit
            input to.
        trainingContentType (str): The MIME type of the training data.
        trainingS3DataDistribution (str): The SageMaker Training Job S3 data distribution scheme.
        trainingSparkDataFormat (str): The Spark Data Format name used to serialize the Dataset
            being fit for input to SageMaker.
        trainingSparkDataFormatOptions (dict): The Spark Data Format Options used during
            serialization of the Dataset being fit.
        trainingInputMode (str): The SageMaker Training Job Channel input mode.
        trainingCompressionCodec (str): The type of compression to use when serializing the
            Dataset being fit for input to SageMaker.
        trainingMaxRuntimeInSeconds (int): A SageMaker Training Job Termination Condition
            MaxRuntimeInHours.
        trainingKmsKeyId (str): A KMS key ID for the Output Data Source.
        modelEnvironmentVariables (dict): The environment variables that SageMaker will set on the
            model container during execution.
        endpointCreationPolicy (EndpointCreationPolicy): Defines how a SageMaker Endpoint
            referenced by a SageMakerModel is created.
        sagemakerClient (AmazonSageMaker): Amazon SageMaker client. Used to send CreateTrainingJob,
            CreateModel, and CreateEndpoint requests.
        region (str): The region in which to run the algorithm. If not specified, gets the region
            from the DefaultAwsRegionProviderChain.
        s3Client (AmazonS3): Used to create a bucket for staging SageMaker Training Job
            input and/or output if either are set to S3AutoCreatePath.
        stsClient (AmazonSTS): Used to resolve the account number when creating staging
            input / output buckets.
        modelPrependInputRowsToTransformationRows (bool): Whether the transformation result on
            Models built by this Estimator should also include the input Rows. If true,
            each output Row is formed by a concatenation of the input Row with the corresponding
            Row produced by SageMaker Endpoint invocation, produced by responseRowDeserializer.
            If false, each output Row is just taken from responseRowDeserializer.
        deleteStagingDataAfterTraining (bool): Whether to remove the training data on s3 after
            training is complete or failed.
        namePolicyFactory (NamePolicyFactory): The NamePolicyFactory to use when naming SageMaker
            entities created during fit.
        uid (str): The unique identifier of this Estimator. Used to represent this stage in Spark
            ML pipelines.
    """

    # Fully-qualified name of the Scala class this Python wrapper delegates to.
    _wrapped_class = \
        "com.amazonaws.services.sagemaker.sparksdk.algorithms.XGBoostSageMakerEstimator"

    # --- XGBoost hyper-parameters (names/help text mirror the Scala SDK) ---
    booster = Param(
        Params._dummy(), "booster",
        "Which booster to use. Can be 'gbtree', 'gblinear' or 'dart'. "
        "gbtree and dart use tree based model while gblinear uses linear function.",
        typeConverter=TypeConverters.toInt)

    silent = Param(
        Params._dummy(), "silent",
        "Whether in silent mode."
        "0 means print running messages, 1 means silent mode.",
        typeConverter=TypeConverters.toInt)

    nthread = Param(
        Params._dummy(), "nthread",
        "Number of parallel threads used to run xgboot. Must be >= 1.",
        typeConverter=TypeConverters.toInt)

    eta = Param(
        Params._dummy(), "eta",
        "Step size shrinkage used in update to prevent overfitting. After each boosting step, "
        "we can directly get the weights of new features. and eta shrinks the feature weights "
        "to make the boosting process more conservative. Must be in [0, 1].",
        typeConverter=TypeConverters.toFloat)

    gamma = Param(
        Params._dummy(), "gamma",
        "Minimum loss reduction required to make an additional partition on a leaf node"
        " of the tree. The larger the value, the more conservative the algorithm will be."
        "Must be >= 0.",
        typeConverter=TypeConverters.toFloat)

    max_depth = Param(
        Params._dummy(), "max_depth",
        "Maximum depth of a tree. Increasing this value makes the model more complex and "
        "likely to be overfitted. 0 indicates no limit. A limit is required when"
        "grow_policy=depth-wise. Must be >= 0. Default value is 6",
        typeConverter=TypeConverters.toInt)

    min_child_weight = Param(
        Params._dummy(), "min_child_weight",
        "Minimum sum of instance weight (hessian) needed in a child. If the tree partition step "
        "results in a leaf node with the sum of instance weight less than min_child_weight, then "
        "the building process will give up further partitioning. In linear regression mode, "
        "this simply corresponds to minimum number of instances needed to be in each node. "
        "The larger the value, the more conservative the algorithm will be. Must be >= 0.",
        typeConverter=TypeConverters.toFloat)

    max_delta_step = Param(
        Params._dummy(), "max_delta_step",
        "Maximum delta step we allow each tree's weight estimation to be. "
        "If the value is set to 0, it means there is no constraint. If it is set to a positive "
        "value, it can help make the update step more conservative. Usually this parameter is "
        "not needed, but it might help in logistic regression when the classes are extremely"
        " imbalanced. Setting it to value of 1-10 might help control the update. Must be >= 0.",
        typeConverter=TypeConverters.toFloat)

    subsample = Param(
        Params._dummy(), "subsample",
        "Subsample ratio of the training instance. Setting it to 0.5 means that XGBoost will "
        "randomly collect half of the data instances to grow trees and this will "
        "prevent overfitting. Must be (0, 1].",
        typeConverter=TypeConverters.toFloat)

    colsample_bytree = Param(
        Params._dummy(), "colsample_bytree",
        "Subsample ratio of columns when constructing each tree. Must be in (0, 1].",
        typeConverter=TypeConverters.toFloat)

    colsample_bylevel = Param(
        Params._dummy(), "colsample_bylevel",
        "Subsample ratio of columns for each split, in each level. Must be in (0, 1].",
        typeConverter=TypeConverters.toFloat)

    # Named lambda_weights because "lambda" is a Python keyword; maps to XGBoost's "lambda".
    lambda_weights = Param(
        Params._dummy(), "lambda_weights",
        "L2 regularization term on weights, increase this value"
        " will make model more conservative.",
        typeConverter=TypeConverters.toFloat)

    alpha = Param(
        Params._dummy(), "alpha",
        "L1 regularization term on weights, increase this value "
        "will make model more conservative.",
        typeConverter=TypeConverters.toFloat)

    tree_method = Param(
        Params._dummy(), "tree_method",
        "The tree construction algorithm used in XGBoost. Can be "
        "'auto', 'exact', 'approx' or 'hist'",
        typeConverter=TypeConverters.toString)

    sketch_eps = Param(
        Params._dummy(), "sketch_eps",
        "Used only for approximate greedy algorithm. This translates into O(1 / sketch_eps) number"
        "of bins. Compared to directly select number of bins, this comes with theoretical guarantee"
        "with sketch accuracy."
        "Must be in (0, 1).",
        typeConverter=TypeConverters.toFloat)

    scale_pos_weight = Param(
        Params._dummy(), "scale_pos_weight",
        "Controls the balance of positive and negative weights. It's useful for unbalanced classes."
        "A typical value to consider: sum(negative cases) / sum(positive cases).",
        typeConverter=TypeConverters.toFloat)

    updater = Param(
        Params._dummy(), "updater",
        "A comma separated string defining the sequence of tree updaters to run, "
        "providing a modular way to construct and to modify the trees. "
        "This is an advanced parameter that is usually set automatically, "
        "depending on some other parameters. Can be "
        "'grow_colmaker', 'distcol', 'grow_histmaker', 'grow_local_histmaker',"
        "'grow_skmaker', 'sync', 'refresh', 'prune'.",
        typeConverter=TypeConverters.toString)

    refresh_leaf = Param(
        Params._dummy(), "refresh_leaf",
        "This is a parameter of the 'refresh' updater plugin. When set to true, tree leaves and"
        "tree node stats are updated. When set to false, only tree node stats are updated.",
        typeConverter=TypeConverters.toInt)

    process_type = Param(
        Params._dummy(), "process_type",
        "The type of boosting process to run. Can be 'default', 'update'",
        typeConverter=TypeConverters.toString)

    grow_policy = Param(
        Params._dummy(), "grow_policy",
        "Controls the way that new nodes are added to the tree. Currently supported"
        "only if tree_method is set to hist. Can be 'depthwise', 'lossguide'",
        typeConverter=TypeConverters.toString)

    max_leaves = Param(
        Params._dummy(), "max_leaves",
        "Maximum number of nodes to be added. Relevant only if grow_policy = lossguide.",
        typeConverter=TypeConverters.toInt)

    max_bin = Param(
        Params._dummy(), "max_bin",
        "Maximum number of discrete bins to bucket continuous features."
        "Used only if tree_method = hist.",
        typeConverter=TypeConverters.toInt)

    sample_type = Param(
        Params._dummy(), "sample_type",
        "Type of sampling algorithm. Can be 'uniform' or 'weighted'."
        "'uniform': dropped trees are selected uniformly."
        "'weighted': dropped trees are selected in proportion to weight.",
        typeConverter=TypeConverters.toString)

    normalize_type = Param(
        Params._dummy(), "normalize_type",
        "type of normalization algorithm. Can be 'tree' or 'forest'"
        "'tree': new trees have the same weight of each of dropped trees."
        "'forest': new trees have the same weight of sum of dropped trees (forest).",
        typeConverter=TypeConverters.toString)

    rate_drop = Param(
        Params._dummy(), "rate_drop",
        "dropout rate (a fraction of previous trees to drop during the dropout). "
        "Must be in [0.0, 1.0]",
        typeConverter=TypeConverters.toFloat)

    one_drop = Param(
        Params._dummy(), "one_drop",
        "When this flag is enabled, at least one tree is always dropped during the dropout.",
        typeConverter=TypeConverters.toInt)

    skip_drop = Param(
        Params._dummy(), "skip_drop",
        "Probability of skipping the dropout procedure during a boosting iteration."
        "Must be in [0.0, 1.0]",
        typeConverter=TypeConverters.toFloat)

    lambda_bias = Param(
        Params._dummy(), "lambda_bias",
        "L2 regularization term on bias. Must be in [0, 1].",
        typeConverter=TypeConverters.toFloat)

    tweedie_variance_power = Param(
        Params._dummy(), "tweedie_variance_power",
        "parameter that controls the variance of the Tweedie distribution. Must be in (1.0, 2.0).",
        typeConverter=TypeConverters.toFloat)

    objective = Param(
        Params._dummy(), "objective",
        "Specifies the learning objective."
        "\"reg:logistic\" --logistic regression "
        "\"binary:logistic\" --logistic regression for binary classification, "
        "output is probability "
        "\"binary:logitraw\" --logistic regression for binary classification, output is"
        " score before logistic transformation "
        "\"count:poisson\" --poisson regression for count data, output mean of poisson"
        " distribution max_delta_step is set to 0.7 by default in poisson regression "
        "(used to safeguard optimization) "
        "\"multi:softmax\" --multiclass classification using the softmax objective. "
        "You also need to set num_class(number of classes)"
        "\"multi:softprob\" --same as softmax, but output a vector of ndata * nclass, "
        "which can be further reshaped to ndata, nclass matrix. "
        "The result contains predicted probability of each data point belonging to each class. "
        "\"rank:pairwise\" --set XGBoost to do ranking task by minimizing the pairwise loss "
        "\"reg:gamma\" --gamma regression with log-link. Output is a mean of gamma distribution. "
        "It might be useful, e.g., for modeling insurance claims severity, or for any outcome "
        "that might be gamma-distributed"
        "\"reg:tweedie\" --Tweedie regression with log-link. It might be useful, e.g., for "
        "modeling total loss in insurance, or for any outcome that might be"
        " Tweedie-distributed.",
        typeConverter=TypeConverters.toString)

    num_class = Param(
        Params._dummy(), "num_class",
        "Number of classes. >= 1",
        typeConverter=TypeConverters.toInt)

    base_score = Param(
        Params._dummy(), "base_score",
        "the initial prediction score of all instances, global bias. Value range: [0.0, 1.0]",
        typeConverter=TypeConverters.toFloat)

    eval_metric = Param(
        Params._dummy(), "eval_metric",
        "Evaluation metrics for validation data. A default metric will be assigned according to"
        " objective (rmse for regression, and error for classification, mean average "
        "precision for ranking). Values: 'rmse', 'mae', 'logloss', 'error', 'error@t', 'merror',"
        "'mlogloss', 'auc', 'ndcg', 'ndcg@n', 'ndcg@n-', 'map-', 'map@n-'.",
        typeConverter=TypeConverters.toString)

    seed = Param(
        Params._dummy(), "seed",
        "Random number seed",
        typeConverter=TypeConverters.toInt)

    num_round = Param(
        Params._dummy(), "num_round",
        "The number of rounds to run the training. Must be >= 1",
        typeConverter=TypeConverters.toInt)
    def __init__(self,
                 trainingInstanceType,
                 trainingInstanceCount,
                 endpointInstanceType,
                 endpointInitialInstanceCount,
                 sagemakerRole=IAMRoleFromConfig(),
                 requestRowSerializer=LibSVMRequestRowSerializer(),
                 responseRowDeserializer=XGBoostCSVRowDeserializer(),
                 trainingInputS3DataPath=S3AutoCreatePath(),
                 trainingOutputS3DataPath=S3AutoCreatePath(),
                 trainingInstanceVolumeSizeInGB=1024,
                 trainingProjectedColumns=None,
                 trainingChannelName="train",
                 trainingContentType=None,
                 trainingS3DataDistribution="ShardedByS3Key",
                 trainingSparkDataFormat="libsvm",
                 trainingSparkDataFormatOptions=None,
                 trainingInputMode="File",
                 trainingCompressionCodec=None,
                 trainingMaxRuntimeInSeconds=24*60*60,
                 trainingKmsKeyId=None,
                 modelEnvironmentVariables=None,
                 endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
                 sagemakerClient=SageMakerClients.create_sagemaker_client(),
                 region=None,
                 s3Client=SageMakerClients.create_s3_default_client(),
                 stsClient=SageMakerClients.create_sts_default_client(),
                 modelPrependInputRowsToTransformationRows=True,
                 deleteStagingDataAfterTraining=True,
                 namePolicyFactory=RandomNamePolicyFactory(),
                 uid=None):
        """Create the estimator wrapper; see the class docstring for arguments."""
        # Mutable containers are created per-call rather than used as defaults,
        # so instances never share state.
        if trainingSparkDataFormatOptions is None:
            trainingSparkDataFormatOptions = {}
        if modelEnvironmentVariables is None:
            modelEnvironmentVariables = {}
        if uid is None:
            uid = Identifiable._randomUID()
        # Forward every constructor argument, by name, to the base class
        # (which builds the wrapped JVM estimator). ``locals()`` must be
        # captured before any other local variable is introduced.
        kwargs = locals().copy()
        del kwargs['self']
        super(XGBoostSageMakerEstimator, self).__init__(**kwargs)
    def _get_java_obj(self, **kwargs):
        # Instantiate the wrapped Scala estimator. The positional argument
        # order must match the Scala constructor exactly; values that are
        # optional on the Scala side are wrapped in Option().
        return self._new_java_obj(
            XGBoostSageMakerEstimator._wrapped_class,
            kwargs['sagemakerRole'],
            kwargs['trainingInstanceType'],
            kwargs['trainingInstanceCount'],
            kwargs['endpointInstanceType'],
            kwargs['endpointInitialInstanceCount'],
            kwargs['requestRowSerializer'],
            kwargs['responseRowDeserializer'],
            kwargs['trainingInputS3DataPath'],
            kwargs['trainingOutputS3DataPath'],
            kwargs['trainingInstanceVolumeSizeInGB'],
            Option(kwargs['trainingProjectedColumns']),
            kwargs['trainingChannelName'],
            Option(kwargs['trainingContentType']),
            kwargs['trainingS3DataDistribution'],
            kwargs['trainingSparkDataFormat'],
            kwargs['trainingSparkDataFormatOptions'],
            kwargs['trainingInputMode'],
            Option(kwargs['trainingCompressionCodec']),
            kwargs['trainingMaxRuntimeInSeconds'],
            Option(kwargs['trainingKmsKeyId']),
            kwargs['modelEnvironmentVariables'],
            kwargs['endpointCreationPolicy'],
            kwargs['sagemakerClient'],
            Option(kwargs['region']),
            kwargs['s3Client'],
            kwargs['stsClient'],
            kwargs['modelPrependInputRowsToTransformationRows'],
            kwargs['deleteStagingDataAfterTraining'],
            kwargs['namePolicyFactory'],
            kwargs['uid']
        )
def getBooster(self):
    """Return the current (or default) ``booster`` setting."""
    param = self.booster
    return self.getOrDefault(param)
def setBooster(self, value):
    """Set ``booster``; one of 'gbtree', 'gblinear' or 'dart'."""
    if value not in ('gbtree', 'gblinear', 'dart'):
        message = "booster must be 'gbtree', 'gblinear' or 'dart'. got: %s" % value
        raise ValueError(message)
    self._set(booster=value)
def getSilent(self):
    """Return the current (or default) ``silent`` flag (0 or 1)."""
    param = self.silent
    return self.getOrDefault(param)
def setSilent(self, value):
    """Set ``silent``; 0 prints running messages, 1 suppresses them."""
    if value not in (0, 1):
        message = "silent must be either 0 or 1. got: %s" % value
        raise ValueError(message)
    self._set(silent=value)
def getNThread(self):
    """Return the current (or default) ``nthread`` value."""
    param = self.nthread
    return self.getOrDefault(param)
def setNThread(self, value):
    """Set ``nthread``; must be at least 1."""
    if value < 1:
        message = "nthread must be >= 1 got: %s" % value
        raise ValueError(message)
    self._set(nthread=value)
def getEta(self):
    """Return the current (or default) learning rate ``eta``."""
    param = self.eta
    return self.getOrDefault(param)
def setEta(self, value):
    """Set ``eta``; must lie in [0.0, 1.0]."""
    if value < 0 or value > 1:
        message = "eta must be within range [0.0, 1.0] got: %s" % value
        raise ValueError(message)
    self._set(eta=value)
def getGamma(self):
    """Return the current (or default) ``gamma`` value."""
    param = self.gamma
    return self.getOrDefault(param)
def setGamma(self, value):
    """Set ``gamma``; must be non-negative."""
    if value < 0:
        message = "gamma must be >= 0 got: %s" % value
        raise ValueError(message)
    self._set(gamma=value)
def getMaxDepth(self):
    """Return the current (or default) ``max_depth`` value."""
    param = self.max_depth
    return self.getOrDefault(param)
def setMaxDepth(self, value):
    """Set ``max_depth``, the maximum depth of a tree (0 means no limit).

    Args:
        value (int): the new maximum tree depth; must be >= 0.

    Raises:
        ValueError: if ``value`` is negative.
    """
    if value < 0:
        # Bug fix: the message previously referred to "gamma" (copy/paste
        # error from setGamma); it now names the parameter actually set.
        raise ValueError("max_depth must be >= 0 got: %s" % value)
    self._set(max_depth=value)
def getMinChildWeight(self):
    """Return the current (or default) ``min_child_weight`` value."""
    param = self.min_child_weight
    return self.getOrDefault(param)
def setMinChildWeight(self, value):
if value < 0:
raise ValueError("min_child_weight must be >= 0 got: %s" % value)
self._set(min_child_weight=value)
def getMaxDeltaStep(self):
return self.getOrDefault(self.max_delta_step)
def setMaxDeltaStep(self, value):
if value < 0:
raise ValueError("max_delta_weight must be >=0 got: %s" % value)
self._set(max_delta_step=value)
    def getSubsample(self):
        """Return ``subsample``, the row subsampling ratio of the training instances."""
        return self.getOrDefault(self.subsample)
    def setSubsample(self, value):
        """Set ``subsample``; must lie in (0, 1]."""
        if value <= 0 or value > 1:
            raise ValueError("subsample must be in range (0, 1] got: %s" % value)
        self._set(subsample=value)
    def getColSampleByTree(self):
        """Return ``colsample_bytree``, the column subsampling ratio per tree."""
        return self.getOrDefault(self.colsample_bytree)
    def setColSampleByTree(self, value):
        """Set ``colsample_bytree``; must lie in (0, 1]."""
        if value <= 0 or value > 1:
            raise ValueError("colsample_bytree must be in range (0, 1] got: %s" % value)
        self._set(colsample_bytree=value)
    def getColSampleByLevel(self):
        """Return ``colsample_bylevel``, the column subsampling ratio per level."""
        return self.getOrDefault(self.colsample_bylevel)
def setColSampleByLevel(self, value):
if value <= 0 or value > 1:
raise ValueError("colsample_by_level must be in range (0, 1] got: %s" % value)
self._set(colsample_bylevel=value)
    def getLambda(self):
        """Return the L2 weight regularization term (stored as ``lambda_weights``)."""
        return self.getOrDefault(self.lambda_weights)
    def setLambda(self, value):
        """Set the L2 regularization term on weights (no range restriction)."""
        self._set(lambda_weights=value)
    def getAlpha(self):
        """Return ``alpha``, the L1 regularization term on weights."""
        return self.getOrDefault(self.alpha)
    def setAlpha(self, value):
        """Set the L1 regularization term on weights (no range restriction)."""
        self._set(alpha=value)
    def getTreeMethod(self):
        """Return ``tree_method``, the tree construction algorithm."""
        return self.getOrDefault(self.tree_method)
    def setTreeMethod(self, value):
        """Set ``tree_method``; one of 'auto', 'exact', 'approx' or 'hist'."""
        if value not in ("auto", "exact", "approx", "hist"):
            raise ValueError("tree_method must be one of: 'auto', 'exact', 'approx', 'hist', "
                             "got: %s" % value)
        self._set(tree_method=value)
    def getSketchEps(self):
        """Return ``sketch_eps``, the sketch accuracy used by the 'approx' method."""
        return self.getOrDefault(self.sketch_eps)
    def setSketchEps(self, value):
        """Set ``sketch_eps``; must lie in (0, 1)."""
        if value <= 0 or value >= 1:
            raise ValueError("sketch_eps must be in range (0, 1) got: %s" % value)
        self._set(sketch_eps=value)
    def getScalePosWeight(self):
        """Return ``scale_pos_weight``, the balance of positive/negative weights."""
        return self.getOrDefault(self.scale_pos_weight)
    def setScalePosWeight(self, value):
        """Set ``scale_pos_weight`` (no range restriction)."""
        self._set(scale_pos_weight=value)
    def getUpdater(self):
        """Return ``updater``, the comma separated list of tree updater plugins."""
        return self.getOrDefault(self.updater)
def setUpdater(self, value):
valid_tokens = ("grow_colmaker", "distcol", "grow_histmaker", "grow_local_histmaker",
"grow_skmaker", "sync", "refresh", "prune")
tokens = value.split(",")
for token in tokens:
if token.strip() not in valid_tokens:
raise ValueError("values allowed in updater are: %s, found: %s " %
(','.join(valid_tokens), token))
self._set(updater=value)
    def getRefreshLeaf(self):
        """Return ``refresh_leaf`` (1 refreshes leaf stats as well as node stats)."""
        return self.getOrDefault(self.refresh_leaf)
    def setRefreshLeaf(self, value):
        """Set ``refresh_leaf``; only 0 or 1 is accepted."""
        if value not in (0, 1):
            raise ValueError("refresh_leaf must be either 0 or 1, got: %s" % value)
        self._set(refresh_leaf=value)
    def getProcessType(self):
        """Return ``process_type``, the boosting process to run."""
        return self.getOrDefault(self.process_type)
    def setProcessType(self, value):
        """Set ``process_type``; 'default' or 'update'."""
        if value not in ("default", "update"):
            raise ValueError("process_type must be 'default' or 'update', got: %s" % value)
        self._set(process_type=value)
    def getGrowPolicy(self):
        """Return ``grow_policy``, the node growth policy."""
        return self.getOrDefault(self.grow_policy)
    def setGrowPolicy(self, value):
        """Set ``grow_policy``; 'depthwise' or 'lossguide'."""
        if value not in ("depthwise", "lossguide"):
            raise ValueError("grow_policy must be 'depthwise' or 'lossguide', got: %s" % value)
        self._set(grow_policy=value)
    def getMaxLeaves(self):
        """Return ``max_leaves``, the maximum number of nodes to be added."""
        return self.getOrDefault(self.max_leaves)
    def setMaxLeaves(self, value):
        """Set ``max_leaves``; must be >= 0."""
        if value < 0:
            raise ValueError("max_leaves must be >=0, got: %s" % value)
        self._set(max_leaves=value)
    def getMaxBin(self):
        """Return ``max_bin``, the number of discrete bins for continuous features."""
        return self.getOrDefault(self.max_bin)
    def setMaxBin(self, value):
        """Set ``max_bin``; must be >= 1."""
        if value < 1:
            raise ValueError("max_bin must be >=1, got: %s" % value)
        self._set(max_bin=value)
    def getSampleType(self):
        """Return ``sample_type`` (DART booster sampling algorithm)."""
        return self.getOrDefault(self.sample_type)
    def setSampleType(self, value):
        """Set ``sample_type``; 'uniform' or 'weighted'."""
        if value not in ("uniform", "weighted"):
            raise ValueError("sample_type must be 'uniform' or 'weighted', got: %s" % value)
        self._set(sample_type=value)
    def getNormalizeType(self):
        """Return ``normalize_type`` (DART booster weight-normalization algorithm)."""
        return self.getOrDefault(self.normalize_type)
    def setNormalizeType(self, value):
        """Set ``normalize_type``; 'tree' or 'forest'."""
        if value not in ("tree", "forest"):
            raise ValueError("normalize_type must be 'tree' or 'forest', got: %s" % value)
        self._set(normalize_type=value)
    def getRateDrop(self):
        """Return ``rate_drop``, the DART dropout rate."""
        return self.getOrDefault(self.rate_drop)
    def setRateDrop(self, value):
        """Set ``rate_drop``; must lie in [0.0, 1.0]."""
        if value < 0 or value > 1:
            raise ValueError("rate_drop must be in range [0.0, 1.0], got: %s" % value)
        self._set(rate_drop=value)
    def getOneDrop(self):
        """Return ``one_drop`` (1 forces at least one tree to be dropped)."""
        return self.getOrDefault(self.one_drop)
    def setOneDrop(self, value):
        """Set ``one_drop``; only 0 or 1 is accepted."""
        if value not in (0, 1):
            raise ValueError("one_drop must be 0 or 1, got: %s" % value)
        self._set(one_drop=value)
    def getSkipDrop(self):
        """Return ``skip_drop``, the probability of skipping dropout."""
        return self.getOrDefault(self.skip_drop)
    def setSkipDrop(self, value):
        """Set ``skip_drop``; must lie in [0.0, 1.0]."""
        if value < 0 or value > 1:
            raise ValueError("skip_drop must be in range [0.0, 1.0], got: %s" % value)
        self._set(skip_drop=value)
    def getLambdaBias(self):
        """Return ``lambda_bias``, the L2 regularization term on the bias."""
        return self.getOrDefault(self.lambda_bias)
def setLambdaBias(self, value):
if value < 0 or value > 1:
raise ValueError("lambda_bias must in range [0.0, 1.0], got: %s" % value)
self._set(lambda_bias=value)
    def getTweedieVariancePower(self):
        """Return ``tweedie_variance_power`` (used with the 'reg:tweedie' objective)."""
        return self.getOrDefault(self.tweedie_variance_power)
    def setTweedieVariancePower(self, value):
        """Set ``tweedie_variance_power``; must lie in (1.0, 2.0)."""
        if value <= 1 or value >= 2:
            raise ValueError("tweedie_variance_power must be in range (1.0, 2.0), got: %s" % value)
        self._set(tweedie_variance_power=value)
    def getObjective(self):
        """Return ``objective``, the learning objective."""
        return self.getOrDefault(self.objective)
    def setObjective(self, value):
        """Set ``objective``; restricted to the whitelist below.

        NOTE(review): 'binary:logistraw' looks like a typo of XGBoost's
        'binary:logitraw', but it is kept as-is here since callers may already
        rely on this validation -- confirm against the Scala SDK before changing.
        """
        allowed_values = ("reg:linear", "reg:logistic", "binary:logistic", "binary:logistraw",
                          "count:poisson", "multi:softmax", "multi:softprob", "rank:pairwise",
                          "reg:gamma", "reg:tweedie")
        if value not in allowed_values:
            raise ValueError("objective must be one of (%s), got: %s" %
                             (','.join(allowed_values), value))
        self._set(objective=value)
    def getNumClasses(self):
        """Return ``num_class``, the number of classes for multiclass objectives."""
        return self.getOrDefault(self.num_class)
    def setNumClasses(self, value):
        """Set ``num_class``; must be >= 1."""
        if value < 1:
            raise ValueError("num_class must be >=1, got: %s" % value)
        self._set(num_class=value)
    def getBaseScore(self):
        """Return ``base_score``, the initial prediction score of all instances."""
        return self.getOrDefault(self.base_score)
    def setBaseScore(self, value):
        """Set ``base_score`` (no range restriction)."""
        self._set(base_score=value)
    def getEvalMetric(self):
        """Return ``eval_metric``, the evaluation metric for validation data."""
        return self.getOrDefault(self.eval_metric)
    def setEvalMetric(self, value):
        """Set ``eval_metric``; restricted to the whitelist below."""
        allowed_values = ("rmse", "mae", "logloss", "error", "error@t", "merror",
                          "mlogloss", "auc", "ndcg", "map", "ndcg@n", "ndcg-", "ndcg@n-",
                          "map-", "map@n-")
        if value not in allowed_values:
            raise ValueError("eval_metric must be one of (%s), got: %s" %
                             (','.join(allowed_values), value))
        self._set(eval_metric=value)
    def getSeed(self):
        """Return ``seed``, the random number seed."""
        return self.getOrDefault(self.seed)
    def setSeed(self, value):
        """Set the random number ``seed`` (any value accepted)."""
        self._set(seed=value)
    def getNumRound(self):
        """Return ``num_round``, the number of boosting rounds."""
        return self.getOrDefault(self.num_round)
    def setNumRound(self, value):
        """Set ``num_round``; must be >= 1."""
        if value < 1:
            raise ValueError("num_round must be >= 1, got: %s" % value)
        self._set(num_round=value)
@classmethod
def _from_java(cls, javaObject):
return XGBoostSageMakerEstimator(sagemakerRole=None, javaObject=javaObject) | /sagemaker_pyspark-1.4.5.tar.gz/sagemaker_pyspark-1.4.5/src/sagemaker_pyspark/algorithms/XGBoostSageMakerEstimator.py | 0.899055 | 0.64777 | XGBoostSageMakerEstimator.py | pypi |
import numbers
from pyspark.ml.param import Params, Param, TypeConverters
from pyspark.ml.util import Identifiable
from sagemaker_pyspark import (SageMakerEstimatorBase, S3AutoCreatePath, Option, IAMRoleFromConfig,
EndpointCreationPolicy, SageMakerClients, RandomNamePolicyFactory)
from sagemaker_pyspark.transformation.serializers import ProtobufRequestRowSerializer
from sagemaker_pyspark.transformation.deserializers import (
LinearLearnerBinaryClassifierProtobufResponseRowDeserializer,
LinearLearnerMultiClassClassifierProtobufResponseRowDeserializer,
LinearLearnerRegressorProtobufResponseRowDeserializer)
class LinearLearnerParams(Params):
    """Hyperparameter Params shared by the SageMaker Linear Learner estimators.

    Each ``Param`` mirrors an Amazon SageMaker Linear Learner training
    hyperparameter; the companion get/set methods validate values client-side
    before the training job is submitted.

    Fixes in this revision are to the user-visible doc strings only: words
    fused at string-literal joins (missing trailing spaces) and typos
    ("regrssion", "calcuating", "evaluatedon", "Must >= 0", "Must be in >= 0").
    """
    feature_dim = Param(Params._dummy(), "feature_dim",
                        "The dimension of the input vectors. Must be > 0. ",
                        typeConverter=TypeConverters.toInt)
    mini_batch_size = Param(Params._dummy(), "mini_batch_size",
                            "The number of examples in a mini-batch. Must be > 0. ",
                            typeConverter=TypeConverters.toInt)
    epochs = Param(Params._dummy(), "epochs",
                   "The number of passes done over the training data. Must be > 0. ",
                   typeConverter=TypeConverters.toInt)
    predictor_type = Param(Params._dummy(), "predictor_type",
                           "Whether training is for binary classification or regression. "
                           "Supported options: 'binary_classifier', and 'regressor'. ",
                           typeConverter=TypeConverters.toString)
    use_bias = Param(Params._dummy(), "use_bias",
                     "Whether model should include bias. ",
                     typeConverter=TypeConverters.toString)
    num_models = Param(Params._dummy(), "num_models",
                       "Number of models to train in parallel. Must be > 0 or 'auto'. ",
                       typeConverter=TypeConverters.toString)
    num_calibration_samples = Param(Params._dummy(), "num_calibration_samples",
                                    "Number of samples to use from validation dataset for doing "
                                    "model calibration (finding the best threshold). "
                                    "Must be > 0.",
                                    typeConverter=TypeConverters.toInt)
    init_method = Param(Params._dummy(), "init_method",
                        "Initialization function for the model weights. "
                        "Supported options: 'uniform' and 'normal'. ",
                        typeConverter=TypeConverters.toString)
    init_scale = Param(Params._dummy(), "init_scale",
                       "Scale for init method uniform. Must be > 0. ",
                       typeConverter=TypeConverters.toFloat)
    init_sigma = Param(Params._dummy(), "init_sigma",
                       "Standard deviation for init method normal. Must be > 0. ",
                       typeConverter=TypeConverters.toFloat)
    init_bias = Param(Params._dummy(), "init_bias",
                      "Initial weight for bias. ",
                      typeConverter=TypeConverters.toFloat)
    optimizer = Param(Params._dummy(), "optimizer",
                      "Which optimizer is to be used. Supported options: "
                      "'sgd', 'adam', 'rmsprop' and 'auto'. ",
                      typeConverter=TypeConverters.toString)
    loss = Param(Params._dummy(), "loss",
                 "The loss function to apply. Supported options: "
                 "'logistic', 'squared_loss', 'absolute_loss', 'hinge_loss', "
                 "'eps_insensitive_squared_loss', 'eps_insensitive_absolute_loss', "
                 "'quantile_loss', 'huber_loss', 'softmax_loss', 'auto'.",
                 typeConverter=TypeConverters.toString)
    wd = Param(Params._dummy(), "wd",
               "The L2 regularization, i.e. the weight decay parameter. "
               "Must be >= 0. ",
               typeConverter=TypeConverters.toFloat)
    l1 = Param(Params._dummy(), "l1",
               "The L1 regularization parameter. Use 0 for no L1 regularization. "
               "Must be >= 0. ",
               typeConverter=TypeConverters.toFloat)
    momentum = Param(Params._dummy(), "momentum",
                     "Momentum parameter of sgd optimizer. Must be in range [0, 1). ",
                     typeConverter=TypeConverters.toFloat)
    learning_rate = Param(Params._dummy(), "learning_rate",
                          "The learning rate. The default 'auto' will depend upon the optimizer "
                          "selected. Must be > 0 or 'auto'. ",
                          typeConverter=TypeConverters.toString)
    beta_1 = Param(Params._dummy(), "beta_1",
                   "Parameter specific to adam optimizer. exponential decay rate for first moment "
                   "estimates. Must be in range [0, 1). ",
                   typeConverter=TypeConverters.toFloat)
    beta_2 = Param(Params._dummy(), "beta_2",
                   "Parameter specific to adam optimizer. exponential decay rate for second moment "
                   "estimates. Must be in range [0, 1). ",
                   typeConverter=TypeConverters.toFloat)
    bias_lr_mult = Param(Params._dummy(), "bias_lr_mult",
                         "Allows different learning rate for the bias term. "
                         "The actual learning rate for the bias is learning rate times "
                         "bias_lr_mult. Must be > 0. ",
                         typeConverter=TypeConverters.toFloat)
    bias_wd_mult = Param(Params._dummy(), "bias_wd_mult",
                         "Allows different learning rate for the bias term. "
                         "The actual L2 regularization weight for the bias is wd times "
                         "bias_wd_mult. Must be >= 0. ",
                         typeConverter=TypeConverters.toFloat)
    use_lr_scheduler = Param(Params._dummy(), "use_lr_scheduler",
                             "Whether to use a scheduler for the learning rate. ",
                             typeConverter=TypeConverters.toString)
    lr_scheduler_step = Param(Params._dummy(), "lr_scheduler_step",
                              "Parameter specific to lr_scheduler. "
                              "The number of steps between decreases of the learning rate. "
                              "Must be > 0. ",
                              typeConverter=TypeConverters.toInt)
    lr_scheduler_factor = Param(Params._dummy(), "lr_scheduler_factor",
                                "Parameter specific to lr_scheduler. "
                                "Every lr_scheduler_step the learning rate will decrease by this "
                                "quantity. Must be in (0, 1). ",
                                typeConverter=TypeConverters.toFloat)
    lr_scheduler_minimum_lr = Param(Params._dummy(), "lr_scheduler_minimum_lr",
                                    "Parameter specific to lr_scheduler. "
                                    "The learning rate will never decrease to a value lower than "
                                    "lr_scheduler_minimum_lr. Must be > 0. ",
                                    typeConverter=TypeConverters.toFloat)
    normalize_data = Param(Params._dummy(), "normalize_data",
                           "Whether to normalize the features before training to have "
                           "std_dev of 1.",
                           typeConverter=TypeConverters.toString)
    normalize_label = Param(Params._dummy(), "normalize_label",
                            "Whether regression label is normalized. "
                            "If set for classification, it will be ignored.",
                            typeConverter=TypeConverters.toString)
    unbias_data = Param(Params._dummy(), "unbias_data",
                        "Whether to unbias the features before training so that mean is 0. "
                        "By default data is unbiased if use_bias is set to true.",
                        typeConverter=TypeConverters.toString)
    unbias_label = Param(Params._dummy(), "unbias_label",
                         "Whether to unbias the labels before training so that mean is 0. "
                         "Only done for regression if use_bias is true. Otherwise will be ignored.",
                         typeConverter=TypeConverters.toString)
    num_point_for_scaler = Param(Params._dummy(), "num_point_for_scaler",
                                 "Number of data points to use for calculating the "
                                 "normalizing / unbiasing terms. Must be > 0",
                                 typeConverter=TypeConverters.toInt)
    early_stopping_patience = Param(Params._dummy(), "early_stopping_patience",
                                    "The number of epochs to wait before ending training if no "
                                    "improvement is made in the relevant metric. The metric is "
                                    "the binary_classifier_model_selection_criteria if provided, "
                                    "otherwise the metric is the same as loss. The metric is "
                                    "evaluated on the validation data. If no validation data is "
                                    "provided, the metric is always the same as loss and is "
                                    "evaluated on the training data. To disable early stopping, "
                                    "set early_stopping_patience to a value larger than epochs. "
                                    "Must be > 0",
                                    typeConverter=TypeConverters.toInt)
    early_stopping_tolerance = Param(Params._dummy(), "early_stopping_tolerance",
                                     "Relative tolerance to measure an improvement in loss. If the "
                                     "ratio of the improvement in loss divided by the previous best "
                                     "loss is smaller than this value, early stopping will consider "
                                     "the improvement to be zero. Must be > 0. ",
                                     typeConverter=TypeConverters.toFloat)
    margin = Param(Params._dummy(), "margin",
                   "Margin for hinge_loss. Must be > 0. ",
                   typeConverter=TypeConverters.toFloat)
    quantile = Param(Params._dummy(), "quantile",
                     "Quantile for quantile loss. For quantile q, the model will attempt to "
                     "produce predictions such that true_label < prediction with probability q. "
                     "Must be in (0, 1). ",
                     typeConverter=TypeConverters.toFloat)
    loss_insensitivity = Param(Params._dummy(), "loss_insensitivity",
                               "Parameter for epsilon insensitive loss type. During training and "
                               "metric evaluation, any error smaller than this is considered to be "
                               "zero. Must be > 0. ",
                               typeConverter=TypeConverters.toFloat)
    huber_delta = Param(Params._dummy(), "huber_delta",
                        "Parameter for Huber loss. During training and metric evaluation, compute "
                        "L2 loss for errors smaller than delta and L1 loss for errors larger than "
                        "delta. Must be > 0. ",
                        typeConverter=TypeConverters.toFloat)
    f_beta = Param(Params._dummy(), "f_beta",
                   "The value of beta to use when calculating F score metrics for binary or "
                   "multiclass classification. Also used if "
                   "binary_classifier_model_selection_criteria is f_beta. Must be > 0. ",
                   typeConverter=TypeConverters.toFloat)
    def getFeatureDim(self):
        """Return ``feature_dim``, the dimension of the input vectors."""
        return self.getOrDefault(self.feature_dim)
    def setFeatureDim(self, value):
        """Set ``feature_dim``; must be > 0."""
        if value <= 0:
            raise ValueError("feature_dim must be > 0. Got %s" % value)
        self._set(feature_dim=value)
    def getMiniBatchSize(self):
        """Return ``mini_batch_size``, the number of examples per mini-batch."""
        return self.getOrDefault(self.mini_batch_size)
    def setMiniBatchSize(self, size):
        """Set ``mini_batch_size``; must be > 0."""
        if size <= 0:
            raise ValueError("mini_batch_size must be > 0. Got %s" % size)
        self._set(mini_batch_size=size)
    def getEpochs(self):
        """Return ``epochs``, the number of passes over the training data."""
        return self.getOrDefault(self.epochs)
    def setEpochs(self, value):
        """Set ``epochs``; must be > 0."""
        if value < 1:
            raise ValueError("Epochs must be > 0, got: %s" % value)
        self._set(epochs=value)
def getUseBias(self):
value = self.getOrDefault(self.use_bias)
if value == 'True':
return True
else:
return False
    def setUseBias(self, value):
        """Set ``use_bias``; accepts the strings 'True' or 'False' only."""
        if value not in ('True', 'False'):
            raise ValueError("use_bias must be 'True' or 'False', got %s" % value)
        self._set(use_bias=value)
    def getNumModels(self):
        """Return ``num_models`` (stored as a string; may be 'auto')."""
        return self.getOrDefault(self.num_models)
def setNumModels(self, value):
if isinstance(value, numbers.Real) and value < 1:
raise ValueError("num_models must be 'auto' or > 0, got: %s" % value)
if value != 'auto' and int(value) < 1:
raise ValueError("num_models must be 'auto' or > 0, got: %s" % value)
self._set(num_models=str(value))
    def getNumCalibrationSamples(self):
        """Return ``num_calibration_samples``."""
        return self.getOrDefault(self.num_calibration_samples)
    def setNumCalibrationSamples(self, value):
        """Set ``num_calibration_samples``; must be > 0."""
        if value < 1:
            raise ValueError("num_calibration_samples must be > 0, got: %s" % value)
        self._set(num_calibration_samples=value)
    def getInitMethod(self):
        """Return ``init_method``, the weight initialization distribution."""
        return self.getOrDefault(self.init_method)
    def setInitMethod(self, value):
        """Set ``init_method``; 'uniform' or 'normal'."""
        if value not in ('uniform', 'normal'):
            raise ValueError("init_method must be 'uniform' or 'normal', got: %s" % value)
        self._set(init_method=value)
    def getInitScale(self):
        """Return ``init_scale`` (used with init_method='uniform')."""
        return self.getOrDefault(self.init_scale)
    def setInitScale(self, value):
        """Set ``init_scale``; must be > 0."""
        if value <= 0:
            raise ValueError("init_scale must be > 0, got: %s" % value)
        self._set(init_scale=value)
    def getInitSigma(self):
        """Return ``init_sigma`` (used with init_method='normal')."""
        return self.getOrDefault(self.init_sigma)
    def setInitSigma(self, value):
        """Set ``init_sigma``; must be > 0."""
        if value <= 0:
            raise ValueError("init_sigma must be > 0, got: %s" % value)
        self._set(init_sigma=value)
    def getInitBias(self):
        """Return ``init_bias``, the initial bias weight."""
        return self.getOrDefault(self.init_bias)
    def setInitBias(self, value):
        """Set ``init_bias`` (no range restriction)."""
        self._set(init_bias=value)
    def getOptimizer(self):
        """Return ``optimizer``."""
        return self.getOrDefault(self.optimizer)
    def setOptimizer(self, value):
        """Set ``optimizer``; 'sgd', 'adam', 'rmsprop' or 'auto'."""
        if value not in ('sgd', 'adam', 'rmsprop', 'auto'):
            raise ValueError("optimizer must be 'sgd', 'adam', 'rmsprop', 'auto', got: %s" % value)
        self._set(optimizer=value)
    def getLoss(self):
        """Return ``loss``, the training loss function."""
        return self.getOrDefault(self.loss)
    def setLoss(self, value):
        """Set ``loss``; restricted to the whitelist below."""
        if value not in ('logistic', 'squared_loss', 'absolute_loss', 'hinge_loss',
                         'eps_insensitive_squared_loss', 'eps_insensitive_absolute_loss',
                         'quantile_loss', 'huber_loss', 'softmax_loss', 'auto'):
            raise ValueError("loss must be 'logistic', 'squared_loss', 'absolute_loss',"
                             "'hinge_loss', 'eps_insensitive_squared_loss',"
                             "'eps_insensitive_absolute_loss', 'quantile_loss', 'huber_loss',"
                             "'softmax_loss', 'auto', "
                             "got: %s" % value)
        self._set(loss=value)
    def getWd(self):
        """Return ``wd``, the L2 regularization (weight decay) parameter."""
        return self.getOrDefault(self.wd)
    def setWd(self, value):
        """Set ``wd``; must be >= 0."""
        if value < 0:
            raise ValueError("wd must be >= 0, got: %s" % value)
        self._set(wd=value)
    def getL1(self):
        """Return ``l1``, the L1 regularization parameter."""
        return self.getOrDefault(self.l1)
    def setL1(self, value):
        """Set ``l1``; must be >= 0 (0 disables L1 regularization)."""
        if value < 0:
            raise ValueError("l1 must be >= 0, got: %s" % value)
        self._set(l1=value)
    def getMomentum(self):
        """Return ``momentum`` (sgd optimizer only)."""
        return self.getOrDefault(self.momentum)
    def setMomentum(self, value):
        """Set ``momentum``; must lie in [0, 1)."""
        if value >= 1 or value < 0:
            raise ValueError("momentum must be within [0, 1), got: %s" % value)
        self._set(momentum=value)
    def getLearningRate(self):
        """Return ``learning_rate`` (stored as a string; may be 'auto')."""
        return self.getOrDefault(self.learning_rate)
def setLearningRate(self, value):
if isinstance(value, numbers.Real) and value <= 0:
raise ValueError("learning_rate must be 'auto' or > 0, got: %s" % value)
if value != 'auto' and float(value) <= 0:
raise ValueError("learning_rate must be 'auto' or > 0, got: %s" % value)
self._set(learning_rate=str(value))
    def getBeta1(self):
        """Return ``beta_1`` (adam optimizer only)."""
        return self.getOrDefault(self.beta_1)
    def setBeta1(self, value):
        """Set ``beta_1``; must lie in [0, 1)."""
        if value >= 1 or value < 0:
            raise ValueError("beta_1 must be within [0, 1), got: %s" % value)
        self._set(beta_1=value)
    def getBeta2(self):
        """Return ``beta_2`` (adam optimizer only)."""
        return self.getOrDefault(self.beta_2)
    def setBeta2(self, value):
        """Set ``beta_2``; must lie in [0, 1)."""
        if value >= 1 or value < 0:
            raise ValueError("beta_2 must be within [0, 1), got: %s" % value)
        self._set(beta_2=value)
    def getBiasLrMult(self):
        """Return ``bias_lr_mult``, the learning-rate multiplier for the bias."""
        return self.getOrDefault(self.bias_lr_mult)
    def setBiasLrMult(self, value):
        """Set ``bias_lr_mult``; must be > 0."""
        if value <= 0:
            raise ValueError("bias_lr_mult must be > 0, got: %s" % value)
        self._set(bias_lr_mult=value)
    def getBiasWdMult(self):
        """Return ``bias_wd_mult``, the weight-decay multiplier for the bias."""
        return self.getOrDefault(self.bias_wd_mult)
    def setBiasWdMult(self, value):
        """Set ``bias_wd_mult``; must be >= 0."""
        if value < 0:
            raise ValueError("bias_wd_mult must be >= 0, got: %s" % value)
        self._set(bias_wd_mult=value)
    def getUseLrScheduler(self):
        """Return ``use_lr_scheduler`` as a bool (Param stores 'True'/'False')."""
        value = self.getOrDefault(self.use_lr_scheduler)
        if value == 'True':
            return True
        else:
            return False
    def setUseLrScheduler(self, value):
        """Set ``use_lr_scheduler``; accepts 'True' or 'False' only."""
        if value not in ('True', 'False'):
            raise ValueError("use_lr_scheduler must be 'True' or 'False', got %s" % value)
        self._set(use_lr_scheduler=value)
    def getLrSchedulerStep(self):
        """Return ``lr_scheduler_step``."""
        return self.getOrDefault(self.lr_scheduler_step)
    def setLrSchedulerStep(self, value):
        """Set ``lr_scheduler_step``; must be > 0."""
        if value <= 0:
            raise ValueError("lr_scheduler_step must be > 0, got: %s" % value)
        self._set(lr_scheduler_step=value)
    def getLrSchedulerFactor(self):
        """Return ``lr_scheduler_factor``."""
        return self.getOrDefault(self.lr_scheduler_factor)
    def setLrSchedulerFactor(self, value):
        """Set ``lr_scheduler_factor``; must lie in (0, 1)."""
        if value >= 1 or value <= 0:
            raise ValueError("lr_scheduler_factor must be in (0, 1), got: %s" % value)
        self._set(lr_scheduler_factor=value)
    def getLrSchedulerMinimumLr(self):
        """Return ``lr_scheduler_minimum_lr``, the learning-rate floor."""
        return self.getOrDefault(self.lr_scheduler_minimum_lr)
    def setLrSchedulerMinimumLr(self, value):
        """Set ``lr_scheduler_minimum_lr``; must be > 0."""
        if value <= 0:
            raise ValueError("lr_scheduler_minimum_lr must be > 0, got: %s" % value)
        self._set(lr_scheduler_minimum_lr=value)
    def getNormalizeData(self):
        """Return ``normalize_data`` as a bool (Param stores 'True'/'False')."""
        value = self.getOrDefault(self.normalize_data)
        if value == 'True':
            return True
        else:
            return False
    def setNormalizeData(self, value):
        """Set ``normalize_data``; accepts 'True' or 'False' only."""
        if value not in ('True', 'False'):
            raise ValueError("normalize_data must be 'True' or 'False', got %s" % value)
        self._set(normalize_data=value)
    def getNormalizeLabel(self):
        """Return ``normalize_label`` as a bool (Param stores 'True'/'False')."""
        value = self.getOrDefault(self.normalize_label)
        if value == 'True':
            return True
        else:
            return False
    def setNormalizeLabel(self, value):
        """Set ``normalize_label``; accepts 'True' or 'False' only."""
        if value not in ('True', 'False'):
            raise ValueError("normalize_label must be 'True' or 'False', got %s" % value)
        self._set(normalize_label=value)
    def getUnbiasData(self):
        """Return ``unbias_data`` as a bool (Param stores 'True'/'False')."""
        value = self.getOrDefault(self.unbias_data)
        if value == 'True':
            return True
        else:
            return False
    def setUnbiasData(self, value):
        """Set ``unbias_data``; accepts 'True' or 'False' only."""
        if value not in ('True', 'False'):
            raise ValueError("unbias_data must be 'True' or 'False', got %s" % value)
        self._set(unbias_data=value)
    def getUnbiasLabel(self):
        """Return ``unbias_label`` as a bool (Param stores 'True'/'False')."""
        value = self.getOrDefault(self.unbias_label)
        if value == 'True':
            return True
        else:
            return False
    def setUnbiasLabel(self, value):
        """Set ``unbias_label``; accepts 'True' or 'False' only."""
        if value not in ('True', 'False'):
            raise ValueError("unbias_label must be 'True' or 'False', got %s" % value)
        self._set(unbias_label=value)
    def getNumPointForScaler(self):
        """Return ``num_point_for_scaler``."""
        return self.getOrDefault(self.num_point_for_scaler)
    def setNumPointForScaler(self, value):
        """Set ``num_point_for_scaler``; must be > 0."""
        if value <= 0:
            raise ValueError("num_point_for_scaler must be > 0, got: %s" % value)
        self._set(num_point_for_scaler=value)
    def getEarlyStoppingPatience(self):
        """Return ``early_stopping_patience``."""
        return self.getOrDefault(self.early_stopping_patience)
    def setEarlyStoppingPatience(self, value):
        """Set ``early_stopping_patience``; must be > 0."""
        if value <= 0:
            raise ValueError("early_stopping_patience must be > 0, got: %s" % value)
        self._set(early_stopping_patience=value)
    def getEarlyStoppingTolerance(self):
        """Return ``early_stopping_tolerance``."""
        return self.getOrDefault(self.early_stopping_tolerance)
    def setEarlyStoppingTolerance(self, value):
        """Set ``early_stopping_tolerance``; must be > 0."""
        if value <= 0:
            raise ValueError("early_stopping_tolerance must be > 0, got: %s" % value)
        self._set(early_stopping_tolerance=value)
    def getMargin(self):
        """Return ``margin`` (hinge loss only)."""
        return self.getOrDefault(self.margin)
    def setMargin(self, value):
        """Set ``margin``; must be > 0."""
        if value <= 0:
            raise ValueError("margin must be > 0, got: %s" % value)
        self._set(margin=value)
    def getQuantile(self):
        """Return ``quantile`` (quantile loss only)."""
        return self.getOrDefault(self.quantile)
    def setQuantile(self, value):
        """Set ``quantile``; must lie in (0, 1)."""
        if value <= 0 or value >= 1:
            raise ValueError("quantile must be in (0, 1), got: %s" % value)
        self._set(quantile=value)
    def getLossInsensitivity(self):
        """Return ``loss_insensitivity`` (epsilon-insensitive losses only)."""
        return self.getOrDefault(self.loss_insensitivity)
    def setLossInsensitivity(self, value):
        """Set ``loss_insensitivity``; must be > 0."""
        if value <= 0:
            raise ValueError("loss_insensitivity must be > 0, got: %s" % value)
        self._set(loss_insensitivity=value)
    def getHuberDelta(self):
        """Return ``huber_delta`` (huber loss only)."""
        return self.getOrDefault(self.huber_delta)
    def setHuberDelta(self, value):
        """Set ``huber_delta``; must be > 0."""
        if value <= 0:
            raise ValueError("huber_delta must be > 0, got: %s" % value)
        self._set(huber_delta=value)
    def getFBeta(self):
        """Return ``f_beta``, the beta used for F-score metrics."""
        return self.getOrDefault(self.f_beta)
    def setFBeta(self, value):
        """Set ``f_beta``; must be > 0."""
        if value <= 0:
            raise ValueError("f_beta must be > 0, got: %s" % value)
        self._set(f_beta=value)
class LinearLearnerBinaryClassifier(SageMakerEstimatorBase, LinearLearnerParams):
"""
A :class:`~sagemaker_pyspark.SageMakerEstimator` that runs a Linear Learner training job in
"binary classifier" mode in SageMaker and returns a :class:`~sagemaker_pyspark.SageMakerModel`
that can be used to transform a DataFrame using the hosted Linear Learner model. The Linear
Learner Binary Classifier is useful for classifying examples into one of two classes.
Amazon SageMaker Linear Learner trains on RecordIO-encoded Amazon Record protobuf data.
SageMaker pyspark writes a DataFrame to S3 by selecting a column of Vectors named "features"
and, if present, a column of Doubles named "label". These names are configurable by passing a
dictionary with entries in trainingSparkDataFormatOptions with key "labelColumnName" or
"featuresColumnName", with values corresponding to the desired label and features columns.
Inferences made against an Endpoint hosting a Linear Learner Binary classifier model contain
a "score" field and a "predicted_label" field, both appended to the input DataFrame as Doubles.
Args:
sageMakerRole (IAMRole): The SageMaker TrainingJob and Hosting IAM Role. Used by
SageMaker to access S3 and ECR Resources. SageMaker hosted Endpoint instances
launched by this Estimator run with this role.
trainingInstanceType (str): The SageMaker TrainingJob Instance Type to use.
trainingInstanceCount (int): The number of instances of instanceType to run an
SageMaker Training Job with.
endpointInstanceType (str): The SageMaker Endpoint Config instance type.
endpointInitialInstanceCount (int): The SageMaker Endpoint Config minimum number of
instances that can be used to host modelImage.
requestRowSerializer (RequestRowSerializer): Serializes Spark DataFrame Rows for
transformation by Models built from this Estimator.
responseRowDeserializer (ResponseRowDeserializer): Deserializes an Endpoint response into a
series of Rows.
trainingInputS3DataPath (S3Resource): An S3 location to upload SageMaker Training Job input
data to.
trainingOutputS3DataPath (S3Resource): An S3 location for SageMaker to store Training Job
output data to.
trainingInstanceVolumeSizeInGB (int): The EBS volume size in gigabytes of each instance.
trainingProjectedColumns (List): The columns to project from the Dataset being fit before
training. If an Optional.empty is passed then no specific projection will occur and
all columns will be serialized.
trainingChannelName (str): The SageMaker Channel name to input serialized Dataset fit
input to.
trainingContentType (str): The MIME type of the training data.
trainingS3DataDistribution (str): The SageMaker Training Job S3 data distribution scheme.
trainingSparkDataFormat (str): The Spark Data Format name used to serialize the Dataset
being fit for input to SageMaker.
trainingSparkDataFormatOptions (dict): The Spark Data Format Options used during
serialization of the Dataset being fit.
trainingInputMode (str): The SageMaker Training Job Channel input mode.
trainingCompressionCodec (str): The type of compression to use when serializing the
Dataset being fit for input to SageMaker.
        trainingMaxRuntimeInSeconds (int): A SageMaker Training Job Termination Condition
            MaxRuntimeInSeconds.
trainingKmsKeyId (str): A KMS key ID for the Output Data Source.
modelEnvironmentVariables (dict): The environment variables that SageMaker will set on the
model container during execution.
endpointCreationPolicy (EndpointCreationPolicy): Defines how a SageMaker Endpoint
referenced by a SageMakerModel is created.
        sagemakerClient (AmazonSageMaker): Amazon SageMaker client. Used to send CreateTrainingJob,
            CreateModel, and CreateEndpoint requests.
region (str): The region in which to run the algorithm. If not specified, gets the region
from the DefaultAwsRegionProviderChain.
s3Client (AmazonS3): Used to create a bucket for staging SageMaker Training Job
input and/or output if either are set to S3AutoCreatePath.
stsClient (AmazonSTS): Used to resolve the account number when creating staging
input / output buckets.
modelPrependInputRowsToTransformationRows (bool): Whether the transformation result on
Models built by this Estimator should also include the input Rows. If true,
each output Row is formed by a concatenation of the input Row with the corresponding
Row produced by SageMaker Endpoint invocation, produced by responseRowDeserializer.
If false, each output Row is just taken from responseRowDeserializer.
deleteStagingDataAfterTraining (bool): Whether to remove the training data on s3 after
training is complete or failed.
namePolicyFactory (NamePolicyFactory): The NamePolicyFactory to use when naming SageMaker
entities created during fit.
uid (str): The unique identifier of this Estimator. Used to represent this stage in Spark
ML pipelines.
"""
_wrapped_class = \
"com.amazonaws.services.sagemaker.sparksdk.algorithms.LinearLearnerBinaryClassifier"
binary_classifier_model_selection_criteria = Param(
Params._dummy(),
"binary_classifier_model_selection_criteria",
"Pick the model with best criteria from the validation dataset for predictor_type = "
"binary_classifier. Supported options: 'accuracy', 'f1', 'precision_at_target_recall',"
" 'recall_at_target_precision', 'cross_entropy_loss', 'f_beta' and 'loss_function'. ",
typeConverter=TypeConverters.toString)
target_recall = Param(Params._dummy(), "target_recall",
"Applicable if binary_classifier_model_selection_criteria is "
"precision_at_target_recall. Must be in range [0, 1]. ",
typeConverter=TypeConverters.toFloat)
target_precision = Param(Params._dummy(), "target_precision",
"Applicable if binary_classifier_model_selection_criteria is "
"recall_at_target_precision. Must be in range [0, 1]. ",
typeConverter=TypeConverters.toFloat)
positive_example_weight_mult = Param(Params._dummy(), "positive_example_weight_mult",
"Weight assigned to positive examples when training a"
"binary classifier. The weight of negative examples is"
"fixed at 1. If balanced, then a weight will be selected"
"so that errors in classifying negative vs. positive"
"examples have equal impact on the training loss. If auto,"
"the algorithm will attempt to select the weight that"
"optimizes performance. Must be string 'auto',"
"'balanced', or float > 0",
typeConverter=TypeConverters.toString)
    def __init__(
            self,
            trainingInstanceType,
            trainingInstanceCount,
            endpointInstanceType,
            endpointInitialInstanceCount,
            sagemakerRole=IAMRoleFromConfig(),
            requestRowSerializer=ProtobufRequestRowSerializer(),
            responseRowDeserializer=LinearLearnerBinaryClassifierProtobufResponseRowDeserializer(),
            trainingInputS3DataPath=S3AutoCreatePath(),
            trainingOutputS3DataPath=S3AutoCreatePath(),
            trainingInstanceVolumeSizeInGB=1024,
            trainingProjectedColumns=None,
            trainingChannelName="train",
            trainingContentType=None,
            trainingS3DataDistribution="ShardedByS3Key",
            trainingSparkDataFormat="sagemaker",
            trainingSparkDataFormatOptions=None,
            trainingInputMode="File",
            trainingCompressionCodec=None,
            trainingMaxRuntimeInSeconds=24*60*60,
            trainingKmsKeyId=None,
            modelEnvironmentVariables=None,
            endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
            sagemakerClient=SageMakerClients.create_sagemaker_client(),
            region=None,
            s3Client=SageMakerClients.create_s3_default_client(),
            stsClient=SageMakerClients.create_sts_default_client(),
            modelPrependInputRowsToTransformationRows=True,
            deleteStagingDataAfterTraining=True,
            namePolicyFactory=RandomNamePolicyFactory(),
            uid=None,
            javaObject=None):
        """Create the Python wrapper and its backing JVM estimator.

        See the class docstring for parameter documentation.  ``javaObject``,
        when not None, wraps an existing JVM estimator instead of creating a
        new one (used by Spark ML persistence).

        NOTE(review): the client/serializer defaults above are evaluated once
        at import time and shared across instances -- confirm this is intended.
        """
        # Mutable defaults are created per-call here, not in the signature.
        if trainingSparkDataFormatOptions is None:
            trainingSparkDataFormatOptions = {}
        if modelEnvironmentVariables is None:
            modelEnvironmentVariables = {}
        if uid is None:
            uid = Identifiable._randomUID()
        # locals() captures every constructor argument by name; the superclass
        # forwards them verbatim to _get_java_obj, so the local names above must
        # stay in sync with the kwargs read there.
        kwargs = locals().copy()
        del kwargs['self']
        super(LinearLearnerBinaryClassifier, self).__init__(**kwargs)
        # The predictor_type is fixed for this binary-classifier estimator.
        default_params = {
            'predictor_type': 'binary_classifier'
        }
        self._setDefault(**default_params)
def _get_java_obj(self, **kwargs):
if 'javaObject' in kwargs and kwargs['javaObject'] is not None:
return kwargs['javaObject']
else:
return self._new_java_obj(
LinearLearnerBinaryClassifier._wrapped_class,
kwargs['sagemakerRole'],
kwargs['trainingInstanceType'],
kwargs['trainingInstanceCount'],
kwargs['endpointInstanceType'],
kwargs['endpointInitialInstanceCount'],
kwargs['requestRowSerializer'],
kwargs['responseRowDeserializer'],
kwargs['trainingInputS3DataPath'],
kwargs['trainingOutputS3DataPath'],
kwargs['trainingInstanceVolumeSizeInGB'],
Option(kwargs['trainingProjectedColumns']),
kwargs['trainingChannelName'],
Option(kwargs['trainingContentType']),
kwargs['trainingS3DataDistribution'],
kwargs['trainingSparkDataFormat'],
kwargs['trainingSparkDataFormatOptions'],
kwargs['trainingInputMode'],
Option(kwargs['trainingCompressionCodec']),
kwargs['trainingMaxRuntimeInSeconds'],
Option(kwargs['trainingKmsKeyId']),
kwargs['modelEnvironmentVariables'],
kwargs['endpointCreationPolicy'],
kwargs['sagemakerClient'],
Option(kwargs['region']),
kwargs['s3Client'],
kwargs['stsClient'],
kwargs['modelPrependInputRowsToTransformationRows'],
kwargs['deleteStagingDataAfterTraining'],
kwargs['namePolicyFactory'],
kwargs['uid']
)
    def getBinaryClassifierModelSelectionCriteria(self):
        """Return the model selection criteria, or its default if unset."""
        return self.getOrDefault(self.binary_classifier_model_selection_criteria)
def setBinaryClassifierModelSelectionCriteria(self, value):
if value not in ('accuracy', 'f1', 'precision_at_target_recall',
'recall_at_target_precision', 'cross_entropy_loss', 'f_beta',
'loss_function'):
raise ValueError("binary_classifier_model_selection_criteria must be 'accuracy', 'f1', "
"'precision_at_target_recall','recall_at_target_precision',"
" 'cross_entropy_loss', 'f_beta' and 'loss_function', got: %s" % value)
self._set(binary_classifier_model_selection_criteria=value)
    def getTargetRecall(self):
        """Return target_recall, or its default if unset."""
        return self.getOrDefault(self.target_recall)
def setTargetRecall(self, value):
if value > 1 or value < 0:
raise ValueError("target_recall must be within [0, 1], got: %s" % value)
self._set(target_recall=value)
    def getTargetPrecision(self):
        """Return target_precision, or its default if unset."""
        return self.getOrDefault(self.target_precision)
def setTargetPrecision(self, value):
if value > 1 or value < 0:
raise ValueError("target_precision must be within [0, 1], got: %s" % value)
self._set(target_precision=value)
    def getPositiveExampleWeightMult(self):
        """Return positive_example_weight_mult (stored as a string), or its
        default if unset."""
        return self.getOrDefault(self.positive_example_weight_mult)
def setPositiveExampleWeightMult(self, value):
if isinstance(value, numbers.Real) and value <= 0:
raise ValueError("positive_example_weight_mult must be 'auto', 'balanced' or > 0, "
"got: %s" % value)
if value not in ('auto', 'balanced') and float(value) <= 0:
raise ValueError("positive_example_weight_mult must be 'auto', 'balanced' or > 0, "
"got: %s" % value)
self._set(positive_example_weight_mult=str(value))
    @classmethod
    def _from_java(cls, javaObject):
        """Re-wrap an existing JVM estimator object (e.g. during pipeline
        deserialization) in a Python LinearLearnerBinaryClassifier."""
        return LinearLearnerBinaryClassifier(sagemakerRole=None, javaObject=javaObject)
class LinearLearnerMultiClassClassifier(SageMakerEstimatorBase, LinearLearnerParams):
    """
    A :class:`~sagemaker_pyspark.SageMakerEstimator` that runs a Linear Learner training job in
    "multiclass classifier" mode in SageMaker and returns :class:`~sagemaker_pyspark.SageMakerModel`
    that can be used to transform a DataFrame using the hosted Linear Learner model. The Linear
    Learner MultiClass Classifier is useful for classifying examples into one of three or more
    classes.

    Amazon SageMaker Linear Learner trains on RecordIO-encoded Amazon Record protobuf data.
    SageMaker pyspark writes a DataFrame to S3 by selecting a column of Vectors named "features"
    and, if present, a column of Doubles named "label". These names are configurable by passing a
    dictionary with entries in trainingSparkDataFormatOptions with key "labelColumnName" or
    "featuresColumnName", with values corresponding to the desired label and features columns.

    Inferences made against an Endpoint hosting a Linear Learner MultiClass classifier model
    contain a "score" field and a "predicted_label" field, both appended to the input DataFrame
    as Doubles.

    Args:
        sageMakerRole (IAMRole): The SageMaker TrainingJob and Hosting IAM Role. Used by
            SageMaker to access S3 and ECR Resources. SageMaker hosted Endpoint instances
            launched by this Estimator run with this role.
        trainingInstanceType (str): The SageMaker TrainingJob Instance Type to use.
        trainingInstanceCount (int): The number of instances of instanceType to run an
            SageMaker Training Job with.
        endpointInstanceType (str): The SageMaker Endpoint Config instance type.
        endpointInitialInstanceCount (int): The SageMaker Endpoint Config minimum number of
            instances that can be used to host modelImage.
        requestRowSerializer (RequestRowSerializer): Serializes Spark DataFrame Rows for
            transformation by Models built from this Estimator.
        responseRowDeserializer (ResponseRowDeserializer): Deserializes an Endpoint response into a
            series of Rows.
        trainingInputS3DataPath (S3Resource): An S3 location to upload SageMaker Training Job input
            data to.
        trainingOutputS3DataPath (S3Resource): An S3 location for SageMaker to store Training Job
            output data to.
        trainingInstanceVolumeSizeInGB (int): The EBS volume size in gigabytes of each instance.
        trainingProjectedColumns (List): The columns to project from the Dataset being fit before
            training. If an Optional.empty is passed then no specific projection will occur and
            all columns will be serialized.
        trainingChannelName (str): The SageMaker Channel name to input serialized Dataset fit
            input to.
        trainingContentType (str): The MIME type of the training data.
        trainingS3DataDistribution (str): The SageMaker Training Job S3 data distribution scheme.
        trainingSparkDataFormat (str): The Spark Data Format name used to serialize the Dataset
            being fit for input to SageMaker.
        trainingSparkDataFormatOptions (dict): The Spark Data Format Options used during
            serialization of the Dataset being fit.
        trainingInputMode (str): The SageMaker Training Job Channel input mode.
        trainingCompressionCodec (str): The type of compression to use when serializing the
            Dataset being fit for input to SageMaker.
        trainingMaxRuntimeInSeconds (int): A SageMaker Training Job Termination Condition
            MaxRuntimeInSeconds.
        trainingKmsKeyId (str): A KMS key ID for the Output Data Source.
        modelEnvironmentVariables (dict): The environment variables that SageMaker will set on the
            model container during execution.
        endpointCreationPolicy (EndpointCreationPolicy): Defines how a SageMaker Endpoint
            referenced by a SageMakerModel is created.
        sagemakerClient (AmazonSageMaker): Amazon SageMaker client. Used to send CreateTrainingJob,
            CreateModel, and CreateEndpoint requests.
        region (str): The region in which to run the algorithm. If not specified, gets the region
            from the DefaultAwsRegionProviderChain.
        s3Client (AmazonS3): Used to create a bucket for staging SageMaker Training Job
            input and/or output if either are set to S3AutoCreatePath.
        stsClient (AmazonSTS): Used to resolve the account number when creating staging
            input / output buckets.
        modelPrependInputRowsToTransformationRows (bool): Whether the transformation result on
            Models built by this Estimator should also include the input Rows. If true,
            each output Row is formed by a concatenation of the input Row with the corresponding
            Row produced by SageMaker Endpoint invocation, produced by responseRowDeserializer.
            If false, each output Row is just taken from responseRowDeserializer.
        deleteStagingDataAfterTraining (bool): Whether to remove the training data on s3 after
            training is complete or failed.
        namePolicyFactory (NamePolicyFactory): The NamePolicyFactory to use when naming SageMaker
            entities created during fit.
        uid (str): The unique identifier of this Estimator. Used to represent this stage in Spark
            ML pipelines.
    """
    _wrapped_class = \
        "com.amazonaws.services.sagemaker.sparksdk.algorithms.LinearLearnerMultiClassClassifier"

    # Fix: the description strings below previously concatenated without
    # separating spaces ("assumedto", "multiclassclassification", ...).
    num_classes = Param(Params._dummy(), "num_classes",
                        "The number of classes for the response variable. The classes are "
                        "assumed to be labeled 0, ..., num_classes - 1. "
                        "Must be in range [3, 1000000].",
                        typeConverter=TypeConverters.toInt)

    accuracy_top_k = Param(Params._dummy(), "accuracy_top_k",
                           "The value of k when computing the Top K Accuracy metric for "
                           "multiclass classification. An example is scored as correct if the "
                           "model assigns one of the top k scores to the true label. "
                           "Must be > 0. ",
                           typeConverter=TypeConverters.toInt)

    balance_multiclass_weights = Param(Params._dummy(), "balance_multiclass_weights",
                                       "Whether to use class weights which give each class "
                                       "equal importance in the loss function.",
                                       typeConverter=TypeConverters.toString)

    def __init__(
            self,
            trainingInstanceType,
            trainingInstanceCount,
            endpointInstanceType,
            endpointInitialInstanceCount,
            sagemakerRole=IAMRoleFromConfig(),
            requestRowSerializer=ProtobufRequestRowSerializer(),
            responseRowDeserializer=LinearLearnerMultiClassClassifierProtobufResponseRowDeserializer(),  # noqa
            trainingInputS3DataPath=S3AutoCreatePath(),
            trainingOutputS3DataPath=S3AutoCreatePath(),
            trainingInstanceVolumeSizeInGB=1024,
            trainingProjectedColumns=None,
            trainingChannelName="train",
            trainingContentType=None,
            trainingS3DataDistribution="ShardedByS3Key",
            trainingSparkDataFormat="sagemaker",
            trainingSparkDataFormatOptions=None,
            trainingInputMode="File",
            trainingCompressionCodec=None,
            trainingMaxRuntimeInSeconds=24*60*60,
            trainingKmsKeyId=None,
            modelEnvironmentVariables=None,
            endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
            sagemakerClient=SageMakerClients.create_sagemaker_client(),
            region=None,
            s3Client=SageMakerClients.create_s3_default_client(),
            stsClient=SageMakerClients.create_sts_default_client(),
            modelPrependInputRowsToTransformationRows=True,
            deleteStagingDataAfterTraining=True,
            namePolicyFactory=RandomNamePolicyFactory(),
            uid=None,
            javaObject=None):
        # None-sentinel pattern for mutable defaults.
        if trainingSparkDataFormatOptions is None:
            trainingSparkDataFormatOptions = {}
        if modelEnvironmentVariables is None:
            modelEnvironmentVariables = {}
        if uid is None:
            uid = Identifiable._randomUID()
        # Capture constructor args before defining any other local.
        kwargs = locals().copy()
        del kwargs['self']
        super(LinearLearnerMultiClassClassifier, self).__init__(**kwargs)
        default_params = {
            'predictor_type': 'multiclass_classifier'
        }
        self._setDefault(**default_params)

    def _get_java_obj(self, **kwargs):
        """Return the wrapped JVM estimator, constructing one if needed."""
        if 'javaObject' in kwargs and kwargs['javaObject'] is not None:
            return kwargs['javaObject']
        # Positional order must mirror the Scala constructor exactly.
        return self._new_java_obj(
            LinearLearnerMultiClassClassifier._wrapped_class,
            kwargs['sagemakerRole'],
            kwargs['trainingInstanceType'],
            kwargs['trainingInstanceCount'],
            kwargs['endpointInstanceType'],
            kwargs['endpointInitialInstanceCount'],
            kwargs['requestRowSerializer'],
            kwargs['responseRowDeserializer'],
            kwargs['trainingInputS3DataPath'],
            kwargs['trainingOutputS3DataPath'],
            kwargs['trainingInstanceVolumeSizeInGB'],
            Option(kwargs['trainingProjectedColumns']),
            kwargs['trainingChannelName'],
            Option(kwargs['trainingContentType']),
            kwargs['trainingS3DataDistribution'],
            kwargs['trainingSparkDataFormat'],
            kwargs['trainingSparkDataFormatOptions'],
            kwargs['trainingInputMode'],
            Option(kwargs['trainingCompressionCodec']),
            kwargs['trainingMaxRuntimeInSeconds'],
            Option(kwargs['trainingKmsKeyId']),
            kwargs['modelEnvironmentVariables'],
            kwargs['endpointCreationPolicy'],
            kwargs['sagemakerClient'],
            Option(kwargs['region']),
            kwargs['s3Client'],
            kwargs['stsClient'],
            kwargs['modelPrependInputRowsToTransformationRows'],
            kwargs['deleteStagingDataAfterTraining'],
            kwargs['namePolicyFactory'],
            kwargs['uid']
        )

    def getNumClasses(self):
        """Return num_classes, or its default if unset."""
        return self.getOrDefault(self.num_classes)

    def setNumClasses(self, value):
        """Set num_classes; must be in [3, 1000000]."""
        if value > 1000000 or value < 3:
            raise ValueError("num_classes must be in [3, 1000000], got: %s" % value)
        self._set(num_classes=value)

    def getAccuracyTopK(self):
        """Return accuracy_top_k, or its default if unset."""
        return self.getOrDefault(self.accuracy_top_k)

    def setAccuracyTopK(self, value):
        """Set accuracy_top_k; must be > 0."""
        if value <= 0:
            raise ValueError("accuracy_top_k must be > 0, got: %s" % value)
        self._set(accuracy_top_k=value)

    def getBalanceMultiClassWeights(self):
        """Return balance_multiclass_weights as a Python bool (the parameter
        is stored in string form, 'True' or 'False')."""
        return self.getOrDefault(self.balance_multiclass_weights) == 'True'

    def setBalanceMultiClassWeights(self, value):
        """Set balance_multiclass_weights (converted to its string form by
        the Param's toString type converter)."""
        self._set(balance_multiclass_weights=value)

    @classmethod
    def _from_java(cls, javaObject):
        """Re-wrap an existing JVM estimator object in a Python
        LinearLearnerMultiClassClassifier."""
        return LinearLearnerMultiClassClassifier(sagemakerRole=None, javaObject=javaObject)
class LinearLearnerRegressor(SageMakerEstimatorBase, LinearLearnerParams):
    """
    A :class:`~sagemaker_pyspark.SageMakerEstimator` that runs a Linear Learner training job in
    "regressor" mode in SageMaker and returns a :class:`~sagemaker_pyspark.SageMakerModel` that
    can be used to transform a DataFrame using the hosted Linear Learner model. The Linear Learner
    Regressor is useful for predicting a real-valued label from training examples.
    Amazon SageMaker Linear Learner trains on RecordIO-encoded Amazon Record protobuf data.
    SageMaker pyspark writes a DataFrame to S3 by selecting a column of Vectors named "features"
    and, if present, a column of Doubles named "label". These names are configurable by passing a
    dictionary with entries in trainingSparkDataFormatOptions with key "labelColumnName" or
    "featuresColumnName", with values corresponding to the desired label and features columns.
    For inference against a hosted Endpoint, the SageMakerModel returned by :meth:`fit()` by
    Linear Learner uses :class:`~sagemaker_pyspark.transformation
    .serializers.ProtobufRequestRowSerializer` to serialize Rows into RecordIO-encoded Amazon
    Record protobuf messages, by default selecting the column named "features" expected to contain
    a Vector of Doubles.
    Inferences made against an Endpoint hosting a Linear Learner Regressor model contain a "score"
    field appended to the input DataFrame as a Double.
    Args:
        sageMakerRole (IAMRole): The SageMaker TrainingJob and Hosting IAM Role. Used by
            SageMaker to access S3 and ECR Resources. SageMaker hosted Endpoint instances
            launched by this Estimator run with this role.
        trainingInstanceType (str): The SageMaker TrainingJob Instance Type to use.
        trainingInstanceCount (int): The number of instances of instanceType to run an
            SageMaker Training Job with.
        endpointInstanceType (str): The SageMaker Endpoint Config instance type.
        endpointInitialInstanceCount (int): The SageMaker Endpoint Config minimum number of
            instances that can be used to host modelImage.
        requestRowSerializer (RequestRowSerializer): Serializes Spark DataFrame Rows for
            transformation by Models built from this Estimator.
        responseRowDeserializer (ResponseRowDeserializer): Deserializes an Endpoint response into a
            series of Rows.
        trainingInputS3DataPath (S3Resource): An S3 location to upload SageMaker Training Job input
            data to.
        trainingOutputS3DataPath (S3Resource): An S3 location for SageMaker to store Training Job
            output data to.
        trainingInstanceVolumeSizeInGB (int): The EBS volume size in gigabytes of each instance.
        trainingProjectedColumns (List): The columns to project from the Dataset being fit before
            training. If an Optional.empty is passed then no specific projection will occur and
            all columns will be serialized.
        trainingChannelName (str): The SageMaker Channel name to input serialized Dataset fit
            input to.
        trainingContentType (str): The MIME type of the training data.
        trainingS3DataDistribution (str): The SageMaker Training Job S3 data distribution scheme.
        trainingSparkDataFormat (str): The Spark Data Format name used to serialize the Dataset
            being fit for input to SageMaker.
        trainingSparkDataFormatOptions (dict): The Spark Data Format Options used during
            serialization of the Dataset being fit.
        trainingInputMode (str): The SageMaker Training Job Channel input mode.
        trainingCompressionCodec (str): The type of compression to use when serializing the
            Dataset being fit for input to SageMaker.
        trainingMaxRuntimeInSeconds (int): A SageMaker Training Job Termination Condition
            MaxRuntimeInSeconds.
        trainingKmsKeyId (str): A KMS key ID for the Output Data Source.
        modelEnvironmentVariables (dict): The environment variables that SageMaker will set on the
            model container during execution.
        endpointCreationPolicy (EndpointCreationPolicy): Defines how a SageMaker Endpoint
            referenced by a SageMakerModel is created.
        sagemakerClient (AmazonSageMaker): Amazon SageMaker client. Used to send CreateTrainingJob,
            CreateModel, and CreateEndpoint requests.
        region (str): The region in which to run the algorithm. If not specified, gets the region
            from the DefaultAwsRegionProviderChain.
        s3Client (AmazonS3): Used to create a bucket for staging SageMaker Training Job
            input and/or output if either are set to S3AutoCreatePath.
        stsClient (AmazonSTS): Used to resolve the account number when creating staging
            input / output buckets.
        modelPrependInputRowsToTransformationRows (bool): Whether the transformation result on
            Models built by this Estimator should also include the input Rows. If true,
            each output Row is formed by a concatenation of the input Row with the corresponding
            Row produced by SageMaker Endpoint invocation, produced by responseRowDeserializer.
            If false, each output Row is just taken from responseRowDeserializer.
        deleteStagingDataAfterTraining (bool): Whether to remove the training data on s3 after
            training is complete or failed.
        namePolicyFactory (NamePolicyFactory): The NamePolicyFactory to use when naming SageMaker
            entities created during fit.
        uid (str): The unique identifier of this Estimator. Used to represent this stage in Spark
            ML pipelines.
    """
    _wrapped_class = "com.amazonaws.services.sagemaker.sparksdk.algorithms.LinearLearnerRegressor"
    def __init__(self,
                 trainingInstanceType,
                 trainingInstanceCount,
                 endpointInstanceType,
                 endpointInitialInstanceCount,
                 sagemakerRole=IAMRoleFromConfig(),
                 requestRowSerializer=ProtobufRequestRowSerializer(),
                 responseRowDeserializer=LinearLearnerRegressorProtobufResponseRowDeserializer(),
                 trainingInputS3DataPath=S3AutoCreatePath(),
                 trainingOutputS3DataPath=S3AutoCreatePath(),
                 trainingInstanceVolumeSizeInGB=1024,
                 trainingProjectedColumns=None,
                 trainingChannelName="train",
                 trainingContentType=None,
                 trainingS3DataDistribution="ShardedByS3Key",
                 trainingSparkDataFormat="sagemaker",
                 trainingSparkDataFormatOptions=None,
                 trainingInputMode="File",
                 trainingCompressionCodec=None,
                 trainingMaxRuntimeInSeconds=24*60*60,
                 trainingKmsKeyId=None,
                 modelEnvironmentVariables=None,
                 endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
                 sagemakerClient=SageMakerClients.create_sagemaker_client(),
                 region=None,
                 s3Client=SageMakerClients.create_s3_default_client(),
                 stsClient=SageMakerClients.create_sts_default_client(),
                 modelPrependInputRowsToTransformationRows=True,
                 deleteStagingDataAfterTraining=True,
                 namePolicyFactory=RandomNamePolicyFactory(),
                 uid=None,
                 javaObject=None):
        # None-sentinel pattern for mutable defaults.
        if trainingSparkDataFormatOptions is None:
            trainingSparkDataFormatOptions = {}
        if modelEnvironmentVariables is None:
            modelEnvironmentVariables = {}
        if uid is None:
            uid = Identifiable._randomUID()
        # locals() must be captured before any other local is defined so that
        # kwargs is exactly the constructor arguments.
        kwargs = locals().copy()
        del kwargs['self']
        super(LinearLearnerRegressor, self).__init__(**kwargs)
        # Pin the algorithm mode for this variant.
        default_params = {
            'predictor_type': 'regressor'
        }
        self._setDefault(**default_params)
    def _get_java_obj(self, **kwargs):
        # Reuse a provided JVM object (e.g. after deserialization); otherwise
        # construct one. Positional order mirrors the Scala constructor.
        if 'javaObject' in kwargs and kwargs['javaObject'] is not None:
            return kwargs['javaObject']
        else:
            return self._new_java_obj(
                LinearLearnerRegressor._wrapped_class,
                kwargs['sagemakerRole'],
                kwargs['trainingInstanceType'],
                kwargs['trainingInstanceCount'],
                kwargs['endpointInstanceType'],
                kwargs['endpointInitialInstanceCount'],
                kwargs['requestRowSerializer'],
                kwargs['responseRowDeserializer'],
                kwargs['trainingInputS3DataPath'],
                kwargs['trainingOutputS3DataPath'],
                kwargs['trainingInstanceVolumeSizeInGB'],
                Option(kwargs['trainingProjectedColumns']),
                kwargs['trainingChannelName'],
                Option(kwargs['trainingContentType']),
                kwargs['trainingS3DataDistribution'],
                kwargs['trainingSparkDataFormat'],
                kwargs['trainingSparkDataFormatOptions'],
                kwargs['trainingInputMode'],
                Option(kwargs['trainingCompressionCodec']),
                kwargs['trainingMaxRuntimeInSeconds'],
                Option(kwargs['trainingKmsKeyId']),
                kwargs['modelEnvironmentVariables'],
                kwargs['endpointCreationPolicy'],
                kwargs['sagemakerClient'],
                Option(kwargs['region']),
                kwargs['s3Client'],
                kwargs['stsClient'],
                kwargs['modelPrependInputRowsToTransformationRows'],
                kwargs['deleteStagingDataAfterTraining'],
                kwargs['namePolicyFactory'],
                kwargs['uid']
            )
    @classmethod
    def _from_java(cls, javaObject):
        # Re-wrap an existing JVM estimator object in a Python wrapper.
        return LinearLearnerRegressor(sagemakerRole=None, javaObject=javaObject)
from pyspark.ml.param import Params, Param, TypeConverters
from pyspark.ml.util import Identifiable
from sagemaker_pyspark import (SageMakerEstimatorBase, S3AutoCreatePath, Option, IAMRoleFromConfig,
EndpointCreationPolicy, SageMakerClients, RandomNamePolicyFactory)
from sagemaker_pyspark.transformation.serializers import ProtobufRequestRowSerializer
from sagemaker_pyspark.transformation.deserializers import (
FactorizationMachinesBinaryClassifierDeserializer,
FactorizationMachinesRegressorDeserializer)
class FactorizationMachinesParams(Params):
    """Spark ML Params shared by the SageMaker Factorization Machines
    estimators, together with getters and validating setters for each
    hyperparameter. Setters raise ValueError on out-of-range values before
    storing anything on the Param map.
    """

    feature_dim = Param(Params._dummy(), "feature_dim",
                        "The dimension of the input vectors. Must be > 0. ",
                        typeConverter=TypeConverters.toInt)

    num_factors = Param(Params._dummy(), "num_factors",
                        "Dimensionality of factorization. Must be > 0. ",
                        typeConverter=TypeConverters.toInt)

    predictor_type = Param(Params._dummy(), "predictor_type",
                           "Whether training is for binary classification or regression. "
                           "Supported options: 'binary_classifier', and 'regressor'. ",
                           typeConverter=TypeConverters.toString)

    mini_batch_size = Param(Params._dummy(), "mini_batch_size",
                            "The number of examples in a mini-batch. Must be > 0. ",
                            typeConverter=TypeConverters.toInt)

    epochs = Param(Params._dummy(), "epochs",
                   "The number of passes done over the training data. Must be > 0. ",
                   typeConverter=TypeConverters.toInt)

    # Fix: the two description fragments previously concatenated to
    # "ontothe box" (missing separating space).
    clip_gradient = Param(Params._dummy(), "clip_gradient",
                          "Clip the gradient by projecting onto "
                          "the box [-clip_gradient, +clip_gradient]. ",
                          typeConverter=TypeConverters.toFloat)

    eps = Param(Params._dummy(), "eps",
                "Small value to avoid division by 0. ",
                typeConverter=TypeConverters.toFloat)

    rescale_grad = Param(Params._dummy(), "rescale_grad",
                         "Multiplies the gradient with this value before updating. ",
                         typeConverter=TypeConverters.toFloat)

    # Fix: description was a copy-paste of rescale_grad's; bias_lr is the
    # learning rate for the bias term (cf. linear_lr / factors_lr below).
    bias_lr = Param(Params._dummy(), "bias_lr",
                    "Learning rate for the bias term. Must be >= 0. ",
                    typeConverter=TypeConverters.toFloat)

    linear_lr = Param(Params._dummy(), "linear_lr",
                      "Learning rate for linear terms. Must be >= 0. ",
                      typeConverter=TypeConverters.toFloat)

    factors_lr = Param(Params._dummy(), "factors_lr",
                       "Learning rate for factorization terms. Must be >= 0. ",
                       typeConverter=TypeConverters.toFloat)

    bias_wd = Param(Params._dummy(), "bias_wd",
                    "Weight decay for the bias term. Must be >= 0. ",
                    typeConverter=TypeConverters.toFloat)

    linear_wd = Param(Params._dummy(), "linear_wd",
                      "Weight decay for linear terms. Must be >= 0. ",
                      typeConverter=TypeConverters.toFloat)

    factors_wd = Param(Params._dummy(), "factors_wd",
                       "Weight decay for factorization terms. Must be >= 0. ",
                       typeConverter=TypeConverters.toFloat)

    bias_init_method = Param(Params._dummy(), "bias_init_method",
                             "Initialization method for the bias term,"
                             " supports: 'normal', 'uniform' and 'constant'. ",
                             typeConverter=TypeConverters.toString)

    bias_init_scale = Param(Params._dummy(), "bias_init_scale",
                            "Range for bias term uniform initialization. Must be >= 0. ",
                            typeConverter=TypeConverters.toFloat)

    bias_init_sigma = Param(Params._dummy(), "bias_init_sigma",
                            "Standard deviation to initialize bias terms. Must be >= 0. ",
                            typeConverter=TypeConverters.toFloat)

    bias_init_value = Param(Params._dummy(), "bias_init_value",
                            "Initial value for the bias term. ",
                            typeConverter=TypeConverters.toFloat)

    linear_init_method = Param(Params._dummy(), "linear_init_method",
                               "Initialization method for linear term,"
                               " supports: 'normal', 'uniform' and 'constant'. ",
                               typeConverter=TypeConverters.toString)

    linear_init_scale = Param(Params._dummy(), "linear_init_scale",
                              "Range for linear term uniform initialization. Must be >= 0. ",
                              typeConverter=TypeConverters.toFloat)

    linear_init_sigma = Param(Params._dummy(), "linear_init_sigma",
                              "Standard deviation to initialize linear terms. Must be >= 0. ",
                              typeConverter=TypeConverters.toFloat)

    linear_init_value = Param(Params._dummy(), "linear_init_value",
                              "Initial value for linear term. ",
                              typeConverter=TypeConverters.toFloat)

    factors_init_method = Param(Params._dummy(), "factors_init_method",
                                "Init method for factorization terms,"
                                " supports: 'normal', 'uniform' and 'constant'. ",
                                typeConverter=TypeConverters.toString)

    factors_init_scale = Param(Params._dummy(), "factors_init_scale",
                               "Range for factorization terms uniform initialization."
                               " Must be >= 0. ",
                               typeConverter=TypeConverters.toFloat)

    factors_init_sigma = Param(Params._dummy(), "factors_init_sigma",
                               "Standard deviation to initialize factorization terms."
                               " Must be >= 0. ",
                               typeConverter=TypeConverters.toFloat)

    factors_init_value = Param(Params._dummy(), "factors_init_value",
                               "Initial value for factorization term. ",
                               typeConverter=TypeConverters.toFloat)

    # --- structural hyperparameters ---

    def getFeatureDim(self):
        return self.getOrDefault(self.feature_dim)

    def setFeatureDim(self, value):
        if value <= 0:
            raise ValueError("feature_dim must be > 0. Got %s" % value)
        self._set(feature_dim=value)

    def getNumFactors(self):
        return self.getOrDefault(self.num_factors)

    def setNumFactors(self, value):
        if value <= 0:
            raise ValueError("num_factors must be > 0, got: %s" % value)
        self._set(num_factors=value)

    def getMiniBatchSize(self):
        return self.getOrDefault(self.mini_batch_size)

    def setMiniBatchSize(self, value):
        if value <= 0:
            raise ValueError("mini_batch_size must be > 0. Got %s" % value)
        self._set(mini_batch_size=value)

    def getEpochs(self):
        return self.getOrDefault(self.epochs)

    def setEpochs(self, value):
        if value <= 0:
            raise ValueError("epochs must be > 0, got: %s" % value)
        self._set(epochs=value)

    # --- optimizer hyperparameters (no range restriction unless noted) ---

    def getClipGradient(self):
        return self.getOrDefault(self.clip_gradient)

    def setClipGradient(self, value):
        self._set(clip_gradient=value)

    def getEps(self):
        return self.getOrDefault(self.eps)

    def setEps(self, value):
        self._set(eps=value)

    def getRescaleGrad(self):
        return self.getOrDefault(self.rescale_grad)

    def setRescaleGrad(self, value):
        self._set(rescale_grad=value)

    def getBiasLr(self):
        return self.getOrDefault(self.bias_lr)

    def setBiasLr(self, value):
        if value < 0:
            raise ValueError("bias_lr must be >= 0. Got %s" % value)
        self._set(bias_lr=value)

    def getLinearLr(self):
        return self.getOrDefault(self.linear_lr)

    def setLinearLr(self, value):
        if value < 0:
            raise ValueError("linear_lr must be >= 0. Got %s" % value)
        self._set(linear_lr=value)

    def getFactorsLr(self):
        return self.getOrDefault(self.factors_lr)

    def setFactorsLr(self, value):
        if value < 0:
            raise ValueError("factors_lr must be >= 0. Got %s" % value)
        self._set(factors_lr=value)

    def getBiasWd(self):
        return self.getOrDefault(self.bias_wd)

    def setBiasWd(self, value):
        if value < 0:
            raise ValueError("bias_wd must be >= 0. Got %s" % value)
        self._set(bias_wd=value)

    def getLinearWd(self):
        return self.getOrDefault(self.linear_wd)

    def setLinearWd(self, value):
        if value < 0:
            raise ValueError("linear_wd must be >= 0. Got %s" % value)
        self._set(linear_wd=value)

    def getFactorsWd(self):
        return self.getOrDefault(self.factors_wd)

    def setFactorsWd(self, value):
        if value < 0:
            raise ValueError("factors_wd must be >= 0. Got %s" % value)
        self._set(factors_wd=value)

    # --- initialization hyperparameters ---

    def getBiasInitMethod(self):
        return self.getOrDefault(self.bias_init_method)

    def setBiasInitMethod(self, value):
        if value not in ('uniform', 'normal', 'constant'):
            raise ValueError("bias_init_method must be 'uniform',"
                             " 'constant' or 'normal', got: %s" % value)
        self._set(bias_init_method=value)

    def getBiasInitScale(self):
        return self.getOrDefault(self.bias_init_scale)

    def setBiasInitScale(self, value):
        if value < 0:
            raise ValueError("bias_init_scale must be >= 0. Got %s" % value)
        self._set(bias_init_scale=value)

    def getBiasInitSigma(self):
        return self.getOrDefault(self.bias_init_sigma)

    def setBiasInitSigma(self, value):
        if value < 0:
            raise ValueError("bias_init_sigma must be >= 0. Got %s" % value)
        self._set(bias_init_sigma=value)

    def getBiasInitValue(self):
        return self.getOrDefault(self.bias_init_value)

    def setBiasInitValue(self, value):
        self._set(bias_init_value=value)

    def getLinearInitMethod(self):
        return self.getOrDefault(self.linear_init_method)

    def setLinearInitMethod(self, value):
        if value not in ('uniform', 'normal', 'constant'):
            raise ValueError("linear_init_method must be 'uniform', "
                             "'constant' or 'normal', got: %s" % value)
        self._set(linear_init_method=value)

    def getLinearInitScale(self):
        return self.getOrDefault(self.linear_init_scale)

    def setLinearInitScale(self, value):
        if value < 0:
            raise ValueError("linear_init_scale must be >= 0. Got %s" % value)
        self._set(linear_init_scale=value)

    def getLinearInitSigma(self):
        return self.getOrDefault(self.linear_init_sigma)

    def setLinearInitSigma(self, value):
        if value < 0:
            raise ValueError("linear_init_sigma must be >= 0. Got %s" % value)
        self._set(linear_init_sigma=value)

    def getLinearInitValue(self):
        return self.getOrDefault(self.linear_init_value)

    def setLinearInitValue(self, value):
        self._set(linear_init_value=value)

    def getFactorsInitMethod(self):
        return self.getOrDefault(self.factors_init_method)

    def setFactorsInitMethod(self, value):
        if value not in ('uniform', 'normal', 'constant'):
            raise ValueError("factors_init_method must be 'uniform', "
                             "'constant' or 'normal', got: %s" % value)
        self._set(factors_init_method=value)

    def getFactorsInitScale(self):
        return self.getOrDefault(self.factors_init_scale)

    def setFactorsInitScale(self, value):
        if value < 0:
            raise ValueError("factors_init_scale must be >= 0. Got %s" % value)
        self._set(factors_init_scale=value)

    def getFactorsInitSigma(self):
        return self.getOrDefault(self.factors_init_sigma)

    def setFactorsInitSigma(self, value):
        if value < 0:
            raise ValueError("factors_init_sigma must be >= 0. Got %s" % value)
        self._set(factors_init_sigma=value)

    def getFactorsInitValue(self):
        return self.getOrDefault(self.factors_init_value)

    def setFactorsInitValue(self, value):
        self._set(factors_init_value=value)
class FactorizationMachinesBinaryClassifier(SageMakerEstimatorBase, FactorizationMachinesParams):
    """
    A :class:`~sagemaker_pyspark.SageMakerEstimator` that runs a Factorization Machines training
    job in "binary classifier" mode in SageMaker and returns a
    :class:`~sagemaker_pyspark.SageMakerModel` that can be used to transform a DataFrame using
    the hosted Factorization Machines model. The Factorization Machines Binary Classifier is useful
    for classifying examples into one of two classes.

    Amazon SageMaker Factorization Machines trains on RecordIO-encoded Amazon Record protobuf data.
    SageMaker pyspark writes a DataFrame to S3 by selecting a column of Vectors named "features"
    and, if present, a column of Doubles named "label". These names are configurable by passing a
    dictionary with entries in trainingSparkDataFormatOptions with key "labelColumnName" or
    "featuresColumnName", with values corresponding to the desired label and features columns.

    Inferences made against an Endpoint hosting a Factorization Machines Binary classifier model
    contain a "score" field and a "predicted_label" field, both appended to the
    input DataFrame as Doubles.

    Args:
        sagemakerRole (IAMRole): The SageMaker TrainingJob and Hosting IAM Role. Used by
            SageMaker to access S3 and ECR Resources. SageMaker hosted Endpoint instances
            launched by this Estimator run with this role.
        trainingInstanceType (str): The SageMaker TrainingJob Instance Type to use.
        trainingInstanceCount (int): The number of instances of instanceType to run an
            SageMaker Training Job with.
        endpointInstanceType (str): The SageMaker Endpoint Config instance type.
        endpointInitialInstanceCount (int): The SageMaker Endpoint Config minimum number of
            instances that can be used to host modelImage.
        requestRowSerializer (RequestRowSerializer): Serializes Spark DataFrame Rows for
            transformation by Models built from this Estimator.
        responseRowDeserializer (ResponseRowDeserializer): Deserializes an Endpoint response into a
            series of Rows.
        trainingInputS3DataPath (S3Resource): An S3 location to upload SageMaker Training Job input
            data to.
        trainingOutputS3DataPath (S3Resource): An S3 location for SageMaker to store Training Job
            output data to.
        trainingInstanceVolumeSizeInGB (int): The EBS volume size in gigabytes of each instance.
        trainingProjectedColumns (List): The columns to project from the Dataset being fit before
            training. If an Optional.empty is passed then no specific projection will occur and
            all columns will be serialized.
        trainingChannelName (str): The SageMaker Channel name to input serialized Dataset fit
            input to.
        trainingContentType (str): The MIME type of the training data.
        trainingS3DataDistribution (str): The SageMaker Training Job S3 data distribution scheme.
        trainingSparkDataFormat (str): The Spark Data Format name used to serialize the Dataset
            being fit for input to SageMaker.
        trainingSparkDataFormatOptions (dict): The Spark Data Format Options used during
            serialization of the Dataset being fit.
        trainingInputMode (str): The SageMaker Training Job Channel input mode.
        trainingCompressionCodec (str): The type of compression to use when serializing the
            Dataset being fit for input to SageMaker.
        trainingMaxRuntimeInSeconds (int): A SageMaker Training Job Termination Condition
            MaxRuntimeInSeconds.
        trainingKmsKeyId (str): A KMS key ID for the Output Data Source.
        modelEnvironmentVariables (dict): The environment variables that SageMaker will set on the
            model container during execution.
        endpointCreationPolicy (EndpointCreationPolicy): Defines how a SageMaker Endpoint
            referenced by a SageMakerModel is created.
        sagemakerClient (AmazonSageMaker): Amazon SageMaker client. Used to send CreateTrainingJob,
            CreateModel, and CreateEndpoint requests.
        region (str): The region in which to run the algorithm. If not specified, gets the region
            from the DefaultAwsRegionProviderChain.
        s3Client (AmazonS3): Used to create a bucket for staging SageMaker Training Job
            input and/or output if either are set to S3AutoCreatePath.
        stsClient (AmazonSTS): Used to resolve the account number when creating staging
            input / output buckets.
        modelPrependInputRowsToTransformationRows (bool): Whether the transformation result on
            Models built by this Estimator should also include the input Rows. If true,
            each output Row is formed by a concatenation of the input Row with the corresponding
            Row produced by SageMaker Endpoint invocation, produced by responseRowDeserializer.
            If false, each output Row is just taken from responseRowDeserializer.
        deleteStagingDataAfterTraining (bool): Whether to remove the training data on s3 after
            training is complete or failed.
        namePolicyFactory (NamePolicyFactory): The NamePolicyFactory to use when naming SageMaker
            entities created during fit.
        uid (str): The unique identifier of this Estimator. Used to represent this stage in Spark
            ML pipelines.
    """
    # Fully-qualified name of the wrapped Scala/Java estimator class.
    _wrapped_class = \
        "com.amazonaws.services.sagemaker.sparksdk.algorithms."\
        "FactorizationMachinesBinaryClassifier"

    def __init__(
            self,
            trainingInstanceType,
            trainingInstanceCount,
            endpointInstanceType,
            endpointInitialInstanceCount,
            sagemakerRole=IAMRoleFromConfig(),
            requestRowSerializer=ProtobufRequestRowSerializer(),
            responseRowDeserializer=FactorizationMachinesBinaryClassifierDeserializer(),
            trainingInputS3DataPath=S3AutoCreatePath(),
            trainingOutputS3DataPath=S3AutoCreatePath(),
            trainingInstanceVolumeSizeInGB=1024,
            trainingProjectedColumns=None,
            trainingChannelName="train",
            trainingContentType=None,
            trainingS3DataDistribution="ShardedByS3Key",
            trainingSparkDataFormat="sagemaker",
            trainingSparkDataFormatOptions=None,
            trainingInputMode="File",
            trainingCompressionCodec=None,
            trainingMaxRuntimeInSeconds=24*60*60,
            trainingKmsKeyId=None,
            modelEnvironmentVariables=None,
            endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
            sagemakerClient=SageMakerClients.create_sagemaker_client(),
            region=None,
            s3Client=SageMakerClients.create_s3_default_client(),
            stsClient=SageMakerClients.create_sts_default_client(),
            modelPrependInputRowsToTransformationRows=True,
            deleteStagingDataAfterTraining=True,
            namePolicyFactory=RandomNamePolicyFactory(),
            uid=None,
            javaObject=None):
        """Build the estimator and force predictor_type to 'binary_classifier'."""
        if trainingSparkDataFormatOptions is None:
            trainingSparkDataFormatOptions = {}
        if modelEnvironmentVariables is None:
            modelEnvironmentVariables = {}
        if uid is None:
            uid = Identifiable._randomUID()
        # locals() deliberately captures every constructor argument by name so the
        # full set can be forwarded to the base class; do not add locals above this.
        kwargs = locals().copy()
        del kwargs['self']
        super(FactorizationMachinesBinaryClassifier, self).__init__(**kwargs)
        default_params = {
            'predictor_type': 'binary_classifier'
        }
        self._setDefault(**default_params)

    def _get_java_obj(self, **kwargs):
        """Return the wrapped JVM estimator, constructing it if not already provided."""
        if 'javaObject' in kwargs and kwargs['javaObject'] is not None:
            return kwargs['javaObject']
        else:
            # Argument order must match the Scala constructor exactly; optional
            # values are wrapped in Option so the JVM side receives Scala Options.
            return self._new_java_obj(
                FactorizationMachinesBinaryClassifier._wrapped_class,
                kwargs['sagemakerRole'],
                kwargs['trainingInstanceType'],
                kwargs['trainingInstanceCount'],
                kwargs['endpointInstanceType'],
                kwargs['endpointInitialInstanceCount'],
                kwargs['requestRowSerializer'],
                kwargs['responseRowDeserializer'],
                kwargs['trainingInputS3DataPath'],
                kwargs['trainingOutputS3DataPath'],
                kwargs['trainingInstanceVolumeSizeInGB'],
                Option(kwargs['trainingProjectedColumns']),
                kwargs['trainingChannelName'],
                Option(kwargs['trainingContentType']),
                kwargs['trainingS3DataDistribution'],
                kwargs['trainingSparkDataFormat'],
                kwargs['trainingSparkDataFormatOptions'],
                kwargs['trainingInputMode'],
                Option(kwargs['trainingCompressionCodec']),
                kwargs['trainingMaxRuntimeInSeconds'],
                Option(kwargs['trainingKmsKeyId']),
                kwargs['modelEnvironmentVariables'],
                kwargs['endpointCreationPolicy'],
                kwargs['sagemakerClient'],
                Option(kwargs['region']),
                kwargs['s3Client'],
                kwargs['stsClient'],
                kwargs['modelPrependInputRowsToTransformationRows'],
                kwargs['deleteStagingDataAfterTraining'],
                kwargs['namePolicyFactory'],
                kwargs['uid']
            )

    @classmethod
    def _from_java(cls, javaObject):
        """Re-wrap an existing JVM estimator (used when loading pipelines)."""
        return FactorizationMachinesBinaryClassifier(sagemakerRole=None, javaObject=javaObject)
class FactorizationMachinesRegressor(SageMakerEstimatorBase, FactorizationMachinesParams):
    """
    A :class:`~sagemaker_pyspark.SageMakerEstimator` that runs a Factorization Machines training
    job in "regressor" mode in SageMaker and returns a :class:`~sagemaker_pyspark.SageMakerModel`
    that can be used to transform a DataFrame using the hosted Factorization Machines model.
    The Factorization Machines Regressor is useful for predicting a real-valued label
    from training examples.

    Amazon SageMaker Factorization Machines trains on RecordIO-encoded Amazon Record protobuf
    data. SageMaker pyspark writes a DataFrame to S3 by selecting a column of Vectors named
    "features" and, if present, a column of Doubles named "label". These names are configurable by
    passing a dictionary with entries in trainingSparkDataFormatOptions with key "labelColumnName"
    or "featuresColumnName", with values corresponding to the desired label and features columns.

    For inference against a hosted Endpoint, the SageMakerModel returned by :meth:`fit()` by
    Factorization Machines uses :class:`~sagemaker_pyspark.transformation
    .serializers.ProtobufRequestRowSerializer` to serialize Rows into RecordIO-encoded Amazon
    Record protobuf messages, by default selecting the column named "features" expected to contain
    a Vector of Doubles.

    Inferences made against an Endpoint hosting a Factorization Machines Regressor model contain
    a "score" field appended to the input DataFrame as a Double.

    Args:
        sagemakerRole (IAMRole): The SageMaker TrainingJob and Hosting IAM Role. Used by
            SageMaker to access S3 and ECR Resources. SageMaker hosted Endpoint instances
            launched by this Estimator run with this role.
        trainingInstanceType (str): The SageMaker TrainingJob Instance Type to use.
        trainingInstanceCount (int): The number of instances of instanceType to run an
            SageMaker Training Job with.
        endpointInstanceType (str): The SageMaker Endpoint Config instance type.
        endpointInitialInstanceCount (int): The SageMaker Endpoint Config minimum number of
            instances that can be used to host modelImage.
        requestRowSerializer (RequestRowSerializer): Serializes Spark DataFrame Rows for
            transformation by Models built from this Estimator.
        responseRowDeserializer (ResponseRowDeserializer): Deserializes an Endpoint response into a
            series of Rows.
        trainingInputS3DataPath (S3Resource): An S3 location to upload SageMaker Training Job input
            data to.
        trainingOutputS3DataPath (S3Resource): An S3 location for SageMaker to store Training Job
            output data to.
        trainingInstanceVolumeSizeInGB (int): The EBS volume size in gigabytes of each instance.
        trainingProjectedColumns (List): The columns to project from the Dataset being fit before
            training. If an Optional.empty is passed then no specific projection will occur and
            all columns will be serialized.
        trainingChannelName (str): The SageMaker Channel name to input serialized Dataset fit
            input to.
        trainingContentType (str): The MIME type of the training data.
        trainingS3DataDistribution (str): The SageMaker Training Job S3 data distribution scheme.
        trainingSparkDataFormat (str): The Spark Data Format name used to serialize the Dataset
            being fit for input to SageMaker.
        trainingSparkDataFormatOptions (dict): The Spark Data Format Options used during
            serialization of the Dataset being fit.
        trainingInputMode (str): The SageMaker Training Job Channel input mode.
        trainingCompressionCodec (str): The type of compression to use when serializing the
            Dataset being fit for input to SageMaker.
        trainingMaxRuntimeInSeconds (int): A SageMaker Training Job Termination Condition
            MaxRuntimeInSeconds.
        trainingKmsKeyId (str): A KMS key ID for the Output Data Source.
        modelEnvironmentVariables (dict): The environment variables that SageMaker will set on the
            model container during execution.
        endpointCreationPolicy (EndpointCreationPolicy): Defines how a SageMaker Endpoint
            referenced by a SageMakerModel is created.
        sagemakerClient (AmazonSageMaker): Amazon SageMaker client. Used to send CreateTrainingJob,
            CreateModel, and CreateEndpoint requests.
        region (str): The region in which to run the algorithm. If not specified, gets the region
            from the DefaultAwsRegionProviderChain.
        s3Client (AmazonS3): Used to create a bucket for staging SageMaker Training Job
            input and/or output if either are set to S3AutoCreatePath.
        stsClient (AmazonSTS): Used to resolve the account number when creating staging
            input / output buckets.
        modelPrependInputRowsToTransformationRows (bool): Whether the transformation result on
            Models built by this Estimator should also include the input Rows. If true,
            each output Row is formed by a concatenation of the input Row with the corresponding
            Row produced by SageMaker Endpoint invocation, produced by responseRowDeserializer.
            If false, each output Row is just taken from responseRowDeserializer.
        deleteStagingDataAfterTraining (bool): Whether to remove the training data on s3 after
            training is complete or failed.
        namePolicyFactory (NamePolicyFactory): The NamePolicyFactory to use when naming SageMaker
            entities created during fit.
        uid (str): The unique identifier of this Estimator. Used to represent this stage in Spark
            ML pipelines.
    """
    # Fully-qualified name of the wrapped Scala/Java estimator class.
    _wrapped_class = "com.amazonaws.services.sagemaker.sparksdk.algorithms."\
                     "FactorizationMachinesRegressor"

    def __init__(self,
                 trainingInstanceType,
                 trainingInstanceCount,
                 endpointInstanceType,
                 endpointInitialInstanceCount,
                 sagemakerRole=IAMRoleFromConfig(),
                 requestRowSerializer=ProtobufRequestRowSerializer(),
                 responseRowDeserializer=FactorizationMachinesRegressorDeserializer(),
                 trainingInputS3DataPath=S3AutoCreatePath(),
                 trainingOutputS3DataPath=S3AutoCreatePath(),
                 trainingInstanceVolumeSizeInGB=1024,
                 trainingProjectedColumns=None,
                 trainingChannelName="train",
                 trainingContentType=None,
                 trainingS3DataDistribution="ShardedByS3Key",
                 trainingSparkDataFormat="sagemaker",
                 trainingSparkDataFormatOptions=None,
                 trainingInputMode="File",
                 trainingCompressionCodec=None,
                 trainingMaxRuntimeInSeconds=24*60*60,
                 trainingKmsKeyId=None,
                 modelEnvironmentVariables=None,
                 endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_CONSTRUCT,
                 sagemakerClient=SageMakerClients.create_sagemaker_client(),
                 region=None,
                 s3Client=SageMakerClients.create_s3_default_client(),
                 stsClient=SageMakerClients.create_sts_default_client(),
                 modelPrependInputRowsToTransformationRows=True,
                 deleteStagingDataAfterTraining=True,
                 namePolicyFactory=RandomNamePolicyFactory(),
                 uid=None,
                 javaObject=None):
        """Build the estimator and force predictor_type to 'regressor'."""
        if trainingSparkDataFormatOptions is None:
            trainingSparkDataFormatOptions = {}
        if modelEnvironmentVariables is None:
            modelEnvironmentVariables = {}
        if uid is None:
            uid = Identifiable._randomUID()
        # locals() deliberately captures every constructor argument by name so the
        # full set can be forwarded to the base class; do not add locals above this.
        kwargs = locals().copy()
        del kwargs['self']
        super(FactorizationMachinesRegressor, self).__init__(**kwargs)
        default_params = {
            'predictor_type': 'regressor'
        }
        self._setDefault(**default_params)

    def _get_java_obj(self, **kwargs):
        """Return the wrapped JVM estimator, constructing it if not already provided."""
        if 'javaObject' in kwargs and kwargs['javaObject'] is not None:
            return kwargs['javaObject']
        else:
            # Argument order must match the Scala constructor exactly; optional
            # values are wrapped in Option so the JVM side receives Scala Options.
            return self._new_java_obj(
                FactorizationMachinesRegressor._wrapped_class,
                kwargs['sagemakerRole'],
                kwargs['trainingInstanceType'],
                kwargs['trainingInstanceCount'],
                kwargs['endpointInstanceType'],
                kwargs['endpointInitialInstanceCount'],
                kwargs['requestRowSerializer'],
                kwargs['responseRowDeserializer'],
                kwargs['trainingInputS3DataPath'],
                kwargs['trainingOutputS3DataPath'],
                kwargs['trainingInstanceVolumeSizeInGB'],
                Option(kwargs['trainingProjectedColumns']),
                kwargs['trainingChannelName'],
                Option(kwargs['trainingContentType']),
                kwargs['trainingS3DataDistribution'],
                kwargs['trainingSparkDataFormat'],
                kwargs['trainingSparkDataFormatOptions'],
                kwargs['trainingInputMode'],
                Option(kwargs['trainingCompressionCodec']),
                kwargs['trainingMaxRuntimeInSeconds'],
                Option(kwargs['trainingKmsKeyId']),
                kwargs['modelEnvironmentVariables'],
                kwargs['endpointCreationPolicy'],
                kwargs['sagemakerClient'],
                Option(kwargs['region']),
                kwargs['s3Client'],
                kwargs['stsClient'],
                kwargs['modelPrependInputRowsToTransformationRows'],
                kwargs['deleteStagingDataAfterTraining'],
                kwargs['namePolicyFactory'],
                kwargs['uid']
            )

    @classmethod
    def _from_java(cls, javaObject):
        """Re-wrap an existing JVM estimator (used when loading pipelines)."""
        return FactorizationMachinesRegressor(sagemakerRole=None, javaObject=javaObject)
from __future__ import absolute_import
import os
import torch
from sagemaker_inference import (
content_types,
decoder,
default_inference_handler,
encoder,
errors,
utils,
)
INFERENCE_ACCELERATOR_PRESENT_ENV = "SAGEMAKER_INFERENCE_ACCELERATOR_PRESENT"
DEFAULT_MODEL_FILENAME = "model.pt"
class ModelLoadError(Exception):
    """Raised when a model artifact cannot be loaded as a TorchScript module."""
    pass
class DefaultPytorchInferenceHandler(default_inference_handler.DefaultInferenceHandler):
    """Default SageMaker inference handler for PyTorch.

    Implements the standard inference hooks (model_fn, input_fn, predict_fn,
    output_fn) used by the SageMaker inference toolkit when the user does not
    provide their own implementations.
    """

    VALID_CONTENT_TYPES = (content_types.JSON, content_types.NPY)

    @staticmethod
    def _is_model_file(filename):
        """Return True if ``filename`` is an existing file with a .pt or .pth extension.

        Args:
            filename (str): full path of the candidate file.
        """
        is_model_file = False
        if os.path.isfile(filename):
            _, ext = os.path.splitext(filename)
            is_model_file = ext in [".pt", ".pth"]
        return is_model_file

    def default_model_fn(self, model_dir):
        """Loads a model. For PyTorch, a default function to load a model only if Elastic
        Inference is used. In other cases, users should provide customized model_fn() in script.

        Args:
            model_dir: a directory where model is saved.

        Returns: A PyTorch model.

        Raises:
            FileNotFoundError: if the default model file is missing in EIA mode.
            ModelLoadError: if the artifact is not a valid TorchScript archive.
        """
        if os.getenv(INFERENCE_ACCELERATOR_PRESENT_ENV) == "true":
            model_path = os.path.join(model_dir, DEFAULT_MODEL_FILENAME)
            if not os.path.exists(model_path):
                raise FileNotFoundError("Failed to load model with default model_fn: missing file {}."
                                        .format(DEFAULT_MODEL_FILENAME))
            # Client-framework is CPU only. But model will run in Elastic Inference server with CUDA.
            try:
                return torch.jit.load(model_path, map_location=torch.device('cpu'))
            except RuntimeError as e:
                raise ModelLoadError(
                    "Failed to load {}. Please ensure model is saved using torchscript.".format(model_path)
                ) from e
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            model_path = os.path.join(model_dir, DEFAULT_MODEL_FILENAME)
            if not os.path.exists(model_path):
                # os.listdir yields bare file names, so each candidate must be
                # joined with model_dir before checking it on disk; a bare name
                # would be resolved against the process working directory.
                model_files = [file for file in os.listdir(model_dir)
                               if self._is_model_file(os.path.join(model_dir, file))]
                if len(model_files) != 1:
                    raise ValueError(
                        "Exactly one .pth or .pt file is required for PyTorch models: {}".format(model_files)
                    )
                model_path = os.path.join(model_dir, model_files[0])
            try:
                model = torch.jit.load(model_path, map_location=device)
            except RuntimeError as e:
                raise ModelLoadError(
                    "Failed to load {}. Please ensure model is saved using torchscript.".format(model_path)
                ) from e
            model = model.to(device)
            return model

    def default_input_fn(self, input_data, content_type):
        """A default input_fn that can handle JSON, CSV and NPY formats.

        Args:
            input_data: the request payload serialized in the content_type format
            content_type: the request content_type

        Returns: input_data deserialized into torch.FloatTensor or torch.cuda.FloatTensor,
        depending if cuda is available.
        """
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        np_array = decoder.decode(input_data, content_type)
        # Text payloads (JSON/CSV) decode to object/float arrays, so coerce to
        # FloatTensor; binary NPY payloads keep their original dtype.
        tensor = torch.FloatTensor(
            np_array) if content_type in content_types.UTF8_TYPES else torch.from_numpy(np_array)
        return tensor.to(device)

    def default_predict_fn(self, data, model):
        """A default predict_fn for PyTorch. Calls a model on data deserialized in input_fn.
        Runs prediction on GPU if cuda is available.

        Args:
            data: input data (torch.Tensor) for prediction deserialized by input_fn
            model: PyTorch model loaded in memory by model_fn

        Returns: a prediction
        """
        with torch.no_grad():
            if os.getenv(INFERENCE_ACCELERATOR_PRESENT_ENV) == "true":
                # Elastic Inference: inference runs on the attached accelerator,
                # so the client-side model and inputs stay on CPU.
                device = torch.device("cpu")
                model = model.to(device)
                input_data = data.to(device)
                model.eval()
                with torch.jit.optimized_execution(True, {"target_device": "eia:0"}):
                    output = model(input_data)
            else:
                device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
                model = model.to(device)
                input_data = data.to(device)
                model.eval()
                output = model(input_data)

        return output

    def default_output_fn(self, prediction, accept):
        """A default output_fn for PyTorch. Serializes predictions from predict_fn to JSON,
        CSV or NPY format.

        Args:
            prediction: a prediction result from predict_fn
            accept: type which the output data needs to be serialized

        Returns: output data serialized

        Raises:
            errors.UnsupportedFormatError: if no requested content type is supported.
        """
        # isinstance (rather than an exact type check) also covers Tensor
        # subclasses such as nn.Parameter.
        if isinstance(prediction, torch.Tensor):
            prediction = prediction.detach().cpu().numpy().tolist()
        # Honor the first supported content type in the Accept header.
        for content_type in utils.parse_accept(accept):
            if content_type in encoder.SUPPORTED_CONTENT_TYPES:
                encoded_prediction = encoder.encode(prediction, content_type)
                if content_type == content_types.CSV:
                    encoded_prediction = encoded_prediction.encode("utf-8")
                return encoded_prediction
        raise errors.UnsupportedFormatError(accept)
from __future__ import absolute_import
from sagemaker_pytorch_serving_container import ts_parameters
import os
import logging
logger = logging.getLogger()
DEFAULT_TS_BATCH_SIZE = 1
DEFAULT_TS_MAX_BATCH_DELAY = 100
DEFAULT_TS_MIN_WORKERS = 1
DEFAULT_TS_MAX_WORKERS = 1
DEFAULT_TS_RESPONSE_TIMEOUT = 60
class TorchServeEnvironment():
    """Provides access to aspects of the torchserve environment relevant to serving containers,
    including system characteristics, environment variables and configuration settings.

    The Environment is a read-only snapshot of the container environment taken at
    construction time.

    Attributes:
        batch_size (int): The maximum number of requests batched together before the
            model handler is invoked
        max_batch_delay (int): The maximum batch delay in ms that TorchServe waits to receive
            batch_size number of requests. If TorchServe doesn't receive batch_size number of
            requests before this timer times out, it sends whatever requests were received
            to the model handler
        min_workers (int): Minimum number of workers that torchserve is allowed to scale down to
        max_workers (int): Maximum number of workers that torchserve is allowed to scale up to
        response_timeout (int): Time delay after which inference will timeout in absence
            of a response
    """
    def __init__(self):
        # Each setting falls back to its module-level default when the corresponding
        # environment variable (named in ts_parameters) is not set.
        self._batch_size = int(os.environ.get(ts_parameters.MODEL_SERVER_BATCH_SIZE, DEFAULT_TS_BATCH_SIZE))
        self._max_batch_delay = int(os.environ.get(ts_parameters.MODEL_SERVER_MAX_BATCH_DELAY,
                                                   DEFAULT_TS_MAX_BATCH_DELAY))
        self._min_workers = int(os.environ.get(ts_parameters.MODEL_SERVER_MIN_WORKERS, DEFAULT_TS_MIN_WORKERS))
        self._max_workers = int(os.environ.get(ts_parameters.MODEL_SERVER_MAX_WORKERS, DEFAULT_TS_MAX_WORKERS))
        self._response_timeout = int(os.environ.get(ts_parameters.MODEL_SERVER_RESPONSE_TIMEOUT,
                                                    DEFAULT_TS_RESPONSE_TIMEOUT))

    def is_env_set(self):  # type: () -> bool
        """bool: whether or not any of the torchserve environment variables have been set"""
        ts_env_list = [ts_parameters.MODEL_SERVER_BATCH_SIZE, ts_parameters.MODEL_SERVER_MAX_BATCH_DELAY,
                       ts_parameters.MODEL_SERVER_MIN_WORKERS, ts_parameters.MODEL_SERVER_MAX_WORKERS,
                       ts_parameters.MODEL_SERVER_RESPONSE_TIMEOUT]
        # Return an explicit bool: the previous implementation returned True or an
        # implicit None, despite the documented -> bool contract.
        return any(env in ts_env_list for env in os.environ)

    @property
    def batch_size(self):  # type: () -> int
        """int: number of requests to batch before running inference on the server"""
        return self._batch_size

    @property
    def max_batch_delay(self):  # type: () -> int
        """int: time delay in milliseconds, to wait for incoming requests to be batched,
        before running inference on the server
        """
        return self._max_batch_delay

    @property
    def min_workers(self):  # type:() -> int
        """int: minimum number of workers for model
        """
        return self._min_workers

    @property
    def max_workers(self):  # type() -> int
        """int: maximum number of workers for model
        """
        return self._max_workers

    @property
    def response_timeout(self):  # type() -> int
        """int: time delay after which inference will timeout in absence of a response
        """
        return self._response_timeout
"""This module contains functionality to configure and start Torchserve."""
from __future__ import absolute_import
import os
import signal
import subprocess
import pkg_resources
import psutil
import logging
from retrying import retry
import sagemaker_pytorch_serving_container
from sagemaker_pytorch_serving_container import ts_environment
from sagemaker_inference import environment, utils, model_server
logger = logging.getLogger()
TS_CONFIG_FILE = os.path.join("/etc", "sagemaker-ts.properties")
DEFAULT_TS_CONFIG_FILE = pkg_resources.resource_filename(
sagemaker_pytorch_serving_container.__name__, "/etc/default-ts.properties"
)
MME_TS_CONFIG_FILE = pkg_resources.resource_filename(
sagemaker_pytorch_serving_container.__name__, "/etc/mme-ts.properties"
)
DEFAULT_TS_LOG_FILE = pkg_resources.resource_filename(
sagemaker_pytorch_serving_container.__name__, "/etc/log4j2.xml"
)
DEFAULT_TS_MODEL_NAME = "model"
DEFAULT_HANDLER_SERVICE = "sagemaker_pytorch_serving_container.handler_service"
ENABLE_MULTI_MODEL = os.getenv("SAGEMAKER_MULTI_MODEL", "false") == "true"
MODEL_STORE = "/" if ENABLE_MULTI_MODEL else os.path.join(os.getcwd(), ".sagemaker", "ts", "models")
PYTHON_PATH_ENV = "PYTHONPATH"
TS_NAMESPACE = "org.pytorch.serve.ModelServer"
def start_torchserve(handler_service=DEFAULT_HANDLER_SERVICE):
    """Configure and start the model server.

    Args:
        handler_service (str): Python path pointing to a module that defines
            a class with the following:

                - A ``handle`` method, which is invoked for all incoming inference
                    requests to the model server.
                - A ``initialize`` method, which is invoked at model server start up
                    for loading the model.

            Defaults to ``sagemaker_pytorch_serving_container.default_handler_service``.
    """
    if ENABLE_MULTI_MODEL:
        # In multi-model mode the handler is resolved from the environment by the
        # model archiver when each model is loaded dynamically.
        if "SAGEMAKER_HANDLER" not in os.environ:
            os.environ["SAGEMAKER_HANDLER"] = handler_service
    else:
        if not os.path.exists(MODEL_STORE):
            os.makedirs(MODEL_STORE)

    _set_python_path()

    _create_torchserve_config_file(handler_service)

    # Install user-supplied pip requirements (if a requirements.txt was shipped).
    if os.path.exists(model_server.REQUIREMENTS_PATH):
        model_server._install_requirements()

    ts_torchserve_cmd = [
        "torchserve",
        "--start",
        "--model-store",
        MODEL_STORE,
        "--ts-config",
        TS_CONFIG_FILE,
        "--log-config",
        DEFAULT_TS_LOG_FILE,
    ]

    default_model_path_args = ["--models", DEFAULT_TS_MODEL_NAME + "=" + environment.model_dir]
    if not ENABLE_MULTI_MODEL:
        ts_torchserve_cmd += default_model_path_args

    # Log the launch command once (a stray debug print() duplicate was removed).
    logger.info(ts_torchserve_cmd)

    subprocess.Popen(ts_torchserve_cmd)

    # Forward SIGTERM to the torchserve JVM and block until it exits.
    ts_process = _retrieve_ts_server_process()

    _add_sigterm_handler(ts_process)

    ts_process.wait()
def _set_python_path():
    """Prepend the user code directory to PYTHONPATH.

    Torchserve handles code execution by appending the export path, provided to
    the model archiver, to the PYTHONPATH env var; the code_dir must also be on
    PYTHONPATH or the user-provided module cannot be imported.
    """
    if PYTHON_PATH_ENV in os.environ:
        existing_path = os.environ[PYTHON_PATH_ENV]
        os.environ[PYTHON_PATH_ENV] = "{}:{}".format(environment.code_dir, existing_path)
    else:
        os.environ[PYTHON_PATH_ENV] = environment.code_dir
def _create_torchserve_config_file(handler_service):
    """Render the Torchserve properties for ``handler_service`` and write them to TS_CONFIG_FILE."""
    properties = _generate_ts_config_properties(handler_service)
    utils.write_file(TS_CONFIG_FILE, properties)
def _generate_ts_config_properties(handler_service):
    """Build the contents of the Torchserve properties file.

    Merges the packaged default properties (single- or multi-model variant) with
    settings derived from the SageMaker serving environment variables.

    Args:
        handler_service (str): Python path of the handler service module.

    Returns:
        str: the full properties-file contents.
    """
    env = environment.Environment()

    user_defined_configuration = {
        "default_response_timeout": env.model_server_timeout,
        "default_workers_per_model": env.model_server_workers,
        "inference_address": "http://0.0.0.0:{}".format(env.inference_http_port),
        "management_address": "http://0.0.0.0:{}".format(env.management_http_port),
        "default_service_handler": handler_service + ":handle",
    }

    ts_env = ts_environment.TorchServeEnvironment()

    if ts_env.is_env_set() and not ENABLE_MULTI_MODEL:
        # Inline JSON model configuration; trailing backslashes keep the
        # properties-file value on one logical line.
        models_string = f'''{{\\
"{DEFAULT_TS_MODEL_NAME}": {{\\
"1.0": {{\\
"defaultVersion": true,\\
"marName": "{DEFAULT_TS_MODEL_NAME}.mar",\\
"minWorkers": {ts_env._min_workers},\\
"maxWorkers": {ts_env._max_workers},\\
"batchSize": {ts_env._batch_size},\\
"maxBatchDelay": {ts_env._max_batch_delay},\\
"responseTimeout": {ts_env._response_timeout}\\
}}\\
}}\\
}}'''
        user_defined_configuration["models"] = models_string

        # logger.warning: logger.warn is deprecated since Python 3.3.
        logger.warning("Sagemaker TS environment variables have been set and will be used "
                       "for single model endpoint.")

    custom_configuration = ""

    for key, value in user_defined_configuration.items():
        if value:
            custom_configuration += "{}={}\n".format(key, value)

    if ENABLE_MULTI_MODEL:
        default_configuration = utils.read_file(MME_TS_CONFIG_FILE)
    else:
        default_configuration = utils.read_file(DEFAULT_TS_CONFIG_FILE)

    return default_configuration + custom_configuration
def _add_sigterm_handler(ts_process):
def _terminate(signo, frame): # pylint: disable=unused-argument
try:
os.kill(ts_process.pid, signal.SIGTERM)
except OSError:
pass
signal.signal(signal.SIGTERM, _terminate)
# retry for 10 seconds
@retry(stop_max_delay=10 * 1000)
def _retrieve_ts_server_process():
    """Locate the running Torchserve server process, retrying for up to 10 seconds.

    Returns:
        psutil.Process: the single Torchserve JVM process.

    Raises:
        Exception: if no server process is found, or more than one is found.
    """
    ts_server_processes = list()

    for process in psutil.process_iter():
        try:
            cmdline = process.cmdline()
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Processes can exit or become inaccessible while we iterate; skip them
            # instead of letting the whole scan fail.
            continue
        if TS_NAMESPACE in cmdline:
            ts_server_processes.append(process)

    if not ts_server_processes:
        raise Exception("Torchserve model server was unsuccessfully started")

    if len(ts_server_processes) > 1:
        raise Exception("multiple ts model servers are not supported")

    return ts_server_processes[0]
from __future__ import absolute_import
import os
import logging
from retrying import retry
import six
import socket
import sys
from sagemaker_training import entry_point, environment, errors, runner
MASTER_PORT = '7777'
LAUNCH_SMDATAPARALLEL_ENV_NAME = 'sagemaker_distributed_dataparallel_enabled'
LAUNCH_MPI_ENV_NAME = 'sagemaker_mpi_enabled'
LAUNCH_PYTORCH_DDP_ENV_NAME = "sagemaker_pytorch_ddp_enabled"
LAUNCH_PYTORCH_XLA_ENV_NAME = "sagemaker_pytorch_xla_multi_worker_enabled"
LAUNCH_TORCH_DISTRIBUTED_ENV_NAME = "sagemaker_torch_distributed_enabled"
logger = logging.getLogger(__name__)
def train(training_environment):
    """Run PyTorch training on a user supplied module.

    The user supplied module is run in either a local or distributed SageMaker
    environment.

    The user supplied module and its dependencies are downloaded from S3.
    Training is invoked by calling a "train" function in the user supplied module.

    If the environment contains multiple hosts, then a distributed learning
    task is started.

    Args:
        training_environment: training environment object containing environment
            variables, training arguments and hyperparameters.
    """
    # Block until all host DNS lookups succeed. Relies on retrying dns_lookup.
    logger.info('Block until all host DNS lookups succeed.')
    for host in training_environment.hosts:
        _dns_lookup(host)

    _set_nccl_environment(training_environment.network_interface_name)

    _set_distributed_environment(training_environment)

    mpi_enabled = training_environment.additional_framework_parameters.get(LAUNCH_MPI_ENV_NAME)

    pytorch_ddp_enabled = training_environment.additional_framework_parameters.get(
        LAUNCH_PYTORCH_DDP_ENV_NAME, False
    )

    smdataparallel_enabled = training_environment.additional_framework_parameters.get(
        LAUNCH_SMDATAPARALLEL_ENV_NAME, False
    )

    pytorch_xla_enabled = training_environment.additional_framework_parameters.get(
        LAUNCH_PYTORCH_XLA_ENV_NAME, False
    )

    torch_distributed_enabled = training_environment.additional_framework_parameters.get(
        LAUNCH_TORCH_DISTRIBUTED_ENV_NAME, False
    )

    # default scenario: plain (non-distributed) process runner
    runner_type = runner.ProcessRunnerType

    if training_environment.current_instance_group in training_environment.distribution_instance_groups:
        if mpi_enabled:
            runner_type = runner.MPIRunnerType
        elif pytorch_ddp_enabled:
            runner_type = runner.SMDataParallelRunnerType
            logger.info('Invoking SMDataParallel for native PT DDP job')
        elif torch_distributed_enabled:
            runner_type = runner.TorchDistributedRunnerType
            logger.info('Invoking TorchDistributed...')
        elif smdataparallel_enabled:
            runner_type = runner.SMDataParallelRunnerType
            logger.info('Invoking SMDataParallel')
        elif pytorch_xla_enabled:
            runner_type = runner.PyTorchXLARunnerType
            logger.info('Invoking PT-XLA Runner')
    logger.info('Invoking user training script.')

    # get capture_error from framework parameters
    capture_error = True
    if training_environment.additional_framework_parameters.get("sagemaker_toolkit_native_launcher_enabled"):
        capture_error = False
    logger.info(f'capture_error is {capture_error}. Default is True')

    _set_torch_version_environment()

    try:
        entry_point.run(uri=training_environment.module_dir,
                        user_entry_point=training_environment.user_entry_point,
                        args=training_environment.to_cmd_args(),
                        env_vars=training_environment.to_env_vars(),
                        capture_error=capture_error,
                        runner_type=runner_type)

    except errors.ExecuteUserScriptError as err:
        message = str(err)
        # A gloo teardown race that is known to be benign; warn instead of failing.
        if "terminate called after throwing an instance of 'gloo::EnforceNotMet'" in message:
            logger.warning('Known exception: {}'.format(message))
        else:
            info = sys.exc_info()
            six.reraise(info[0], err, info[2])
@retry(stop_max_delay=1000 * 60 * 15,  # give up after 15 minutes total
       wait_exponential_multiplier=100,  # exponential backoff starting at 100 ms
       wait_exponential_max=30000)  # capped at 30 s between attempts
def _dns_lookup(host):
    """Resolve ``host`` to an IPv4 address, retrying until DNS is available.

    Cluster peer hostnames may not resolve immediately at container start-up,
    so the lookup is retried with exponential backoff (via the ``@retry``
    decorator above — presumably from the ``retrying`` library; confirm the
    import at the top of the file) for up to 15 minutes.

    Args:
        host (str): hostname to resolve.

    Returns:
        str: IPv4 address of ``host``.
    """
    return socket.gethostbyname(host)
def _set_distributed_environment(training_env):
    """Export the rendezvous endpoint used for distributed training.

    Args:
        training_env: training environment object providing ``master_hostname``.
    """
    # According to https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html
    # hosts are sorted lexicographically, so the master hostname is stable.
    os.environ.update({
        'MASTER_ADDR': training_env.master_hostname,
        'MASTER_PORT': MASTER_PORT,
    })
def _set_nccl_environment(network_interface_name):
"""Set NCCL environment variables for the container.
https://docs.nvidia.com/deeplearning/sdk/nccl-developer-guide/index.html#ncclknobs
Args:
network_interface_name: The name of the network interface to use for
distributed training.
"""
# Set the network interface for inter node communication
os.environ['NCCL_SOCKET_IFNAME'] = network_interface_name
# Disable IB transport and force to use IP sockets by default
os.environ['NCCL_IB_DISABLE'] = '1'
# Set to INFO for more NCCL debugging information
os.environ['NCCL_DEBUG'] = 'WARN'
def _set_torch_version_environment():
"""Set PyTorch version environment variable.
This is the PyTorch version of the DLC.
"""
try:
import torch
os.environ["SM_DLC_TORCH_VERSION"] = torch.__version__
except ModuleNotFoundError:
logger.warn("PyTorch cannot be found")
except ImportError:
logger.warn("PyTorch can be found, but cannot be imported")
except Exception:
logger.warn("Torch version environment variable cannot be set")
def main():
    """Entry point: train using the current SageMaker container environment."""
    training_env = environment.Environment()
    train(training_env)
from typing import Any, List, Union
from sagemaker_rightline.model import Rule, ValidationResult
class Equals(Rule):
    """Rule asserting that an observed collection equals an expected one."""

    def __init__(self, negative: bool = False) -> None:
        """Create an Equals rule.

        :param negative: whether the rule should be inverted, i.e. "not" (default: False)
        :type negative: bool
        """
        super().__init__("Equals", negative)

    def run(
        self,
        observed: List[Union[int, float, str, dict]],
        expected: List[Union[int, float, str, dict]],
        validation_name: str,
    ) -> ValidationResult:
        """Compare ``observed`` against ``expected`` and report the outcome.

        :param observed: observed list
        :type observed: List[Any]
        :param expected: expected list
        :type expected: List[Any]
        :param validation_name: name of the validation
        :type validation_name: str
        :return: validation result
        :rtype: ValidationResult
        """
        try:
            # Hashable scalars (int, float, str): order-insensitive comparison.
            is_equal = set(observed) == set(expected)
        except TypeError:
            # Unhashable contents fall through to structural comparisons.
            if isinstance(observed, dict) and isinstance(expected, dict):
                is_equal = observed == expected
            elif isinstance(observed, list) and isinstance(expected, list):
                # Nested lists: mutual containment (order-insensitive).
                is_equal = all(item in observed for item in expected) and all(
                    item in expected for item in observed
                )
            else:
                is_equal = observed == expected
        if self.negative:
            is_equal = not is_equal
        return ValidationResult(
            validation_name=validation_name,
            success=is_equal,
            negative=self.negative,
            message=f"{str(observed)} does {'not ' if not is_equal else ''}equal {str(expected)}",
            subject=str(expected),
        )
class Contains(Rule):
    """Rule asserting that an observed list contains all expected items."""

    def __init__(self, negative: bool = False) -> None:
        """Create a Contains rule.

        :param negative: whether the rule should be inverted, i.e. "not" (default: False)
        :type negative: bool
        """
        super().__init__("Contains", negative)

    def run(
        self, observed: List[Any], expected: List[Any], validation_name: str
    ) -> ValidationResult:
        """Check whether every item of ``expected`` occurs in ``observed``.

        :param observed: observed list
        :type observed: List[Any]
        :param expected: expected list
        :type expected: List[Any]
        :param validation_name: name of the validation
        :type validation_name: str
        :return: validation result
        :rtype: ValidationResult
        """
        is_contained = all(item in observed for item in expected)
        if self.negative:
            is_contained = not is_contained
        return ValidationResult(
            validation_name=validation_name,
            success=is_contained,
            negative=self.negative,
            message=f"{str(observed)} does {'not ' if not is_contained else ''}contain "
            f"{str(expected)}",
            subject=str(expected),
        )
import logging
import re
from abc import ABC, abstractmethod
from copy import copy
from dataclasses import dataclass
from operator import attrgetter
from typing import Any, Iterable, List, Optional, Union
import pandas as pd
from sagemaker.workflow.pipeline import Pipeline
@dataclass
class ValidationResult:
    """Outcome of running a single validation rule."""

    validation_name: str  # name of the validation that produced this result
    success: bool  # whether the (possibly negated) rule passed
    negative: bool  # whether the rule was inverted ("not") when evaluated
    message: str  # human-readable description of the outcome
    subject: str  # string representation of the value the rule compared against
class Rule(ABC):
    """Rule abstract base class.

    A rule compares an observed value against an expected one and wraps the
    outcome in a ``ValidationResult``. Concrete rules implement ``run``.
    """

    def __init__(self, name: str, negative: bool = False) -> None:
        """Initialize a Rule object.
        :param name: name of the rule
        :type name: str
        :param negative: whether the rule should be inverted, i.e. "not" (default: False)
        :type negative: bool
        :return:
        :rtype:
        """
        self.name: str = name
        self.negative: bool = negative

    @abstractmethod
    def run(self, observed: Any, expected: Any, validation_name: str) -> ValidationResult:
        """Run the rule.
        :param observed: observed value
        :type observed: Any
        :param expected: expected value
        :type expected: Any
        :param validation_name: name of the validation
        :type validation_name: str
        :return: validation result
        :rtype: ValidationResult
        """
        raise NotImplementedError
class Validation(ABC):
    """Abstract class for validations.

    A validation names a set of attribute paths on a SageMaker ``Pipeline``
    and (optionally) a ``Rule`` to apply to the values found at those paths.
    """

    def __init__(
        self, name: str, paths: Optional[List[str]] = None, rule: Optional[Rule] = None
    ) -> None:
        """Initialize a Validation object.
        :param paths: list of paths to the attributes to be validated
        :type paths: List[str]
        :param name: name of the validation
        :type name: str
        :param rule: rule to be applied to the validation, defaults to None
        :type rule: Optional[Rule]
        :return:
        :rtype:
        """
        self.paths: List[Optional[str]] = paths if paths else []
        self.name: str = name
        self.rule: Rule = rule

    @staticmethod
    def get_filtered_attributes(filter_subject: Iterable[object], path: str) -> List[object]:
        """Get filtered attributes from path.

        Keeps only the elements of ``filter_subject`` that satisfy every
        ``key==value`` condition found in the bracketed filter expression of
        ``path`` (conditions are joined with ``&&``; ``/`` in a key is treated
        as attribute nesting, i.e. replaced by ``.``).

        :param filter_subject: subject to be filtered
        :type filter_subject: Iterable[object]
        :param path: path to the attribute to be filtered
        :type path: str
        :return: filtered attributes
        :rtype: List[object]
        """
        # TODO: refactor
        filtered_steps = []
        # Pull the text between the first "[...]" pair, strip spaces, split on "&&".
        filter_conditions = (
            re.search("\[(.*?)\]", path).group(1).replace(" ", "").split("&&")  # noqa: W605
        )
        filter_conditions = [
            condition.replace("/", ".") for condition in filter_conditions if condition
        ]
        for subject in filter_subject:
            match = []
            for condition in filter_conditions:
                # Each condition is "attr.path==value"; the comparison is done
                # on the raw (string) right-hand side of the expression.
                filter_key, filter_value = condition.split("==")
                if attrgetter(filter_key)(subject) != filter_value:
                    match.append(False)
                    continue
                match.append(True)
            # NOTE: all([]) is True, so an empty filter keeps every subject.
            if all(match):
                filtered_steps.append(subject)
        return filtered_steps

    @staticmethod
    def get_attribute(
        sagemaker_pipeline: Pipeline,
        paths: List[str],
    ) -> List:
        """Get attribute from pipeline.

        Walks each dotted path, descending attribute by attribute from the
        pipeline object. A segment ending in "]" iterates a collection:
        "attr[cond]" filters it via ``get_filtered_attributes``, while
        "attr[]" iterates unfiltered.

        :param sagemaker_pipeline: sagemaker pipeline
        :type sagemaker_pipeline: sagemaker.workflow.pipeline.Pipeline
        :param paths: list of paths to the attributes to be validated
        :type paths: List[str]
        :return: attribute
        :rtype: List
        """
        # TODO: refactor
        result = []
        for path in paths:
            # Drop the leading segment (the pipeline root) and walk the rest.
            attr_path = path.split(".")[1:]
            sm_pipeline_copy = copy(sagemaker_pipeline)
            for ix, attr in enumerate(attr_path):
                if attr.endswith("]"):
                    # "[...]" with content means a filter; bare "[]" means
                    # "iterate all elements" without filtering.
                    has_filter_dict = attr[-2] != "["
                    raw_attr = attr.split("[")[0]
                    sm_pipeline_copy = getattr(sm_pipeline_copy, raw_attr)
                    if has_filter_dict:
                        # Pass the remainder of the path so the filter
                        # expression can be re-extracted from it.
                        sm_pipeline_copy = Validation.get_filtered_attributes(
                            sm_pipeline_copy, ".".join(attr_path[ix:])
                        )
                else:
                    # Fan out over the current collection; elements lacking
                    # the attribute are silently skipped.
                    sm_pipeline_copy = [
                        getattr(sub_attr, attr)
                        for sub_attr in sm_pipeline_copy
                        if hasattr(sub_attr, attr)
                    ]
            result.append(sm_pipeline_copy)
        # Flatten one level: one list of attributes across all paths.
        return [x for y in result for x in y]

    @abstractmethod
    def run(
        self,
        sagemaker_pipeline: Pipeline,
    ) -> ValidationResult:
        """Run validation."""
        raise NotImplementedError
class Report:
    """Collection of validation results with tabular export support."""

    def __init__(self, results: List[ValidationResult]) -> None:
        """Initialize a Report object.

        :param results: list of validation results
        :type results: List[ValidationResult]
        """
        self.results: List[ValidationResult] = results

    def to_df(self) -> pd.DataFrame:
        """Return the results as a pandas DataFrame, one row per result.

        :return: report as pandas DataFrame
        :rtype: pd.DataFrame
        """
        records = [vars(result) for result in self.results]
        frame = pd.DataFrame.from_records(
            data=records,
            columns=list(ValidationResult.__annotations__),
        )
        return frame.reset_index(drop=True)
class ValidationFailedError(Exception):
    """Raised when a validation fails and fail-fast behavior is requested."""

    def __init__(self, validation_result: ValidationResult) -> None:
        """Initialize a ValidationFailedError.

        :param validation_result: result of the failing validation
        :type validation_result: ValidationResult
        """
        message = f"Validation failed: {validation_result.__dict__}"
        super().__init__(message)
        self.validation_result = validation_result
        self.message = message
class Configuration:
    """Runs a list of validations against a SageMaker Pipeline and reports the results."""

    def __init__(self, validations: List[Validation], sagemaker_pipeline: Pipeline) -> None:
        """Initialize a Configuration object.
        :param validations: List of validations.
        :type validations: List[Validation]
        :param sagemaker_pipeline: SageMaker Pipeline
        :type sagemaker_pipeline: sagemaker.workflow.pipeline.Pipeline
        :raises ValueError: If validations is empty.
        :raises TypeError: If validations is not a list.
        :raises TypeError: If any validation is not of type Validation.
        :return: None
        :rtype: None
        """
        Configuration._validate_input_validations(validations)
        self.validations: List[Validation] = validations
        self.sagemaker_pipeline: Pipeline = sagemaker_pipeline

    @staticmethod
    def _validate_input_validations(validations: List[Validation]) -> None:
        """Validate input validations.
        :param validations: List of validations.
        :type validations: List[Validation]
        :raises ValueError: If validations is empty.
        :raises TypeError: If validations is not a list.
        :raises TypeError: If any validation is not of type Validation.
        :return: None
        :rtype: None
        """
        # NOTE(review): the emptiness check runs first, so a falsy non-list
        # (e.g. None or ()) raises ValueError, not TypeError.
        if not validations:
            raise ValueError("Validations cannot be empty.")
        if not isinstance(validations, list):
            raise TypeError("Validations must be a list.")
        if not all(isinstance(v, Validation) for v in validations):
            raise TypeError("All validations must be of type Validation.")

    @staticmethod
    def _make_report(
        results: List[ValidationResult], return_df: bool = False
    ) -> Union[Report, pd.DataFrame]:
        """Make a report from a list of results.
        :param results: List of results.
        :type results: List[ValidationResult
        :param return_df: If True, return a pandas.DataFrame instead of a Report object.
        :type return_df: bool
        :return: Report object or pd.DataFrame.
        :rtype: Report or pd.DataFrame
        """
        report = Report(results)
        if return_df:
            return report.to_df()
        return report

    @staticmethod
    def _handle_empty_results(
        result: Optional[ValidationResult], validation: Validation
    ) -> ValidationResult:
        """Handle empty results. If a Validation does not return any results
        (e.g. when no observation were made), a warning is logged and a
        ValidationResult indicating this is added to the result dict.
        :param result: validation_result.
        :type result: Optional[ValidationResult]
        :param validation: Validation object.
        :type validation: Validation
        :return: validation result.
        :rtype: ValidationResult
        """
        if not result:
            logging.warning(
                f"Validation {validation.name} did not return any results. "
                f"Please check if the paths {validation.paths} are correct."
            )
            return ValidationResult(
                validation_name=validation.name,
                success=False,
                message=f"Validation {validation.name} did not return any results. "
                f"Please check if the paths {validation.paths} are correct.",
                # NOTE(review): ``subject`` is annotated as str on
                # ValidationResult but receives a list of paths here.
                subject=validation.paths,
                negative=False,
            )
        return result

    def run(self, fail_fast: bool = False, return_df: bool = False) -> Union[Report, dict]:
        """Run all validations and return a report.
        :param fail_fast: If True, stop validation after the first failure.
        :type fail_fast: bool
        :param return_df: If True, return a pandas dataframe instead of a Report object.
        :type return_df: bool
        :raises ValidationFailedError: If fail_fast is True and a validation fails.
        :return: Report object or pandas dataframe.
        :rtype: Report or dict
        """
        results = []
        for ix, validation in enumerate(self.validations):
            result = validation.run(self.sagemaker_pipeline)
            result = Configuration._handle_empty_results(result, validation)
            results.append(result)
            # NOTE(review): the last validation is excluded from fail-fast, so
            # a failure of the final validation returns a report instead of
            # raising -- confirm this is intended, since the docstring implies
            # any failure raises when fail_fast is True.
            if not result.success and fail_fast and not (ix == len(self.validations) - 1):
                logging.info(
                    "Validation failed and fail_fast is set to True. Stopping validation "
                    "prematurely."
                )
                raise ValidationFailedError(result)
        return self._make_report(results, return_df)
SageMaker Scikit-Learn Extension
================================
.. image:: https://img.shields.io/badge/License-Apache%202.0-blue.svg
:target: https://opensource.org/licenses/Apache-2.0
:alt: License
.. image:: https://img.shields.io/pypi/v/sagemaker-scikit-learn-extension.svg
:target: https://pypi.python.org/pypi/sagemaker-scikit-learn-extension
:alt: Latest Version
.. image:: https://img.shields.io/badge/code_style-black-000000.svg
:target: https://github.com/python/black
:alt: Code style: black
SageMaker Scikit-Learn Extension is a Python module for machine learning built on top of `scikit-learn <https://scikit-learn.org>`_.
This project contains standalone scikit-learn estimators and additional tools to support SageMaker Autopilot. Many of the additional estimators are based on existing scikit-learn estimators.
User Installation
-----------------
To install,
::
# install from pip
pip install sagemaker-scikit-learn-extension
In order to use the I/O functionalities in the :code:`sagemaker_sklearn_extension.externals` module, you will also need to install the :code:`mlio` version 0.7 package via conda. The :code:`mlio` package is only available through conda at the moment.
To install :code:`mlio`,
::
# install mlio
conda install -c mlio -c conda-forge mlio-py==0.7
To see more information about mlio, see https://github.com/awslabs/ml-io.
You can also install from source by cloning this repository and running a ``pip install`` command in the root directory of the repository:
::
# install from source
git clone https://github.com/aws/sagemaker-scikit-learn-extension.git
cd sagemaker-scikit-learn-extension
pip install -e .
Supported Operating Systems
---------------------------
SageMaker scikit-learn extension supports Unix/Linux and Mac.
Supported Python Versions
-------------------------
SageMaker scikit-learn extension is tested on:
- Python 3.7
License
-------
This library is licensed under the Apache 2.0 License.
Development
-----------
We welcome contributions from developers of all experience levels.
The SageMaker scikit-learn extension is meant to be a repository for scikit-learn estimators that don't meet scikit-learn's stringent inclusion criteria.
Setup
-----
We recommend using conda for development and testing.
To download conda, go to the `conda installation guide <https://conda.io/projects/conda/en/latest/user-guide/install/index.html>`_.
Running Tests
-------------
SageMaker scikit-learn extension contains an extensive suite of unit tests.
You can install the libraries needed to run the tests by running :code:`pip install --upgrade .[test]` or, for Zsh users: :code:`pip install --upgrade .\[test\]`
For unit tests, tox will use pytest to run the unit tests in a Python 3.7 interpreter. tox will also run flake8 and pylint for style checks.
conda is needed because of the dependency on mlio 0.7.
To run the tests with tox, run:
::
tox
Running on SageMaker
--------------------
To use sagemaker-scikit-learn-extension on SageMaker, you can build the `sagemaker-scikit-learn-extension-container <https://github.com/aws/sagemaker-scikit-learn-container>`_.
Overview of Submodules
----------------------
* :code:`sagemaker_sklearn_extension.decomposition`
* :code:`RobustPCA` dimension reduction for dense and sparse inputs
* :code:`sagemaker_sklearn_extension.externals`
* :code:`AutoMLTransformer` utility class encapsulating feature and target transformation functionality used in SageMaker Autopilot
* :code:`Header` utility class to manage the header and target columns in tabular data
* :code:`read_csv_data` reads comma separated data and returns a numpy array (uses mlio)
* :code:`sagemaker_sklearn_extension.feature_extraction.date_time`
* :code:`DateTimeVectorizer` convert datetime objects or strings into numeric features
* :code:`sagemaker_sklearn_extension.feature_extraction.sequences`
* :code:`TSFlattener` convert strings of sequences into numeric features
* :code:`TSFreshFeatureExtractor` compute row-wise time series features from a numpy array (uses tsfresh)
* :code:`sagemaker_sklearn_extension.feature_extraction.text`
* :code:`MultiColumnTfidfVectorizer` convert collections of raw documents to a matrix of TF-IDF features
* :code:`sagemaker_sklearn_extension.impute`
* :code:`RobustImputer` imputer for missing values with customizable mask_function and multi-column constant imputation
* :code:`RobustMissingIndicator` binary indicator for missing values with customizable mask_function
* :code:`sagemaker_sklearn_extension.preprocessing`
* :code:`BaseExtremeValuesTransformer` customizable transformer for columns that contain "extreme" values (columns that are heavy tailed)
* :code:`LogExtremeValuesTransformer` stateful log transformer for columns that contain "extreme" values (columns that are heavy tailed)
* :code:`NALabelEncoder` encoder for transforming labels to NA values
* :code:`QuadraticFeatures` generate and add quadratic features to feature matrix
* :code:`QuantileExtremeValuesTransformer` stateful quantiles transformer for columns that contain "extreme" values (columns that are heavy tailed)
* :code:`ThresholdOneHotEncoder` encode categorical integer features as a one-hot numeric array, with optional restrictions on feature encoding
* :code:`RemoveConstantColumnsTransformer` removes constant columns
* :code:`RobustLabelEncoder` encode labels for seen and unseen labels
* :code:`RobustStandardScaler` standardization for dense and sparse inputs
* :code:`WOEEncoder` weight of evidence supervised encoder
* :code:`SimilarityEncoder` encode categorical values based on their descriptive string
| /sagemaker-scikit-learn-extension-2.5.0.tar.gz/sagemaker-scikit-learn-extension-2.5.0/README.rst | 0.937719 | 0.651805 | README.rst | pypi |
import torch
from torch import nn
import numpy as np
class LambdaLogSoftmax(nn.Module):
    """Module wrapper around ``nn.functional.log_softmax`` with a fixed ``dim``."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, *args, **kwargs):
        # Forward everything to the functional API, pinning the softmax dim.
        return nn.functional.log_softmax(*args, dim=self.dim, **kwargs)
class GBN(torch.nn.Module):
    """
    Ghost Batch Normalization
    https://arxiv.org/abs/1705.08741

    Applies ``BatchNorm1d`` independently to "virtual" mini-batches of at most
    ``virtual_batch_size`` rows, then concatenates the results.
    """

    def __init__(self, input_dim, virtual_batch_size=128, momentum=0.01):
        super().__init__()
        self.input_dim = input_dim
        self.virtual_batch_size = virtual_batch_size
        self.bn = nn.BatchNorm1d(self.input_dim, momentum=momentum)

    def forward(self, x):
        # Split the batch into ceil(N / virtual_batch_size) chunks along dim 0.
        n_chunks = int(np.ceil(x.shape[0] / self.virtual_batch_size))
        normalized = [self.bn(chunk) for chunk in x.chunk(n_chunks, 0)]
        return torch.cat(normalized, dim=0)
class EmbeddingGenerator(torch.nn.Module):
    """
    Classical embeddings generator
    adopted from https://github.com/dreamquark-ai/tabnet/

    Maps each categorical input column through its own ``nn.Embedding`` and
    passes continuous columns through unchanged.
    """

    def __init__(self, input_dim, cat_dims, cat_idxs, cat_emb_dim=None):
        """This is an embedding module for an entire set of features

        Parameters
        ----------
        input_dim : int
            Number of features coming as input (number of columns)
        cat_dims : list of int
            Number of modalities for each categorical feature
            If the list is empty, no embeddings will be done
        cat_idxs : list of int
            Positional index for each categorical feature in inputs
        cat_emb_dim : int or list of int
            Embedding dimension for each categorical feature
            If int, the same embedding dimension will be used for all
            categorical features; if None, a per-feature heuristic based on
            cardinality is used
        """
        super(EmbeddingGenerator, self).__init__()
        if cat_dims == [] or cat_idxs == []:
            # No categorical features: the module is a pass-through.
            self.skip_embedding = True
            self.post_embed_dim = input_dim
            return
        if cat_emb_dim is None:
            # Heuristic: embedding size grows sub-linearly with cardinality, capped at 600.
            cat_emb_dim = [min(600, round(1.6 * n_cats ** 0.56)) for n_cats in cat_dims]
        self.skip_embedding = False
        if isinstance(cat_emb_dim, int):
            self.cat_emb_dims = [cat_emb_dim] * len(cat_idxs)
        else:
            self.cat_emb_dims = cat_emb_dim
        # check that all embeddings are provided
        if len(self.cat_emb_dims) != len(cat_dims):
            # BUG FIX: this message previously lacked the f-prefix, so the
            # {len(...)} placeholders were emitted literally instead of being
            # interpolated.
            msg = f"""cat_emb_dim and cat_dims must be lists of same length, got {len(self.cat_emb_dims)}
            and {len(cat_dims)}"""
            raise ValueError(msg)
        # Each embedded column replaces one input column with emb_dim columns.
        self.post_embed_dim = int(input_dim + np.sum(self.cat_emb_dims) - len(self.cat_emb_dims))
        self.embeddings = torch.nn.ModuleList()
        # Sort dims by cat_idx
        sorted_idxs = np.argsort(cat_idxs)
        cat_dims = [cat_dims[i] for i in sorted_idxs]
        self.cat_emb_dims = [self.cat_emb_dims[i] for i in sorted_idxs]
        for cat_dim, emb_dim in zip(cat_dims, self.cat_emb_dims):
            self.embeddings.append(torch.nn.Embedding(cat_dim, emb_dim))
        # record continuous indices (True = continuous, False = categorical)
        self.continuous_idx = torch.ones(input_dim, dtype=torch.bool)
        self.continuous_idx[cat_idxs] = 0

    def forward(self, x):
        """
        Apply embeddings to inputs
        Inputs should be (batch_size, input_dim)
        Outputs will be of size (batch_size, self.post_embed_dim)
        """
        if self.skip_embedding:
            # no embeddings required
            return x
        cols = []
        cat_feat_counter = 0
        for feat_init_idx, is_continuous in enumerate(self.continuous_idx):
            # Continuous columns pass through; categorical ones are embedded
            # in ascending column order (embeddings were sorted by cat_idx).
            if is_continuous:
                cols.append(x[:, feat_init_idx].float().view(-1, 1))
            else:
                cols.append(self.embeddings[cat_feat_counter](x[:, feat_init_idx].long()))
                cat_feat_counter += 1
        # concat
        post_embeddings = torch.cat(cols, dim=1)
        return post_embeddings
def weight_init(m):
    """Apply Kaiming-uniform initialization to ``nn.Linear`` weights.

    Intended for use with ``module.apply``; non-Linear modules are untouched.
    """
    if not isinstance(m, nn.Linear):
        return
    nn.init.kaiming_uniform_(m.weight)
import numpy as np
class StarOversampler:
    """
    Implementation of the oversampler proposed in [1] using the `star` topology. The implementation is based on the
    implementation of https://github.com/analyticalmindsltd/smote_variants

    Parameters
    ----------
    proportion: float (default = 1)
        proportion of the difference of n_maj and n_min to sample e.g. 1.0 means that after sampling the number of
        minority samples will be equal to the number of majority samples

    References
    ----------
    .. [1] Gazzah, S. and Amara, N. E. B. "New Oversampling Approaches Based on Polynomial Fitting for Imbalanced Data
       Sets" The Eighth IAPR International Workshop on Document Analysis Systems
    """

    def __init__(self, proportion=1.0):
        self.proportion = proportion

    def fit(self, X, y=None):
        # Stateless sampler: nothing to fit.
        pass

    def resample(self, X, y, verbose=False):
        """Generate synthetic minority samples using the star topology."""
        labels, label_counts = np.unique(y, return_counts=True)
        counts_by_label = dict(zip(labels, label_counts))
        # Assumes a binary target; split labels into minority/majority.
        if label_counts[0] < label_counts[1]:
            min_label, maj_label = labels[0], labels[1]
        else:
            min_label, maj_label = labels[1], labels[0]
        # determine the number of samples to generate
        n_to_sample = self.det_n_to_sample(
            self.proportion, counts_by_label[maj_label], counts_by_label[min_label]
        )
        if n_to_sample == 0:
            if verbose:
                print("StarOversampler: Sampling is not needed")
            return X.copy(), y.copy()
        # Star topology: interpolate each minority point towards the minority mean.
        X_min = X[y == min_label]
        X_mean = np.mean(X_min, axis=0)
        k = max([1, int(np.rint(n_to_sample / len(X_min)))])
        samples = [
            x + float(i) / (k + 1) * (X_mean - x) for x in X_min for i in range(1, k + 1)
        ]
        return (
            np.vstack([X, np.vstack(samples)]),
            np.hstack([y, np.repeat(min_label, len(samples))]),
        )

    def det_n_to_sample(self, proportion, n_maj, n_min):
        """
        Determines the number of samples to generate

        Parameters
        ----------
        proportion: float
            proportion of the difference of n_maj and n_min to sample e.g. 1.0 means that after sampling the number of
            minority samples will be equal to the number of majority samples
        n_maj: int
            number of majority samples
        n_min: int
            number of minority samples
        """
        return max([0, int((n_maj - n_min) * proportion)])
from abc import abstractmethod
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset
from torch.optim.lr_scheduler import MultiplicativeLR
from .nn_utils import GBN, LambdaLogSoftmax, weight_init, EmbeddingGenerator
class BaseModel(nn.Module):
    """
    Base class for all models

    Subclasses implement ``forward`` and ``loss``; ``fit`` provides a generic
    mini-batch reconstruction-training loop with optional early stopping.
    """

    @abstractmethod
    def forward(self, *inputs):
        """
        Forward pass logic
        :return: Model output
        """
        raise NotImplementedError

    @abstractmethod
    def loss(self, output, target):
        """Compute the loss for ``output`` against ``target``.

        Implementations return a dict of named loss terms; the key passed to
        ``fit`` (default ``'opt'``) selects the term that is optimized.
        """
        raise NotImplementedError

    def decode_sample(self, z):
        """Decode latent codes ``z`` back into input-space columns.

        Relies on the subclass providing ``decode`` plus ``cont_net`` /
        ``cat_nets`` heads (see ``AE``). Categorical heads are converted to
        hard labels via argmax and all columns are re-assembled in their
        original input order.
        """
        x_hat = self.decode(z)
        x_cont, x_cat = [], []
        # ``decode`` returns the continuous head first (if any), followed by
        # one head per categorical feature, in feature-index order.
        if hasattr(self, "cont_net"):
            x_cont = x_hat.pop(0)
        if hasattr(self, "cat_nets"):
            for _ in self.categorical_features:
                x_cat.append(torch.argmax(x_hat.pop(0), dim=1))
        x = []
        cont_c, cat_c = 0, 0
        # Interleave continuous and categorical columns back into their
        # original positions.
        for i in range(self.input_dim):
            if i in self.continuous_features:
                x.append(x_cont[:, cont_c].reshape(-1, 1))
                cont_c += 1
            elif i in self.categorical_features:
                x.append(x_cat[cat_c].reshape(-1, 1))
                cat_c += 1
        x = torch.cat(x, dim=1)
        return x

    def __str__(self):
        """
        Model prints with number of trainable parameters
        """
        model_parameters = filter(lambda p: p.requires_grad, self.parameters())
        params = sum([np.prod(p.size()) for p in model_parameters])
        return super().__str__() + "\nTrainable parameters: {}".format(params)

    def fit(
        self,
        X_train,
        y_train,
        X_validation=None,
        y_validation=None,
        loss_key="opt",
        batch_size=128,
        num_workers=0,
        learning_rate=1e-3,
        learning_rate_lambda=0.995,
        max_epoch=10000,
        early_stopping=100,
        device="cpu",
        verbose=False,
    ):
        """
        Train the model using gradient descent back propagation

        Parameters
        ----------
        X_train : {array-like, sparse matrix} of shape (n_samples, n_features)
            Features matrix used to train the model
        y_train : vector-like of shape (n_samples, 1)
            The target vector used to train the model
        X_validation : {array-like, sparse matrix} of shape (n_samples, n_features)
            Features matrix used for early stopping of the training
        y_validation : vector-like of shape (n_samples, 1)
            The target vector used for early stopping of the training
        loss_key: string (default = 'opt')
            Which field of the loss dictionary to optimize
        batch_size: int (default = 128)
            Batch size
        num_workers: int (default = 0)
            Number of cpus to use
        learning_rate: float (default = 1e-3)
            Gradient descent learning rate
        learning_rate_lambda: float (default = 0.995)
            The rate of decreasing learning_rate
        max_epoch: int (default = 10000)
            The maximum number of optimization epochs
        early_stopping: int (default = 100)
            The number of epochs without improving the bast validation loss allowed before stopping
        device : 'cpu' or 'gpu' (default = 'cpu')
            Device used by pytorch for training the model and using the trained model for encoding/decoding
        verbose: True or False (default = False)
            Verbosity
        """
        assert X_train.shape[1] == self.input_dim
        self.to(device)
        train_loader = torch.utils.data.DataLoader(
            TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train)),
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
        )
        if X_validation is not None:
            validation_loader = torch.utils.data.DataLoader(
                TensorDataset(torch.Tensor(X_validation), torch.Tensor(y_validation)),
                batch_size=batch_size,
                shuffle=True,
                num_workers=num_workers,
            )
        else:
            validation_loader = None
        optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
        # Multiplicative decay: lr shrinks by ``learning_rate_lambda`` each epoch.
        scheduler = MultiplicativeLR(optimizer, lr_lambda=(lambda epoch: learning_rate_lambda))
        best_validation_loss = None
        iter_no_improve = 0
        for epoch in range(max_epoch):
            self.train()
            training_loss = 0
            for data in train_loader:
                Xb = data[0].to(device)
                optimizer.zero_grad()
                output = self(Xb)
                # NOTE: the model is trained to reconstruct its input (the loss
                # target is Xb, not y) -- autoencoder-style training.
                loss = self.loss(output, Xb)[loss_key]
                loss.backward()
                optimizer.step()
                training_loss += loss.detach().cpu().numpy()
            self.eval()
            validation_loss = 0
            if validation_loader:
                with torch.no_grad():
                    for data in validation_loader:
                        Xb = data[0].to(device)
                        output = self(Xb)
                        loss = self.loss(output, Xb)[loss_key]
                        validation_loss += loss.detach().cpu().numpy()
            # NOTE(review): when no validation set is supplied,
            # ``validation_loss`` stays 0 every epoch -- confirm whether early
            # stopping is intended to be active in that case.
            if best_validation_loss is None or validation_loss < best_validation_loss:
                best_validation_loss = validation_loss
                iter_no_improve = 0
            else:
                iter_no_improve += 1
            if iter_no_improve > early_stopping:
                if verbose:
                    print(f"Early stopping after {epoch} epochs")
                break
            scheduler.step()
            if verbose:
                print(f"[{epoch}] training loss={training_loss}, validation loss={validation_loss}")
        return self
class AE(BaseModel):
    """
    Vanilla autoencoder based on 10.1126/science.1127647

    Parameters
    ----------
    categorical_features : list of integers
        The indexes of the categorical features in the input array
    categorical_dims : list of integers
        The cardinality of the categorical features in the input array
    continuous_features : list of integers
        The indexes of the continuous features in the input array
    latent_dim : integer (default = 32)
        The size of the latent dimension of the autoencoder
    hidden_dim : list of integers (default = [128, 128, 128])
        The hidden layer sizes of the autoencoder. The sizes are used for both the encoder and the decoder
    nll_weight : float (default = 0.3)
        Weight of the nll component in the loss
    """

    def __init__(
        self,
        categorical_features,
        categorical_dims,
        continuous_features,
        latent_dim=32,
        hidden_dim=None,
        nll_weight=0.3,
    ):
        super().__init__()
        if not hidden_dim:
            hidden_dim = [128, 128, 128]
        elif not isinstance(hidden_dim, list):
            # Allow a single int to be passed for a one-hidden-layer model.
            hidden_dim = [
                hidden_dim,
            ]
        assert len(categorical_features) == len(categorical_dims)
        self.categorical_features, self.categorical_dims = [], []
        if categorical_features and categorical_dims:
            # Keep the (index, cardinality) pairs sorted by column index.
            self.categorical_features, self.categorical_dims = zip(*sorted(zip(categorical_features, categorical_dims)))
        self.continuous_features = sorted(continuous_features)
        self.latent_dim = latent_dim
        self.hidden_dim = hidden_dim
        self.nll_weight = nll_weight
        self.input_dim = len(continuous_features) + len(categorical_features)
        self.embeddings = EmbeddingGenerator(self.input_dim, categorical_dims, categorical_features)
        self.post_embed_dim = self.embeddings.post_embed_dim
        hidden_dim = [self.post_embed_dim] + hidden_dim
        # Encoder: embeddings -> (Linear, GBN, PReLU)* -> Linear to latent_dim.
        self.encoder = [self.embeddings]
        for i in range(1, len(hidden_dim)):
            self.encoder.extend(
                (nn.Linear(hidden_dim[i - 1], hidden_dim[i]), GBN(hidden_dim[i]), nn.PReLU(hidden_dim[i]))
            )
        self.encoder.append(nn.Linear(hidden_dim[-1], latent_dim))
        self.encoder = nn.Sequential(*self.encoder)
        # Decoder: mirrors the hidden layers in reverse, from latent_dim back
        # up to the first hidden size; per-feature output heads come after.
        hidden_dim = hidden_dim + [latent_dim]
        self.decoder = []
        for i in range(len(hidden_dim) - 1, 1, -1):
            self.decoder.extend(
                (nn.Linear(hidden_dim[i], hidden_dim[i - 1]), GBN(hidden_dim[i - 1]), nn.PReLU(hidden_dim[i - 1]))
            )
        self.decoder = nn.Sequential(*self.decoder)
        # Output heads: one linear head for all continuous features and one
        # log-softmax head per categorical feature.
        if self.continuous_features:
            self.cont_net = nn.Sequential(nn.Linear(hidden_dim[1], len(self.continuous_features)),)
        if self.categorical_features:
            self.cat_nets = nn.ModuleList()
            for i, n_cats in zip(self.categorical_features, self.categorical_dims):
                self.cat_nets.append(nn.Sequential(nn.Linear(hidden_dim[1], n_cats), LambdaLogSoftmax(dim=1)))
        self.apply(weight_init)

    def decode(self, z):
        """note: order of decoding is important for loss function"""
        z = self.decoder(z)
        x_hat = []
        # Continuous head first, then categorical heads in feature order;
        # ``loss`` and ``decode_sample`` pop in this same order.
        if hasattr(self, "cont_net"):
            x_hat.append(self.cont_net(z))
        if hasattr(self, "cat_nets"):
            for m in self.cat_nets:
                x_hat.append(m(z))
        return x_hat

    def encode(self, x):
        # Map raw inputs (with embedded categoricals) to the latent code.
        return self.encoder(x)

    def forward(self, x):
        z = self.encode(x)
        x_hat = self.decode(z)
        return x_hat, z

    def loss(self, output, target):
        """Reconstruction loss: MSE on continuous columns + NLL on categorical
        columns, combined as ``mse + nll_weight * nll`` under key ``'opt'``.
        """
        loss = {"mse": 0.0, "nll": 0.0}
        x_hat = output[0]
        # NOTE: pops mutate output[0]; heads are consumed in decode order.
        if self.continuous_features:
            out = x_hat.pop(0)
            loss["mse"] = nn.functional.mse_loss(target[:, self.continuous_features], out)
        if self.categorical_features:
            for idx in self.categorical_features:
                out = x_hat.pop(0)
                loss["nll"] += nn.functional.nll_loss(out, target[:, idx].long())
        loss["opt"] = loss["mse"] + self.nll_weight * loss["nll"]
        return loss
class VAE(BaseModel):
    """
    Variational autoencoder based on the vanilla autoencoder proposed in https://arxiv.org/abs/1312.6114

    Parameters
    ----------
    categorical_features : list of integers
        The indexes of the categorical features in the input array
    categorical_dims : list of integers
        The cardinality of the categorical features in the input array
    continuous_features : list of integers
        The indexes of the continuous features in the input array
    latent_dim : integer (default = 32)
        The size of the latent dimension of the autoencoder
    hidden_dim : list of integers (default = [128, 128, 128])
        The hidden layer sizes of the autoencoder. The sizes are used for both the encoder and the decoder
    nll_weight : float (default = 0.3)
        Weight of the nll component in the loss
    kld_weight : float (default = 0.1)
        Weight of the kld component in the loss
    """

    def __init__(
        self,
        categorical_features,
        categorical_dims,
        continuous_features,
        latent_dim=32,
        hidden_dim=None,
        nll_weight=0.3,
        kld_weight=0.1,
    ):
        super().__init__()
        if not hidden_dim:
            hidden_dim = [128, 128, 128]
        elif not isinstance(hidden_dim, list):
            hidden_dim = [
                hidden_dim,
            ]
        assert len(categorical_features) == len(categorical_dims)
        self.categorical_features, self.categorical_dims = [], []
        if categorical_features and categorical_dims:
            # Keep feature indexes aligned with their cardinalities, sorted by index.
            self.categorical_features, self.categorical_dims = zip(*sorted(zip(categorical_features, categorical_dims)))
        self.continuous_features = sorted(continuous_features)
        self.latent_dim = latent_dim
        self.hidden_dim = hidden_dim
        self.nll_weight = nll_weight
        self.kld_weight = kld_weight
        self.input_dim = len(continuous_features) + len(categorical_features)
        self.embeddings = EmbeddingGenerator(self.input_dim, categorical_dims, categorical_features)
        self.post_embed_dim = self.embeddings.post_embed_dim
        hidden_dim = [self.post_embed_dim] + hidden_dim
        # Encoder: embeddings -> (Linear, GBN, PReLU) stack -> Linear head.
        self.encoder = [self.embeddings]
        for i in range(1, len(hidden_dim)):
            self.encoder.extend(
                (nn.Linear(hidden_dim[i - 1], hidden_dim[i]), GBN(hidden_dim[i]), nn.PReLU(hidden_dim[i]))
            )
        # The final layer emits 2 * latent_dim values: the first half is mu,
        # the second half is log_var (split apart in encode()).
        self.encoder.append(nn.Linear(hidden_dim[-1], 2 * latent_dim))
        self.encoder = nn.Sequential(*self.encoder)
        # Decoder: mirror of the encoder trunk, mapping latent_dim back up to hidden_dim[1].
        hidden_dim = hidden_dim + [latent_dim]
        self.decoder = []
        for i in range(len(hidden_dim) - 1, 1, -1):
            self.decoder.extend(
                (nn.Linear(hidden_dim[i], hidden_dim[i - 1]), GBN(hidden_dim[i - 1]), nn.PReLU(hidden_dim[i - 1]))
            )
        self.decoder = nn.Sequential(*self.decoder)
        # Output heads: one linear head covering all continuous features, and
        # one log-softmax head per categorical feature.
        if self.continuous_features:
            self.cont_net = nn.Sequential(nn.Linear(hidden_dim[1], len(self.continuous_features)),)
        if self.categorical_features:
            self.cat_nets = nn.ModuleList()
            for i, n_cats in zip(self.categorical_features, self.categorical_dims):
                self.cat_nets.append(nn.Sequential(nn.Linear(hidden_dim[1], n_cats), LambdaLogSoftmax(dim=1)))
        self.apply(weight_init)

    def decode(self, z):
        """Decode a latent batch through the trunk and every output head.

        The continuous head (when present) comes first, followed by one
        categorical head per categorical feature; the loss function relies on
        exactly this ordering of the returned list.
        """
        z = self.decoder(z)
        x_hat = []
        if hasattr(self, "cont_net"):
            x_hat.append(self.cont_net(z))
        if hasattr(self, "cat_nets"):
            for m in self.cat_nets:
                x_hat.append(m(z))
        return x_hat

    def encode(self, x: torch.Tensor, is_forward=False) -> torch.Tensor:
        """Encode ``x`` into latent space.

        When ``is_forward`` is True a reparameterized sample is drawn and the
        tuple ``(z, mu, log_var)`` is returned; otherwise only the posterior
        mean ``mu`` is returned.
        """
        encoded = self.encoder(x)
        mu, log_var = torch.split(encoded, encoded.shape[-1] // 2, dim=1)
        if is_forward:
            # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, 1).
            eps = torch.randn_like(mu)
            std = torch.exp(0.5 * log_var)
            z = eps * std + mu
            return z, mu, log_var
        return mu

    def forward(self, x):
        """Full VAE pass: sample a latent code and decode it.

        Returns the list of per-head reconstructions plus the posterior
        parameters ``mu`` and ``log_var`` needed by the loss.
        """
        z, mu, log_var = self.encode(x, is_forward=True)
        x_hat = self.decode(z)
        return x_hat, mu, log_var

    def loss(self, output, target):
        """Compute the VAE loss terms.

        Parameters
        ----------
        output : tuple
            ``(x_hat, mu, log_var)`` as returned by ``forward``.
        target : torch.Tensor
            The original (pre-embedding) input batch.

        Returns
        -------
        dict
            'mse', 'nll', 'kld' components and the weighted total
            'opt' = mse + kld_weight * kld + nll_weight * nll.
        """
        loss = {"mse": 0.0, "nll": 0.0, "kld": 0.0}
        mu = output[1]
        log_var = output[2]
        # Closed-form KL divergence between N(mu, sigma^2) and N(0, 1), averaged over the batch.
        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)
        loss["kld"] = kld_loss
        # Work on a shallow copy: the previous implementation popped from
        # output[0] in place, so a second loss() call on the same output
        # hit an emptied list and raised IndexError.
        x_hat = list(output[0])
        if self.continuous_features:
            out = x_hat.pop(0)
            loss["mse"] = nn.functional.mse_loss(target[:, self.continuous_features], out)
        if self.categorical_features:
            # decode() emits one categorical head per feature, in the same
            # sorted order as self.categorical_features.
            for idx in self.categorical_features:
                out = x_hat.pop(0)
                loss["nll"] += nn.functional.nll_loss(out, target[:, idx].long())
        loss["opt"] = loss["mse"] + self.kld_weight * loss["kld"] + self.nll_weight * loss["nll"]
        return loss
import os
from math import ceil
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_array, check_is_fitted
from tsfresh import extract_features
from tsfresh.feature_extraction import ComprehensiveFCParameters
from tsfresh.feature_extraction import EfficientFCParameters
from tsfresh.feature_extraction import MinimalFCParameters
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.defaults import N_PROCESSES # the default number of processes used by TSFresh, equals to n_vcores/2
# Cap on the total number of TSFresh features generated across all sequence
# columns; each column's budget is apportioned from this in fit().
TOTAL_EXPANSION_THRESHOLD = 2500
# Fallback 25th-percentile sequence length used when none is supplied at fit time.
DEFAULT_INPUT_SEQUENCE_LENGTH = 1000
# A column's feature budget scales with its sequence length by this factor
# (see TSFreshFeatureExtractor._compute_expansion_threshold).
SEQUENCE_EXPANSION_FACTOR = 2.5
# do not use TSFresh parallelism in container serve(transform), does not work with server's workers
N_TSFRESH_JOBS = 0 if os.environ.get("SAGEMAKER_PROGRAM") == "sagemaker_serve" else N_PROCESSES
class TSFeatureExtractor(BaseEstimator, TransformerMixin):
    """Wrap TSFlattener and TSFreshFeatureExtractor to extract time series features from multiple sequence columns.
    The input is an array where rows are observations and columns are sequence features. Each sequence feature is a
    string containing a sequence of comma-separate values.
    For each column, TSFlattener extracts numerical values from the strings and returns a list of np.arrays as output,
    and then TSFreshFeatureExtractor extracts time series features from each list. The outputs from each column are then
    stacked horizontally into a single array.
    Examples of features are the mean, median, kurtosis, and autocorrelation of each sequence. The full list of
    extracted features can be found at https://tsfresh.readthedocs.io/en/latest/text/list_of_features.html.
    Any value in the input strings that can't be turned into a finite float is converted to a np.nan.
    See TSFlattener and TSFreshFeatureExtractor for more details.
    Parameters
    ----------
    max_allowed_length : int (default = 10000)
        Maximum allowed length of an input sequence. If the length of a sequence is greater than ``max_allowed_length``,
        the transformer will truncate its beginning or end as indicated by ``trim_beginning``.
    trim_beginning : bool (default = True)
        If a sequence length exceeds ``max_allowed_length``, trim its start and only keep the last `max_allowed_length``
        values if ``trim_beginning`` = True, otherwise trim the end and only keep the first max_allowed_length values.
    augment : boolean (default=False):
        Whether to append the tsfresh features to the original data (if True),
        or output only the extracted tsfresh features (if False).
        If True, also pad shorter sequences (if any) in the original data with np.nans, so that all sequences
        match the length of the longest sequence, and interpolate them as indicated by ``interpolation_method``.
    interpolation_method : {'linear', 'fill', 'zeroes', 'hybrid', None} (default='hybrid')
        'linear': linear interpolation
        'fill': forward fill to complete the sequences; then, backfill in case of NaNs at the start of the sequence.
        'zeroes': pad with zeroes
        'hybrid': replace with zeroes any NaNs at the end or start of the sequences, and forward fill NaNs in between
        None: no interpolation
    extraction_type : {'minimal', 'efficient', 'all'} (default='efficient')
        Control the number of features extracted from tsfresh.
        'minimal': most of the feature extractors are disabled and only a small subset is used
        'efficient': extract 781 tsfresh features, namely all of them except for the expensive to compute ones
        'all': extract all 787 tsfresh features
    extraction_seed : int (default = 0)
        Random seed used to choose subset of features, when expansion control does not allow to include all features
    sequences_lengths_q25 : list of ints (default = None)
        List containing 25th percentile of sequence lengths for each column at the train step.
        Length of the list should correspond to total number of columns in the input.
        If not provided, default value will be assigned at the fit stage.
    Examples
    --------
    >>> from sagemaker_sklearn_extension.feature_extraction.sequences import TSFeatureExtractor
    >>> import numpy as np
    >>> data = [["1,, 3, 44", "3, 4, 5, 6"], ["11, 111", "1, 1, 2, 2"], ["NaN, , 1, NaN", "2, 1, 3, 2"]]
    >>> ts_pipeline = TSFeatureExtractor(augment=True)
    >>> X = ts_pipeline.fit_transform(data)
    >>> print(X.shape)
    (3, 1570)
    >>> ts_pipeline = TSFeatureExtractor(augment=False)
    >>> X = ts_pipeline.fit_transform(data)
    >>> print(X.shape)
    (3, 1562)
    """
    def __init__(
        self,
        max_allowed_length=10000,
        trim_beginning=True,
        augment=False,
        interpolation_method="hybrid",
        extraction_type="efficient",
        extraction_seed=0,
        sequences_lengths_q25=None,
    ):
        super().__init__()
        if max_allowed_length <= 0:
            raise ValueError(f"{max_allowed_length} must be positive.\n")
        self.max_allowed_length = max_allowed_length
        self.trim_beginning = trim_beginning
        self.augment = augment
        self.interpolation_method = interpolation_method
        self.extraction_type = extraction_type
        self.extraction_seed = extraction_seed
        self.sequences_lengths_q25 = sequences_lengths_q25
    def fit(self, X, y=None):
        """Fit one TSFreshFeatureExtractor per sequence column of X."""
        X = check_array(X, dtype=None, force_all_finite="allow-nan")
        if self.sequences_lengths_q25 is None:
            # NOTE(review): this overwrites the constructor parameter in place;
            # after fitting, get_params()/clone() see the filled-in list rather
            # than None -- confirm this is intended (sklearn convention is to
            # store learned state in trailing-underscore attributes instead).
            self.sequences_lengths_q25 = [DEFAULT_INPUT_SEQUENCE_LENGTH] * X.shape[1]
        if len(self.sequences_lengths_q25) != X.shape[1]:
            raise ValueError(
                f"length of sequences_lengths_q25 should be equal to number of columns in X (={X.shape[1]})."
            )
        # cap total expansion for all columns: apportion the global budget
        # (TOTAL_EXPANSION_THRESHOLD) across columns proportionally to their
        # 25th-percentile sequence lengths.
        expansion_thresholds = np.ceil(
            (self.sequences_lengths_q25 / np.sum(self.sequences_lengths_q25)) * TOTAL_EXPANSION_THRESHOLD
        )
        ts_flattener = TSFlattener(max_allowed_length=self.max_allowed_length, trim_beginning=self.trim_beginning)
        tsfresh_feature_extractors = []
        for sequence_column_i, sequence_column in enumerate(X.T):
            # Parse the column's strings into numeric sequences, then fit a
            # per-column extractor with that column's share of the budget.
            numeric_sequences = ts_flattener.transform(sequence_column.reshape(-1, 1))
            tsfresh_feature_extractor = TSFreshFeatureExtractor(
                augment=self.augment,
                interpolation_method=self.interpolation_method,
                extraction_type=self.extraction_type,
                extraction_seed=self.extraction_seed,
                sequence_length_q25=self.sequences_lengths_q25[sequence_column_i],
                expansion_threshold=int(expansion_thresholds[sequence_column_i]),
            )
            tsfresh_feature_extractor.fit(numeric_sequences)
            tsfresh_feature_extractors.append(tsfresh_feature_extractor)
        self.tsfresh_feature_extractors_ = tsfresh_feature_extractors
        return self
    def transform(self, X, y=None):
        """Apply TSFlattener followed by TSFreshFeatureExtractor to each sequence column in X.
        Parameters
        ----------
        X : np.array (each column is a list of strings)
        Returns
        -------
        X : np.array (all values are numerical)
        """
        X = check_array(X, dtype=None, force_all_finite="allow-nan")
        check_is_fitted(self, "tsfresh_feature_extractors_")
        # TSFlattener is stateless, so constructing a fresh one per call is safe.
        ts_flattener = TSFlattener(max_allowed_length=self.max_allowed_length, trim_beginning=self.trim_beginning)
        sequences_with_features = []
        for id_column, sequence_column in enumerate(X.T):
            numeric_sequences = ts_flattener.transform(sequence_column.reshape(-1, 1))
            X_with_features = self.tsfresh_feature_extractors_[id_column].transform(numeric_sequences)
            sequences_with_features.append(X_with_features)
        # Per-column feature blocks are concatenated horizontally.
        X = np.hstack(sequences_with_features)
        return X
    def _more_tags(self):
        # Inputs are strings and may contain NaNs; declare that to sklearn's checks.
        return {"X_types": ["string"], "allow_nan": True}
class TSFlattener(BaseEstimator, TransformerMixin):
    """Parse columns of comma-separated number strings into numeric arrays.

    Each input row must hold exactly one string of comma-separated values.
    TSFlattener converts every token to a float and returns one np.array per
    row. Tokens that cannot be parsed as a finite float (empty strings, NaN,
    inf, arbitrary text) become np.nan, and a row whose whole sequence is
    missing (a None entry) becomes a single-element np.nan array. Sequences
    longer than ``max_allowed_length`` are truncated.

    Parameters
    ----------
    max_allowed_length : int (default = 10000)
        Maximum allowed length of an input sequence. If the length of a sequence is greater than ``max_allowed_length``,
        the transformer will truncate its beginning or end as indicated by ``trim_beginning``.
    trim_beginning : bool (default = True)
        If a sequence length exceeds ``max_allowed_length``, trim its start and only keep the last `max_allowed_length``
        values if ``trim_beginning`` = True, otherwise trim the end and only keep the first max_allowed_length values.

    Examples
    --------
    >>> from sagemaker_sklearn_extension.feature_extraction.sequences import TSFlattener
    >>> import numpy as np
    >>> data = [["1,, 3, 44"], ["11, 111"], ["NaN, , 1, NaN"]]
    >>> ts_flattener = TSFlattener()
    >>> X = ts_flattener.transform(data)
    >>> print(len(X))
    3
    >>> print(X)
    [array([ 1., nan, 3., 44.]), array([ 11., 111.]), array([nan, nan, 1., nan])]
    >>> ts_flattener = TSFlattener(max_allowed_length=2, trim_beginning=True)
    >>> X = ts_flattener.transform(data)
    >>> print(X)
    [array([ 3., 44.]), array([ 11., 111.]), array([ 1., nan])]
    """

    def __init__(self, max_allowed_length=10000, trim_beginning=True):
        super().__init__()
        if max_allowed_length <= 0:
            raise ValueError(f"{max_allowed_length} must be positive.\n")
        self.max_allowed_length = max_allowed_length
        self.trim_beginning = trim_beginning

    def fit(self, X, y=None):
        # Stateless transformer: validate the input, learn nothing.
        check_array(X, dtype=None, force_all_finite="allow-nan")
        return self

    def transform(self, X, y=None):
        """Extract numerical values from strings of comma-separated numbers and returns an np.array.
        Anything that can't be turned into a finite float is converted to a np.nan.
        Parameters
        ----------
        X : lists of strings
        Returns
        -------
        X : List of np.arrays
        """
        X = check_array(X, dtype=None, force_all_finite="allow-nan")
        return self._convert_to_numeric(X)

    @staticmethod
    def _parse_token(token):
        # Anything that is not a finite float becomes np.nan.
        try:
            value = float(token)
        except ValueError:
            return np.nan
        return np.nan if np.isinf(value) else value

    def _convert_to_numeric(self, X):
        sequences = []
        for row in X:
            if len(row) != 1:
                raise ValueError(
                    f"TSFlattener can process a single sequence column at a time, "
                    f"but it was given {len(row)} sequence columns.\n"
                )
            if row[0] is None:
                # An entirely missing sequence collapses to a single NaN.
                values = [np.nan]
            else:
                values = [self._parse_token(token) for token in row[0].split(",")]
            sequences.append(np.array(self._truncate_sequence(values)))
        return sequences

    def _truncate_sequence(self, numeric_sequence):
        if self.trim_beginning:
            return numeric_sequence[-self.max_allowed_length :]
        return numeric_sequence[: self.max_allowed_length]

    def _more_tags(self):
        return {"X_types": ["string"], "allow_nan": True}
class TSFreshFeatureExtractor(BaseEstimator, TransformerMixin):
    """Extract features computed by tsfresh from each input array or list and append them to the input
    array (if augment = True) or return the extracted features alone (if augment = False).
    Examples of these features are the mean, median, kurtosis, and autocorrelation of each sequence. The full list
    of extracted features can be found at https://tsfresh.readthedocs.io/en/latest/text/list_of_features.html.
    Any np.nans in the input arrays/lists are dropped before extracting the tsfresh features.
    Parameters
    ----------
    augment : boolean (default=False):
        Whether to append the tsfresh features to the original data (if True),
        or output only the extracted tsfresh features (if False).
        If True, also pad shorter sequences (if any) in the original data with np.nans, so that all sequences
        match the length of the longest sequence, and interpolate them as indicated by ``interpolation_method``.
    interpolation_method : {'linear', 'fill', 'zeroes', 'hybrid', None} (default='hybrid')
        'linear': linear interpolation
        'fill': forward fill to complete the sequences; then, backfill in case of NaNs at the start of the sequence.
        'zeroes': pad with zeroes
        'hybrid': replace with zeroes any NaNs at the end or start of the sequences, and forward fill NaNs in between
        None: no interpolation
    extraction_type : {'minimal', 'efficient', 'all'} (default='efficient')
        Control the number of features extracted from tsfresh.
        'minimal': most of the feature extractors are disabled and only a small subset is used
        'efficient': extract 781 tsfresh features, namely all of them except for the expensive to compute ones
        'all': extract all 787 tsfresh features
    extraction_seed : int (default = 0)
        Random seed used to choose subset of features, when expansion control does not allow to include all features
    sequence_length_q25 : list of ints (default = None)
        List containing 25th percentile of sequence lengths for each column at the train step.
        If not provided, default value will be assigned (DEFAULT_INPUT_SEQUENCE_LENGTH).
    Examples
    --------
    >>> from sagemaker_sklearn_extension.feature_extraction.sequences import TSFreshFeatureExtractor
    >>> import numpy as np
    >>> data = [np.array([ 3., np.nan, 4.]), np.array([ 5, 6]), np.array([8, np.nan, np.nan, 10])]
    >>> tsfresh_feature_extractor = TSFreshFeatureExtractor(augment=True, interpolation_method="hybrid")
    >>> X = tsfresh_feature_extractor.fit_transform(data)
    >>> print(X.shape)
    (3, 785)
    >>> print(X[:4, :4])
    [[ 3. 3. 4. 0.]
    [ 5. 6. 0. 0.]
    [ 8. 8. 8. 10.]]
    >>> tsfresh_feature_extractor = TSFreshFeatureExtractor(augment=False)
    >>> X = tsfresh_feature_extractor.fit_transform(data)
    >>> print(X.shape)
    (3, 781)
    """
    def __init__(
        self,
        augment=False,
        interpolation_method="hybrid",
        extraction_type="efficient",
        extraction_seed=0,
        sequence_length_q25=None,
        expansion_threshold=None,
    ):
        super().__init__()
        self.augment = augment
        self.interpolation_method = interpolation_method
        self.extraction_type = extraction_type
        # NOTE(review): stored under a different name than the __init__ parameter
        # (extraction_seed -> feature_sampling_seed); sklearn get_params()/clone()
        # expect matching names -- confirm this estimator is never cloned.
        self.feature_sampling_seed = extraction_seed
        self.sequence_length_q25 = sequence_length_q25 or DEFAULT_INPUT_SEQUENCE_LENGTH
        expansion_threshold = expansion_threshold or self._compute_expansion_threshold(self.sequence_length_q25)
        self.expansion_threshold = min(expansion_threshold, self._compute_expansion_threshold(self.sequence_length_q25))
        # expansion_threshold will be the stricter between the one computed for this column and the one respecting
        # the total expansion for all columns
    def fit(self, X, y=None):
        # Nothing to learn during fit.
        return self
    def transform(self, X, y=None):
        """Extract features computed by tsfresh from each input array/list.
        Parameters
        ----------
        X : list of np.arrays or list of lists
        Returns
        -------
        tsfresh_features : np.array
        """
        tsfresh_features, X_df = self._extract_tsfresh_features(X)
        if self.augment:
            # Stack the extracted features to the original sequences in X, after padding with np.nans any shorter
            # input sequences in X to match the length of the longest sequence, and imputing missing values as
            # specified by interpolation_method
            X_df_padded = self._interpolate(X_df)
            X = X_df_padded.groupby("id").agg(lambda x: x.tolist())[0].to_numpy()
            X = np.stack(X, axis=0)
            tsfresh_features = np.hstack((X, tsfresh_features))
        return tsfresh_features
    def _impute_ts(self, X, interpolation_method):
        """Impute time series missing values by linear interpolation,
        forward/backward filling, or padding with zeroes.
        """
        if interpolation_method == "linear":
            X[0] = X[0].interpolate(method=interpolation_method, limit_direction="both")
        elif interpolation_method == "fill":
            # Forward fill to complete the sequences. Then, backfill in case of NaNs at the start of the sequence.
            X[0] = X[0].interpolate(method="ffill", axis=0).interpolate(method="bfill", axis=0)
        elif interpolation_method == "zeroes":
            X[0] = X[0].fillna(0)
        elif interpolation_method == "hybrid":
            # Hybrid imputation must run per sequence, hence the groupby on "id".
            X = X.groupby("id").apply(self._hybrid_interpolation)
        else:
            raise ValueError(
                f"{interpolation_method} is not a supported interpolation method. Please choose one from "
                f"the following options: [linear, fill, zeroes, hybrid]."
            )
        return X
    @staticmethod
    def _convert_to_df(X):
        """Convert the list of np.arrays X into a dataframe compatible with extract_features."""
        # Long format: one row per (sequence id, time step) pair; NaNs are kept.
        X_df = pd.DataFrame(data=X)
        X_df = X_df.stack(dropna=False)
        X_df.index.rename(["id", "time"], inplace=True)
        X_df = X_df.reset_index()
        return X_df
    def _interpolate(self, X_df):
        """Impute missing values through the selected interpolation_method."""
        if self.interpolation_method is not None:
            X_df = self._impute_ts(X_df, self.interpolation_method)
        return X_df
    @staticmethod
    def _hybrid_interpolation(x):
        """Replace with zeroes any NaNs at the end or start of the sequences and forward fill the remaining NaNs."""
        # Compute the index of the first and last non-NaN value
        # In case of all NaNs, both first_valid and last_valid are None, and all values get replaced with zeroes
        first_valid = x[0].first_valid_index()
        last_valid = x[0].last_valid_index()
        x.loc[first_valid:last_valid, 0] = x.loc[first_valid:last_valid, 0].interpolate(method="ffill", axis=0)
        x[0] = x[0].fillna(0)
        return x
    def _extract_tsfresh_features(self, X):
        """Run tsfresh on X; return (imputed feature matrix, long-format df of X)."""
        X_df = self._convert_to_df(X)
        X_df_no_nans = X_df.dropna()
        # covering corner case when all nans
        if X_df_no_nans.shape[0] == 0:
            X_df_no_nans = X_df.loc[[0]].fillna(0)
        if self.extraction_type not in ["minimal", "efficient", "all"]:
            raise ValueError(
                f"{self.extraction_type} is not a supported feature extraction option. Please choose one from "
                f"the following options: [minimal, efficient, all]."
            )
        min_settings = MinimalFCParameters()
        # Extract time series features from the dataframe
        # Replace any ``NaNs`` and ``infs`` in the extracted features with median/extreme values for that column
        tsfresh_features = extract_features(
            X_df_no_nans,
            default_fc_parameters=min_settings,
            column_id="id",
            column_sort="time",
            impute_function=impute,
            n_jobs=N_TSFRESH_JOBS,
        )
        # NOTE(review): transform-time state; _apply_feature_threshold reads this
        # attribute, so it may only run after this point in the call sequence.
        self.min_settings_card = tsfresh_features.shape[1]
        # Minimal features computed independently to ensure they go first in the output,
        # this is needed to ensure their survival when filtering features
        if self.extraction_type in ["efficient", "all"]:
            if self.extraction_type == "efficient":
                settings = EfficientFCParameters()
            else:
                settings = ComprehensiveFCParameters()
            # Drop the minimal features that were already computed above.
            settings = {k: v for k, v in settings.items() if k not in min_settings}
            self._apply_feature_threshold(settings)
            if settings:
                # check that efficient strategies are not emptied when applying expansion threshold
                tsfresh_features_extra = extract_features(
                    X_df_no_nans,
                    default_fc_parameters=settings,
                    column_id="id",
                    column_sort="time",
                    impute_function=impute,
                    n_jobs=N_TSFRESH_JOBS,
                )
                tsfresh_features = pd.concat([tsfresh_features, tsfresh_features_extra], axis=1)
        # If X_df.dropna() dropped some observations entirely (i.e., due to all NaNs),
        # impute each tsfresh feature for those observations with the median of that tsfresh feature
        tsfresh_features_imputed = impute(tsfresh_features.reindex(pd.RangeIndex(X_df["id"].max() + 1)))
        return tsfresh_features_imputed, X_df
    def _apply_feature_threshold(self, settings):
        """Accepts a settings dictionary, with all the possible generated features,
        and filters features if needed until their count matches the given "self.expansion_threshold"
        (minus minimal features).
        Does that in a reproducible "random" way, controlled by "self.feature_sampling_seed".
        Draws Random indexes to be filtered, then iterates over the settings dictionary assigning an index to each value
        and performs the filtering based on that index.
        """
        settings.pop("linear_trend_timewise", None)  # remove these 5 features that need dateTime indexes for sequences
        max_available_features = self._get_features_count(settings)
        if self.expansion_threshold >= max_available_features + self.min_settings_card:
            return  # no need to limit
        # Shuffle feature indexes under a fixed seed, saving and restoring the
        # global NumPy RNG state so callers' randomness is not perturbed.
        filter_order = np.arange(max_available_features)
        random_state = np.random.get_state()
        np.random.seed(self.feature_sampling_seed)
        np.random.shuffle(filter_order)
        np.random.set_state(random_state)
        removed_indices = list(filter_order[max(0, self.expansion_threshold - self.min_settings_card) :])
        removed_indices.sort()
        feature_idx = 0
        for k in list(settings.keys()):
            if isinstance(settings[k], list):
                survived_list = []
                # case the value is a list, each list element is counted separately
                for index, _ in enumerate(settings[k]):
                    if removed_indices and removed_indices[0] == feature_idx:
                        del removed_indices[0]
                    else:
                        survived_list.append(settings[k][index])
                    feature_idx += 1
                # copy the "survived", features to the final list. if no one survived, delete the settings key.
                if survived_list:
                    settings[k] = survived_list
                else:
                    del settings[k]
            else:
                # case the value is None, count it as one feature
                if removed_indices and removed_indices[0] == feature_idx:
                    del removed_indices[0]
                    del settings[k]
                feature_idx += 1
    def _compute_expansion_threshold(self, input_len):
        """Per-column feature budget: proportional to sequence length, floor of 10."""
        return int(max(ceil(SEQUENCE_EXPANSION_FACTOR * input_len + 1) + 1, 10))
    def _more_tags(self):
        return {"_skip_test": True, "allow_nan": True}
    def _get_features_count(self, settings):
        """Count individual features in a settings dict (list values count one per element)."""
        return sum([len(v) if isinstance(v, list) else 1 for v in settings.values()])
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import VectorizerMixin, TfidfVectorizer
from sklearn.utils.validation import check_array, check_is_fitted
class MultiColumnTfidfVectorizer(BaseEstimator, VectorizerMixin, TransformerMixin):
"""Applies ``sklearn.feature_extraction.text.TfidfVectorizer`` to each column in an array.
Each column of text is treated separately with a unique TfidfVectorizer. The vectorizers are applied sequentially.
Parameters
----------
strip_accents : {'ascii', 'unicode', None} (default=None)
Remove accents and perform other character normalization during the preprocessing step.
'ascii' is a fast method that only works on characters that have an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
Both 'ascii' and 'unicode' use NFKD normalization from :func:`unicodedata.normalize`.
lowercase : boolean (default=True)
Convert all characters to lowercase before tokenizing.
preprocessor : callable or None (default=None)
Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams
generation steps.
tokenizer : callable or None (default=None)
Override the string tokenization step while preserving the preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically
detect and filter stop words based on intra corpus document frequency of terms.
token_pattern : string
Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp
select tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a
token separator).
ngram_range : tuple (min_n, max_n) (default=(1, 1))
The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n
such that min_n <= n <= max_n will be used.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside word boundaries; n-grams at the edges of words
are padded with space.
If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input.
max_df : float in range [0.0, 1.0] or int (default=1.0)
When building the vocabulary ignore terms that have a document frequency strictly higher than the given
threshold (corpus-specific stop words).
If float, the parameter represents a proportion of documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int (default=1)
When building the vocabulary ignore terms that have a document frequency strictly lower than the given
threshold. This value is also called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None (default=1000)
If not None, build a vocabulary that only consider the top max_features ordered by term frequency across
the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional (default=None)
Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an
iterable over terms. If not given, a vocabulary is determined from the input.
dtype : type, optional (default=float64)
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional (default='l2')
Each output row will have unit norm, either:
* 'l2': Sum of squares of vector elements is 1. The cosine similarity between two vectors is their dot product
when l2 norm has been applied.
* 'l1': Sum of absolute values of vector elements is 1.
See :func:`preprocessing.normalize`
use_idf : boolean (default=True)
Enable inverse-document-frequency reweighting.
smooth_idf : boolean (default=True)
Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every
term in the collection exactly once. Prevents zero divisions.
sublinear_tf : boolean (default=False)
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
vocabulary_sizes : list(int) (default=None)
Specify the exact vocabulary size to use while encoding each column in the input dataset. The vocabulary size
of a column corresponds to the number of features in its TF-IDF encoding, before the feature matrices are
concatenated. If the feature matrix of column ``i`` has more features than the corresponding vocabulary size,
only the first ``vocabulary_sizes[i]`` features are kept. If the feature matrix of column ``i`` has fewer
features than the corresponding vocabulary size, zero columns are added to the feature matrix until it has
``vocabulary_sizes[i]`` features. This parameter is useful if the total number of features of the encoding
has to be constant.
ignore_columns_with_zero_vocabulary_size : boolean (default=True)
Allow ValueErrors thrown by ``sklearn.feature_extraction.text.TfidfVectorizer`` because of over-pruning
of terms to be ignored and an empty ``scipy.sparse.csr_matrix`` to be used in place of the given columns
TF-IDF document-term matrix.
Attributes
----------
vectorizers_ : list of ``sklearn.feature_extraction.text.TfidfVectorizers``
List of ``sklearn.feature_extraction.text.TfidfVectorizers``. Each TfidfVectorizer is separately instantiated
on an input column. len(self.vectorizers_) should equal to the number of input columns.
Notes
-----
MultiColumnTfidfVectorizer should be used with 2D arrays of text strings, for 1D arrays of text data, use
``sklearn.feature_extraction.text.TfidfVectorizer`` or reshape using array.reshape(-1, 1)
"""
    def __init__(
        self,
        strip_accents=None,
        lowercase=True,
        preprocessor=None,
        tokenizer=None,
        stop_words=None,
        token_pattern=r"(?u)\b\w\w+\b",
        ngram_range=(1, 1),
        analyzer="word",
        max_df=1.0,
        min_df=1,
        max_features=1000,
        vocabulary=None,
        dtype=np.float64,
        norm="l2",
        use_idf=True,
        smooth_idf=True,
        sublinear_tf=False,
        vocabulary_sizes=None,
        ignore_columns_with_zero_vocabulary_size=True,
    ):
        # Per the scikit-learn estimator contract, __init__ only stores the
        # constructor arguments verbatim (no validation, no transformation) so
        # that get_params/set_params and cloning behave correctly. Parameter
        # semantics are documented in the class docstring; most are forwarded
        # unchanged to each per-column TfidfVectorizer in _fit_vectorizer.
        self.strip_accents = strip_accents
        self.lowercase = lowercase
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.stop_words = stop_words
        self.token_pattern = token_pattern
        self.ngram_range = ngram_range
        self.analyzer = analyzer
        self.max_df = max_df
        self.min_df = min_df
        self.max_features = max_features
        self.vocabulary = vocabulary
        self.dtype = dtype
        self.norm = norm
        self.use_idf = use_idf
        self.smooth_idf = smooth_idf
        self.sublinear_tf = sublinear_tf
        self.vocabulary_sizes = vocabulary_sizes
        self.ignore_columns_with_zero_vocabulary_size = ignore_columns_with_zero_vocabulary_size
def _fit_vectorizer(self, col_idx, X):
max_features = self.max_features
# Override max_features for the current column in order to enforce the vocabulary size.
if self.max_features and self.vocabulary_sizes:
max_features = min(self.max_features, self.vocabulary_sizes[col_idx])
elif self.vocabulary_sizes:
max_features = self.vocabulary_sizes[col_idx]
try:
vectorizer = TfidfVectorizer(
strip_accents=self.strip_accents,
lowercase=self.lowercase,
preprocessor=self.preprocessor,
tokenizer=self.tokenizer,
stop_words=self.stop_words,
token_pattern=self.token_pattern,
ngram_range=self.ngram_range,
analyzer=self.analyzer,
max_df=self.max_df,
min_df=self.min_df,
max_features=max_features,
vocabulary=self.vocabulary,
dtype=self.dtype,
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
vectorizer.fit(X[:, col_idx])
except ValueError as err:
zero_vocab_errors = [
"After pruning, no terms remain. Try a lower min_df or a higher max_df.",
"max_df corresponds to < documents than min_df",
"empty vocabulary; perhaps the documents only contain stop words",
]
if str(err) in zero_vocab_errors and self.ignore_columns_with_zero_vocabulary_size:
vectorizer = None
else:
raise
return vectorizer
def fit(self, X, y=None):
"""Build the list of TfidfVectorizers for each column.
Parameters
----------
X : {array-like}, text data
Returns
-------
self : MultiColumnTfidfVectorizer
"""
X = check_array(X, dtype=None)
n_columns = X.shape[1]
# If specified, vocabulary size must be given for each column of the input dataset.
if self.vocabulary_sizes and len(self.vocabulary_sizes) != n_columns:
raise ValueError("If specified, vocabulary_sizes has to have exactly one entry per data column.")
self.vectorizers_ = [self._fit_vectorizer(i, X) for i in range(n_columns)]
return self
def _transform_vectorizer(self, col_idx, X):
if self.vectorizers_[col_idx]:
tfidf_features = self.vectorizers_[col_idx].transform(X[:, col_idx])
# If the vocabulary size is specified and there are too few features, then pad the output with zeros.
if self.vocabulary_sizes and tfidf_features.shape[1] < self.vocabulary_sizes[col_idx]:
tfidf_features = sp.csr_matrix(
(tfidf_features.data, tfidf_features.indices, tfidf_features.indptr),
shape=(tfidf_features.shape[0], self.vocabulary_sizes[col_idx]),
)
return tfidf_features
# If ``TfidfVectorizer`` threw a value error, add an empty TF-IDF document-term matrix for the column
return sp.csr_matrix((X.shape[0], 0))
def transform(self, X, y=None):
"""Transform documents to document term-matrix.
Parameters
----------
X : 2D array of text data
Returns
-------
tfidf_matrix : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, "vectorizers_")
X = check_array(X, dtype=None)
return sp.hstack([self._transform_vectorizer(i, X) for i in range(X.shape[1])])
def _more_tags(self):
return {"X_types": ["string"]} | /sagemaker-scikit-learn-extension-2.5.0.tar.gz/sagemaker-scikit-learn-extension-2.5.0/src/sagemaker_sklearn_extension/feature_extraction/text.py | 0.937619 | 0.615406 | text.py | pypi |
import json
import os
from abc import ABC, abstractmethod
from sys import getsizeof
from typing import Optional, Union

import mlio
import numpy as np
import psutil
from mlio.integ.numpy import as_numpy
def _convert_bytes_to_megabytes(b):
"""Converts bytes to megabytes"""
return b / 1000 ** 2
def _convert_megabytes_to_bytes(mb):
"""Converts megabytes to bytes"""
return mb * (1000 ** 2)
def _get_size_total(numpy_array):
"""Gets estimated memory usage of numpy array with dtype object in bytes."""
assert numpy_array.dtype.kind == "O"
assert 1 <= numpy_array.ndim <= 2
total = numpy_array.nbytes # Size of reference.
rows = numpy_array if numpy_array.ndim == 2 else [numpy_array]
for row in rows:
total += sum([getsizeof(x) for x in row])
return total
def _used_memory_mb():
    """Return the currently used system memory in (decimal) megabytes.

    Takes a single ``psutil.virtual_memory()`` snapshot so that ``total`` and
    ``available`` come from one consistent view of memory; the original code
    called ``virtual_memory()`` twice, so the two fields could disagree.
    """
    snapshot = psutil.virtual_memory()
    return _convert_bytes_to_megabytes(snapshot.total - snapshot.available)
def _get_data(source):
    """Resolve ``source`` into mlio data store object(s) based on the input mode.

    - a ``bytes`` buffer yields a single ``mlio.InMemoryStore``;
    - when the ``SM_INPUT_DATA_CONFIG`` environment variable is absent,
      ``source`` is treated as a file or directory path and matching files are
      listed;
    - otherwise ``source`` (or the basename of its path) names a channel in
      ``SM_INPUT_DATA_CONFIG``, and that channel's ``TrainingInputMode`` selects
      between a ``mlio.SageMakerPipe`` ('Pipe') and listed files ('File').

    Example ``SM_INPUT_DATA_CONFIG`` with two channels ("code" and "train")::

        {
            "code": {
                "ContentType": "application/x-code",
                "RecordWrapperType": "None",
                "S3DistributionType": "FullyReplicated",
                "TrainingInputMode": "File"
            },
            "train": {
                "ContentType": "text/csv",
                "RecordWrapperType": "None",
                "S3DistributionType": "ShardedByS3Key",
                "TrainingInputMode": "File"
            }
        }

    Parameters
    ----------
    source : str or bytes
        Name of the SageMaker channel, file, or directory from which the data
        is being read, or the Python buffer object containing the data.

    Returns
    -------
    list of mlio.InMemoryStore, list of mlio.SageMakerPipe, or the result of
    ``mlio.list_files`` over the given path.
    """
    if isinstance(source, bytes):
        return [mlio.InMemoryStore(source)]
    if isinstance(source, mlio.File):
        source = source.id
    config = os.environ.get("SM_INPUT_DATA_CONFIG")
    if config is None:
        # No SageMaker channel configuration: treat the source as a path.
        return mlio.list_files(source, pattern="*")
    channels = json.loads(config)
    channel_name = os.path.basename(source)
    if channel_name not in channels:
        raise KeyError(
            "Configuration for channel name {} is not provided in SM_INPUT_DATA_CONFIG.".format(channel_name)
        )
    channel_config = channels[channel_name]
    if "TrainingInputMode" not in channel_config:
        raise KeyError(
            "SM_INPUT_DATA_CONFIG is malformed. TrainingInputMode is "
            "not found for channel name {}".format(channel_name)
        )
    if channel_config["TrainingInputMode"] == "Pipe":
        return [mlio.SageMakerPipe(source)]
    # 'File' mode.
    return mlio.list_files(source, pattern="*")
def _get_reader(source, batch_size):
    """Build a ``mlio.CsvReader`` over the given source.

    Parameters
    ----------
    source : str or bytes
        Name of the SageMaker channel, file, or directory from which the data
        is being read, or the Python buffer object containing the data.
    batch_size : int
        Number of rows per batch to read from the source.

    Returns
    -------
    mlio.CsvReader
        Reader configured with a SageMaker pipe, file list, or in-memory buffer.
    """
    reader_params = mlio.DataReaderParams(
        dataset=_get_data(source), batch_size=batch_size, warn_bad_instances=False
    )
    # Every field is read as a string, no header row is consumed, and quoted
    # newlines inside fields are tolerated.
    parser_params = mlio.CsvParams(
        default_data_type=mlio.DataType.STRING, header_row_index=None, allow_quoted_new_lines=True
    )
    return mlio.CsvReader(data_reader_params=reader_params, csv_params=parser_params)
class AbstractBatchConsumer(ABC):
    """Abstract utility class for consuming batches of columnar data up to a given size limit.

    Batches are recorded as numpy arrays as they come in, and concatenated into a final array when enough of them
    have been read so that the resulting array is of size at most `max_size_in_bytes`, or the batches have been
    exhausted. This requires implementing an estimate of the size of the final array, which is done in the
    `_update_array_size_estimate` method.

    Parameters
    ----------
    max_size_in_bytes : int
        The maximum size that the resulting numpy array(s) should take up.
    target_column_index : int or None
        The index of the target column in the incoming batches. If present, column data is split into a separate
        array than the remaining "feature" data.
    """

    def __init__(self, max_size_in_bytes, target_column_index=None):
        self.max_size_in_bytes = max_size_in_bytes
        self.target_column_index = target_column_index
        # Lists which hold the batch data to concatenate in the end.
        self._features_batches = []
        self._target_batches = []
        # Number of columns to be inferred from first batch.
        self._n_columns = 0
        # State tracking convenience variables.
        self._consumed = False
        self._initialized = False
        # Whether the target column is split out into its own 1D array.
        self._split_target = target_column_index is not None

    def _initialize_state(self, first_batch):
        # Deferred initialization: the column count is unknown until the first
        # batch arrives.
        self._n_columns = len(first_batch)
        if self._split_target:
            assert (
                self.target_column_index < self._n_columns
            ), f"Invalid target_column_index {self.target_column_index} in data with {self._n_columns} columns."
        # Enable loading single-column datasets with self.target_column_index set.
        if self._n_columns == 1:
            self.target_column_index = None
            self._split_target = False
        self._initialized = True

    def _add_batch(self, batch):
        """Adds batch or truncated batch to the concatenation list.

        A batch is truncated if adding it would make the final array exceed the maximum target size.

        Parameters
        ----------
        batch : mlio Example
            An MLIO batch returned by CsvReader, with data encoded as strings, exposed as an Example class. Can be
            used to easily iterate over the columns of data in the batch.

        Returns
        -------
        bool
            True if adding batches should continue, False otherwise.
        """
        # Perform initialization on first batch.
        if not self._initialized:
            self._initialize_state(batch)
        # Construct numpy representation for data in batch.
        features_array_data = self._construct_features_array_data(batch)
        target_array_data = self._construct_target_array_data(batch)
        # Update size estimation variables.
        batch_nbytes_estimate, total_nbytes_estimate = self._update_array_size_estimate(
            features_array_data, target_array_data
        )
        # If the resulting array will be too large, truncate the last batch so that it fits.
        should_continue = True
        if total_nbytes_estimate > self.max_size_in_bytes:
            # Keep only the fraction of this batch's rows whose estimated bytes
            # fit within the remaining budget.
            batch_bytes_to_keep = batch_nbytes_estimate - (total_nbytes_estimate - self.max_size_in_bytes)
            fraction_of_batch_rows_to_keep = batch_bytes_to_keep / batch_nbytes_estimate
            n_rows_to_keep = int(fraction_of_batch_rows_to_keep * self._n_rows(features_array_data))
            if n_rows_to_keep > 0:
                features_array_data = self._resize_features_array_data(features_array_data, n_rows_to_keep)
                if self._split_target:
                    target_array_data = self._resize_target_array_data(target_array_data, n_rows_to_keep)
            # Budget exhausted: stop consuming after recording this batch.
            should_continue = False
        self._extend_features_batches(features_array_data)
        if self._split_target:
            self._extend_target_batches(target_array_data)
        return should_continue

    def consume_reader(self, reader):
        """Reads batches from reader and returns array of size less than or equal to the indicated limit.

        Parameters
        ----------
        reader : mlio.CsvReader
            A new reader instance from which to load the data.

        Returns
        -------
        numpy array or tuple of numpy arrays
            A single numpy array is returned when `target_column_index` is None.
            A tuple of numpy arrays is returned when `target_column_index` is not None. The first element of the
            tuple is a 2D numpy array containing all columns except the one corresponding to `target_column_index`.
            The second element of the tuple is 1D numpy array corresponding to `target_column_index`.
        """
        # Instances accumulate internal state while consuming, so they are
        # strictly single-use.
        if self._consumed:
            raise RuntimeError("This instance has already been used to consume a batch reader.")
        for batch in reader:
            should_continue = self._add_batch(batch)
            if not should_continue:
                break
        self._consumed = True
        return self._concatenate_data()

    @abstractmethod
    def _concatenate_data(self):
        """Concatenates recorded batches into final numpy array(s).

        Returns
        -------
        numpy array or tuple of numpy arrays
            A single numpy array is returned when `target_column_index` is None.
            A tuple of numpy arrays is returned when `target_column_index` is not None. The first element of the
            tuple is a 2D numpy array containing all columns except the one corresponding to `target_column_index`.
            The second element of the tuple is 1D numpy array corresponding to `target_column_index`.
        """

    @abstractmethod
    def _construct_features_array_data(self, batch):
        """Constructs a data structure containing feature column numpy arrays from the batch.

        Parameters
        ----------
        batch : mlio Example
            An MLIO batch returned by CsvReader, with data encoded as strings, exposed as an Example class. Can be
            used to easily iterate over the columns of data in the batch.

        Returns
        -------
        Feature data encoded as np.array(s).
        """

    @abstractmethod
    def _construct_target_array_data(self, batch):
        """Constructs a numpy array with the target column data in the batch.

        Parameters
        ----------
        batch : mlio batch as an Example class -- can be used to easily iterate over columns of data in batch.

        Returns
        -------
        np.array or None
            None is returned if `target_column_index` is None.
        """

    @abstractmethod
    def _extend_features_batches(self, features_array_data):
        """Saves `features_array_data` into `self._features_batches`.

        Parameters
        ----------
        features_array_data : np.array or list of np.array
            Feature columns from data batch processed into numpy array(s).

        Returns
        -------
        None
        """

    @abstractmethod
    def _extend_target_batches(self, target_array_data):
        """Saves `target_array_data` into `self._target_batches`.

        Parameters
        ----------
        target_array_data: np.array or None
            Target column from data batch processed into numpy array. Can be None if the `target_column_index`
            parameter has not been specified.

        Returns
        -------
        None
        """

    @abstractmethod
    def _n_rows(self, features_array_data):
        """Returns the number of rows in `features_array_data`.

        Parameters
        ----------
        features_array_data : np.array or list of np.array
            Feature columns from data batch processed into numpy array(s).

        Returns
        -------
        int
            Number of rows in incoming batch.
        """

    @abstractmethod
    def _resize_features_array_data(self, features_array_data, n_rows_to_keep):
        """Truncates feature numpy array data to `n_rows_to_keep`.

        Parameters
        ----------
        features_array_data : np.array or list of np.array
            Feature columns from data batch processed into numpy array(s).
        n_rows_to_keep : int

        Returns
        -------
        Truncated feature numpy array data.
        """

    def _resize_target_array_data(self, target_array_data, n_rows_to_keep):
        """Truncates target numpy array to `n_rows_to_keep`.

        Concrete (non-abstract) because a plain slice works for every subclass.

        Parameters
        ----------
        target_array_data: np.array or None
            Target column from data batch processed into numpy array. Can be None if the `target_column_index`
            parameter has not been specified.
        n_rows_to_keep : int

        Returns
        -------
        np.array
            Truncated array slice.
        """
        return target_array_data[:n_rows_to_keep]

    @abstractmethod
    def _update_array_size_estimate(self, features_array_data, target_array_data):
        """Updates internal state required to estimate the size of the final array.

        This estimate will vary depending on the storage mechanism of the batches as they are being read, and the
        format of the final array.

        Parameters
        ----------
        features_array_data : np.array or list of np.array
            Feature columns from data batch processed into numpy array(s).
        target_array_data: np.array or None
            Target column from data batch processed into numpy array. Can be None if the `target_column_index`
            parameter has not been specified.

        Returns
        -------
        tuple of ints
            Tuple consisting of estimated size of batch in final array, and estimated total size of final array.
            Both values are returned in bytes.
        """
class ObjectBatchConsumer(AbstractBatchConsumer):
    """Batch consumer that concatenates incoming batches without casting to a specific dtype.

    Batches arrive with dtype object (the effect of ``default_data_type=mlio.DataType.STRING`` in ``_get_reader``
    above), so the concatenated result is also an object-dtype array.
    """

    def __init__(self, max_size_in_bytes, target_column_index=None):
        super().__init__(max_size_in_bytes, target_column_index)
        # Average memory per row (object references included), measured on the
        # first batch and reused for every later size estimate.
        self._row_nbytes = 0
        self._estimated_size_in_bytes = 0

    def _initialize_state(self, first_batch):
        super()._initialize_state(first_batch)
        # Use the first batch to estimate the per-row cost of each column.
        for column_index in range(self._n_columns):
            column_values = as_numpy(first_batch[column_index]).flatten()
            self._row_nbytes += _get_size_total(column_values) / column_values.shape[0]

    def _concatenate_data(self):
        """Concatenate the recorded feature (and optionally target) batches into the final array(s)."""
        feature_data = np.concatenate(self._features_batches) if self._features_batches else np.array([])
        if not self._split_target:
            return feature_data
        target_data = np.concatenate(self._target_batches) if self._target_batches else np.array([])
        return feature_data, target_data

    def _construct_features_array_data(self, batch):
        """Stack the batch's non-target columns into a single 2D numpy array."""
        feature_columns = [
            as_numpy(batch[column_index]).flatten()
            for column_index in range(self._n_columns)
            if column_index != self.target_column_index
        ]
        return np.column_stack(feature_columns)

    def _construct_target_array_data(self, batch):
        if not self._split_target:
            return None
        return as_numpy(batch[self.target_column_index]).flatten()

    def _extend_features_batches(self, features_array_data):
        """Record the feature array built from an incoming batch."""
        self._features_batches.append(features_array_data)

    def _extend_target_batches(self, target_array_data):
        """Record the target array built from an incoming batch."""
        self._target_batches.append(target_array_data)

    def _n_rows(self, features_array_data):
        """Number of rows in the feature data extracted from a batch."""
        return features_array_data.shape[0]

    def _resize_features_array_data(self, features_array_data, n_rows_to_keep):
        """Keep only the first ``n_rows_to_keep`` rows of the incoming feature batch."""
        return features_array_data[:n_rows_to_keep]

    def _update_array_size_estimate(self, features_array_data, target_array_data):
        """Estimate sizes using the per-row cost measured on the first batch."""
        batch_nbytes = self._row_nbytes * self._n_rows(features_array_data)
        self._estimated_size_in_bytes += batch_nbytes
        return batch_nbytes, self._estimated_size_in_bytes
class StringBatchConsumer(AbstractBatchConsumer):
    """Utility class which reads incoming batches and returns the final array(s) with dtype string.

    This class consumes batches produced by MLIO's CsvReader. As each batch is consumed, we estimate the size of
    the array with dtype string that would be produced by concatenating the batches so far. That is then compared
    against the limit in bytes to determine whether to stop consuming the batches.

    Note that memory usage might be smaller than `max_size_in_bytes`, because the final array's size estimate is
    based on the max itemsize encountered in any batch, and a portion of the last batch may be discarded.

    `self._features_batches` is a list of lists. Each sublist corresponds to a feature column in the input
    dataset, and contains numpy arrays of the data that came in for that column in each batch.
    """

    def __init__(self, max_size_in_bytes, target_column_index=None):
        super().__init__(max_size_in_bytes, target_column_index)
        # Total number of items loaded so far.
        self._features_size = 0
        self._target_size = 0
        # Maximum itemsizes encountered so far.
        self._max_features_itemsize = 0
        self._target_itemsize = 0

    def _initialize_state(self, first_batch):
        super()._initialize_state(first_batch)
        # The number of feature (non-target) columns.
        self._n_features = self._n_columns - (1 if self._split_target else 0)
        # self._features_batches[i] contains numpy arrays containing the data from feature column i in each batch.
        for _ in range(self._n_features):
            self._features_batches.append([])
        # Maintain a separate itemsize for each column.
        self._features_itemsizes = [0 for _ in range(self._n_features)]

    def _concatenate_data(self):
        """Concatenates individual columns, and stacks them into a larger array."""
        # Replace batched columns in `self._features_batches` with a concatenated version one at a time.
        if self._features_batches and self._features_batches[0]:
            for i in range(self._n_features):
                self._features_batches[i] = np.concatenate(self._features_batches[i])
            features_data = np.column_stack(self._features_batches)
        else:
            # No data was read: emit an empty array with a string dtype.
            features_data = np.array([]).astype(str)
        if self._split_target:
            if self._target_batches:
                target_data = np.concatenate(self._target_batches)
            else:
                target_data = np.array([])
            return features_data, target_data
        return features_data

    def _construct_features_array_data(self, batch):
        """Creates a list of `self._n_features` arrays containing data from each column in the batch.

        Note that the arrays are interpreted as strings here, in order to easily extract itemsize and estimate
        size.
        """
        return [
            as_numpy(batch[i]).flatten().astype(str) for i in range(self._n_columns) if i != self.target_column_index
        ]

    def _construct_target_array_data(self, batch):
        if self._split_target:
            return as_numpy(batch[self.target_column_index]).flatten().astype(str)
        return None

    def _extend_features_batches(self, features_array_data):
        """Appends the numpy arrays created from an incoming batch to the features batch list."""
        for i, column_batch in enumerate(features_array_data):
            self._features_batches[i].append(column_batch)

    def _extend_target_batches(self, target_array_data):
        """Appends the target numpy array created from an incoming batch to the target batch list."""
        self._target_batches.append(target_array_data)

    def _n_rows(self, features_array_data):
        """Returns the number of rows in feature data extracted from batch."""
        return features_array_data[0].shape[0] if features_array_data else 0

    def _resize_features_array_data(self, features_array_data, n_rows_to_keep):
        """Truncates each feature's incoming data to a length of `n_rows_to_keep`."""
        return [column[:n_rows_to_keep] for column in features_array_data]

    def _update_array_size_estimate(self, features_array_data, target_array_data):
        """Estimates the size of the final array when the incoming array data is added to it."""
        feature_batch_size = 0
        for i in range(self._n_features):
            feature_column_array = features_array_data[i]
            # Track the widest item seen per column and overall: when the
            # columns are eventually stacked, the result's dtype itemsize is the
            # maximum across everything read so far, so earlier batches are
            # retroactively charged at the new, wider itemsize.
            self._features_itemsizes[i] = max(self._features_itemsizes[i], feature_column_array.itemsize)
            self._max_features_itemsize = max(self._features_itemsizes[i], self._max_features_itemsize)
            feature_batch_size += feature_column_array.size
        self._features_size += feature_batch_size
        batch_size_in_bytes = feature_batch_size * self._max_features_itemsize
        total_size_in_bytes = self._features_size * self._max_features_itemsize
        if self._split_target:
            # The target is kept as its own array, so it has an independent itemsize.
            self._target_itemsize = max(target_array_data.itemsize, self._target_itemsize)
            self._target_size += target_array_data.size
            batch_size_in_bytes += target_array_data.size * self._target_itemsize
            total_size_in_bytes += self._target_itemsize * self._target_size
        return batch_size_in_bytes, total_size_in_bytes
def _read_to_fit_memory(reader, max_memory_bytes, target_column_index=None, output_dtype="O"):
    """Reads batches from reader until a numpy array of size up to `max_memory_bytes` is returned.

    The array will have dtype.kind 'U' if output_dtype is 'U', and dtype.kind 'O' otherwise.

    Parameters
    ----------
    reader : mlio.CsvReader
        MLIO reader yielding data batches as Examples -- collections of tensors that can be cast to numpy arrays.
    max_memory_bytes : int
        Maximum total memory usage in bytes of the returned array(s).
    target_column_index : int or None
        Index of target column in the input dataset. If not None, data in the corresponding column of the CSV being
        read will be separated into a 1D numpy array.
    output_dtype : string
        If this value is 'U', then the returned numpy array(s) will have dtype.kind = 'U'. Otherwise,
        the return array(s) will have dtype.kind = 'O'.

    Returns
    -------
    numpy array or tuple of numpy arrays
        A single numpy array is returned when `target_column_index` is None.
        A tuple of numpy arrays is returned when `target_column_index` is not None. The first element of the tuple
        is a 2D numpy array containing all columns except the one corresponding to `target_column_index`.
        The second element of the tuple is 1D numpy array corresponding to `target_column_index`.
    """
    # The consumer implementation determines the dtype of the final array(s).
    if output_dtype == "U":
        reader_consumer = StringBatchConsumer(max_memory_bytes, target_column_index)
    else:
        reader_consumer = ObjectBatchConsumer(max_memory_bytes, target_column_index)
    return reader_consumer.consume_reader(reader)
def read_csv_data(
    source: Union[str, bytes],
    batch_size: int = 1000,
    fit_memory_percent: float = 20.0,
    target_column_index: Optional[int] = None,
    output_dtype: str = "O",
):
    """Reads comma separated data and returns a tuple of numpy arrays.

    This function reads the csv data from either a `SageMakerPipe`, `File`, or `InMemoryStore` buffer.
    If `fit_memory_percent` is set to a positive threshold, it identifies the number of samples that can be loaded
    to fit in the requested percentage of the memory.

    Parameters
    ----------
    source : str or bytes
        The source must correspond to one of the following:
        'File':
            This should be used if data is being read through a file or directory. If used, the
            'source' should be the file or directory's path.
        'Pipe':
            A 'Pipe' streams data directly from Amazon S3 to a container. If a 'Pipe' is used,
            the 'source' should be the name of the desired SageMaker channel.
            For more information on 'Pipe' mode see:
            https://aws.amazon.com/blogs/machine-learning/using-pipe-input-mode-for-amazon-sagemaker-algorithms/
        'InMemory':
            This should be used when the data is being read in bytes through an in-memory Python buffer.
            If used, 'source' should be the Python buffer object.
    batch_size : int
        The batch size in rows to read from the source.
    fit_memory_percent : float
        Sample down the examples to use the maximum percentage of the available memory.
    target_column_index : int or None
        Index of target column in the input dataset. If not None, data in the corresponding column of the CSV being
        read will be separated into a 1D numpy array.
    output_dtype : string
        If this value is 'U', then the returned numpy array(s) will have dtype.kind = 'U'. Otherwise,
        the return array(s) will have dtype.kind = 'O'.

    Returns
    -------
    numpy array or tuple of numpy arrays
        A single numpy array is returned when `target_column_index` is None.
        A tuple of numpy arrays is returned when `target_column_index` is not None. The first element of the tuple
        is a 2D numpy array containing all columns except the one corresponding to `target_column_index`.
        The second element of the tuple is 1D numpy array corresponding to `target_column_index`.
    """
    # NOTE: annotations fixed -- the previous `source: str or bytes` evaluated
    # to plain `str` (the `or` of two type objects), and `target_column_index:
    # int = None` mis-declared an optional parameter.
    # The byte budget is a fixed percentage of total system memory.
    max_memory_bytes = psutil.virtual_memory().total * (fit_memory_percent / 100)
    return _read_to_fit_memory(
        _get_reader(source, batch_size),
        max_memory_bytes,
        target_column_index=target_column_index,
        output_dtype=output_dtype,
    )
import numpy as np
from scipy.sparse import isspmatrix
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
class AutoMLTransformer(BaseEstimator, TransformerMixin):
    """Utility class encapsulating feature and target transformation functionality used in AutoML pipelines.

    Parameters
    ----------
    header : Header instance
        Instance of the ``Header`` class from ``sagemaker_sklearn_extension.externals``. Contains indices of the
        features and response in the corresponding dataset.
    feature_transformer : transformer instance
        A Scikit-Learn transformer used on the feature columns in the dataset. Should have ``fit`` and
        ``transform`` methods which accept 2-dimensional inputs.
    target_transformer : transformer instance
        A Scikit-Learn transformer used on the target column in the dataset. Should have ``fit``, ``transform``,
        and optionally ``inverse_transform`` methods which accept 1-dimensional inputs.
    """

    def __init__(self, header, feature_transformer, target_transformer):
        self.header = header
        self.feature_transformer = feature_transformer
        self.target_transformer = target_transformer

    def fit(self, X, y):
        """Fit and transform target, then fit feature data using the underlying transformers.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            The feature-only dataset.
        y : numpy array of shape [n_samples]
            The target column.

        Returns
        -------
        self : AutoMLTransformer
        """
        y_transformed = y
        if self.target_transformer:
            # The feature transformer is fitted against the transformed labels,
            # i.e. the same label representation used downstream.
            y_transformed = self.target_transformer.fit_transform(y)
        self.feature_transformer.fit(X, y_transformed)
        return self

    def transform(self, X):
        """Transform the dataset using the underlying transformers.

        Depending on the shape of the input, it transforms either the feature data, or the feature data and the
        target column and then concatenates them back into a single dataset.

        Parameters
        ----------
        X : numpy array
            The array to transform whose shape should be either:
            - [n_samples, n_features], if it only contains the features; or
            - [n_samples, n_features + 1], if it contains the feature columns and the target column.

        Returns
        -------
        array-like of shape [n_samples, n_transformed_features] or [n_samples, n_transformed_features + 1]
        """
        n_columns = X.shape[1]
        n_features = len(self.header.feature_column_indices)
        # X contains both features and response.
        if n_columns == n_features + 1:
            y = X[:, self.header.target_column_index]
            y_transformed = self.label_transform(y)
            # Rows whose transformed label came back NaN are dropped entirely.
            non_nan_indices = np.arange(y_transformed.shape[0])[~np.isnan(y_transformed)]
            feature_indices = np.array(self.header.feature_column_indices)
            # Broadcasting the kept row indices (as a column vector) against the
            # feature column indices (as a row vector) selects the
            # [kept rows x feature columns] submatrix of X in one indexing step.
            X_transformed = self.feature_transformer.transform(
                X[non_nan_indices[:, np.newaxis], feature_indices[np.newaxis, :]]
            )
            y_transformed_no_nans = y_transformed[non_nan_indices]
            # Re-attach the transformed target as the first column of the output.
            return np.column_stack((y_transformed_no_nans, self._dense_array(X_transformed)))
        # X contains only the features.
        if n_columns == n_features:
            return self.feature_transformer.transform(X)
        raise ValueError(
            f"Received data of unknown size. Expected number of columns is {n_features}. "
            f"Number of columns in the received data is {n_columns}."
        )

    def label_transform(self, y):
        """Apply transformation, if ``target_transformer`` has been specified.

        Parameters
        ----------
        y : array-like, 1-dimensional

        Returns
        -------
        array-like
            The transformed data. If target transformer has not been specified, simply returns the input.
        """
        if self.target_transformer:
            return self.target_transformer.transform(y)
        # Without a target transformer, labels are only cast to float32 (so the
        # NaN filtering in ``transform`` still works).
        return y.astype("float32")

    def inverse_label_transform(self, yt):
        """Apply inverse target transformation, if ``target_transformer`` has been specified set.

        Parameters
        ----------
        yt : array-like, 1-dimensional

        Returns
        -------
        array-like
            The inverse-transformed target. If target transformer has not been specified, simply returns the
            input.
        """
        if not self.target_transformer:
            return yt
        return self.target_transformer.inverse_transform(yt)

    @staticmethod
    def _dense_array(arr):
        """Converts the input array to dense array.

        Parameters
        ----------
        arr : numpy array or csr_matrix
            The array to be densified.

        Returns
        -------
        array-like
            Dense numpy array representing arr.
        """
        if isspmatrix(arr):
            # NOTE(review): scipy's ``todense`` returns ``np.matrix`` rather
            # than ``np.ndarray``; ``np.column_stack`` in ``transform`` accepts
            # either, but confirm before handing the result to other consumers.
            return arr.todense()
        return arr
from itertools import combinations
import numpy as np
from scipy.sparse import issparse
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_array
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.validation import FLOAT_DTYPES
class QuadraticFeatures(BaseEstimator, TransformerMixin):
    """Generate and add quadratic features to feature matrix.

    Generate a new feature matrix containing the original data, an optional bias column, a collection of squared
    features, and a collection of interaction terms. If ``max_n_features`` is not large enough to include all the
    squared features, then a random subset of them is added instead. If it is large enough to include all squared
    features, but not large enough to include all quadratic features, then all of the squared features and a random
    subset of the interaction features are added instead.

    This transformer is similar to ``PolynomialFeatures`` from the ``sklearn.preprocessing.data`` module.

    Parameters
    ----------
    include_bias : boolean (default = False)
        Whether to include a bias column -- the feature in which all entries are set to 1.0, and which acts as the
        intercept term in a linear model. Note that this parameter is False by default, in contrast to the
        corresponding parameter in ``sklearn``'s ``PolynomialFeatures``.

    interaction_only : boolean (default = False)
        Whether to produce only interaction features, and omit the squared features. For example, if the features
        are [a, b], then this will include ab, but not a^2 and b^2. The bias column is not affected by this
        parameter.

    max_n_features : int (default = 1000)
        The maximum number of features to include in the output data matrix. Squared features are prioritized over
        interaction features, unless ``interaction_only`` is ``True``. Must be larger than the number of input
        features (plus one, if ``include_bias`` is ``True``).

    order : str in {'C', 'F'} (default = 'C')
        Order of the input array: 'C' stands for C-contiguous order, and 'F' stands for Fortran-contiguous order.

    random_state : int, RandomState instance, or None (default = 0)
        If int, ``random_state`` is the seed used by the random number generator; if ``RandomState`` instance,
        ``random_state`` is the random number generator; if None, the random number generator is the
        ``RandomState`` instance used by ``np.random``. Used to determine which feature combinations to include in
        the output dataset when ``max_n_features`` is too small to fit all quadratic features.

    Examples
    --------
    >>> import numpy as np
    >>> from sagemaker_sklearn_extension.preprocessing import QuadraticFeatures
    >>> X = np.arange(1, 7).reshape((2, 3))
    >>> X
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> QuadraticFeatures().fit_transform(X)
    array([[ 1,  2,  3,  1,  4,  9,  2,  3,  6],
           [ 4,  5,  6, 16, 25, 36, 20, 24, 30]])
    >>> QuadraticFeatures(interaction_only=True, max_n_features=5).fit_transform(X)
    array([[ 1,  2,  3,  2,  3],
           [ 4,  5,  6, 20, 24]])

    Attributes
    ----------
    combinations_ : list of tuples (i, j)
        List of tuples with two elements, each containing the indexes of the columns that are multiplied
        element-wise to form a single output column. Tuples appear in the same order as the corresponding output
        columns.

    n_input_features_ : int
        The number of columns in the input dataset.

    n_output_features_ : int
        The number of columns in the output dataset.

    Notes
    -----
    Accepts only two-dimensional, dense input arrays.
    """
    def __init__(self, include_bias=False, interaction_only=False, max_n_features=1000, order="C", random_state=0):
        self.include_bias = include_bias
        self.interaction_only = interaction_only
        self.max_n_features = max_n_features
        self.order = order
        self.random_state = random_state
    def _build_combinations(self, n_features, random_state):
        """Calculate the feature pairs to be added to the input data based on parameters and number of input columns.

        If ``interaction_only`` is ``True``, all squared features are omitted. Otherwise, they are added before
        interaction features. If there is enough space--as indicated by ``max_n_features``--to add all squared
        features, then do so. Otherwise, take a random sub-sample. Then, if there's enough space to add all
        interaction features, do so. Otherwise, return a random sub-sample of those.

        The order in which ``random_state`` is consumed (``choice`` for squared features, then ``shuffle`` for
        interaction features) is part of the reproducibility contract for a fixed seed.

        Parameters
        ----------
        n_features : int
            The number of columns in the input vector.

        random_state : RandomState
            The prepared (using ``check_random_state``) ``RandomState`` instance.
        """
        # First calculate how many new features of each kind (squared and interaction) we can add.
        added_feature_budget = self.max_n_features - n_features - int(self.include_bias)
        if added_feature_budget <= 0:
            message = "max_n_features must be large enough for the output to contain more than the original dataset"
            if self.include_bias:
                message += " and bias column"
            raise ValueError(message)
        # Squared features take priority over interaction features (unless interaction_only).
        squared_feature_budget = 0 if self.interaction_only else min(added_feature_budget, n_features)
        interaction_feature_budget = max(0, added_feature_budget - squared_feature_budget)
        # Produce squared feature pairs.
        squared_features = []
        if squared_feature_budget == n_features:
            # No need to reorder if we can fit all squared features.
            squared_features = [(i, i) for i in range(n_features)]
        elif squared_feature_budget > 0:
            # Otherwise, take a random sample of them.
            squared_features = [
                (i, i) for i in random_state.choice(range(n_features), size=squared_feature_budget, replace=False)
            ]
        # Produce interaction feature pairs.
        interaction_features = []
        if interaction_feature_budget > 0:
            interaction_features = list(combinations(range(n_features), 2))
            # Take a random sample of feature interactions if not all can fit.
            if len(interaction_features) > interaction_feature_budget:
                random_state.shuffle(interaction_features)
                interaction_features = interaction_features[:interaction_feature_budget]
        return squared_features + interaction_features
    def fit(self, X, y=None):
        """
        Compute the number of output features and the combination of input features to multiply.

        Parameters
        ----------
        X : array-like , shape (n_samples, n_features)
            The data array to transform. Must be a non-sparse two-dimensional numpy array.

        Returns
        -------
        self : instance
        """
        _, n_features = check_array(X).shape
        random_state = check_random_state(self.random_state)
        self.combinations_ = self._build_combinations(n_features, random_state)
        self.n_input_features_ = n_features
        # Output layout: [bias?] + original features + one column per combination.
        self.n_output_features_ = n_features + len(self.combinations_) + int(self.include_bias)
        return self
    def transform(self, X):
        """
        Transform data to the chosen quadratic features.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data array to transform. Must be a non-sparse and two-dimensional.

        Returns
        -------
        XQ : np.ndarray, shape (n_samples, n_output_features_)
            The array of computed features.
        """
        check_is_fitted(self, ["n_input_features_", "n_output_features_", "combinations_"])
        X = check_array(X, order=self.order)
        n_samples, n_features = X.shape
        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape.")
        # Allocate the output once, then fill the bias / original / quadratic column ranges in place.
        XQ = np.empty((n_samples, self.n_output_features_), dtype=X.dtype, order=self.order)
        if self.include_bias:
            XQ[:, 0] = 1.0
            X_col_range_start, X_col_range_end = 1, self.n_input_features_ + 1
        else:
            X_col_range_start, X_col_range_end = 0, self.n_input_features_
        XQ[:, X_col_range_start:X_col_range_end] = X
        XQ[:, X_col_range_end:] = np.column_stack([X[:, i] * X[:, j] for i, j in self.combinations_])
        return XQ
class RobustStandardScaler(BaseEstimator, TransformerMixin):
    """Scaler that adapts its centering behavior to dense or sparse input.

    Standardization is delegated to ``sklearn.preprocessing.StandardScaler``,
    but the centering step depends on the input:

    - Dense input: ``z = (x - u) / s``, where ``u`` is the vector of per-feature
      training means and ``s`` the per-feature standard deviation. For any
      feature whose zero entries make up at least 70% of the samples, the
      corresponding entry of ``u`` is forced to 0 so that mostly-sparse columns
      are not shifted by their mean.
    - Sparse input: ``z = x / s`` (no centering, which would destroy sparsity).

    Parameters
    ----------
    copy : boolean, optional, default True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.

    Attributes
    ----------
    scaler_ : ``sklearn.preprocessing.StandardScaler``
        Instantiated inside ``fit``; holds the (possibly adjusted) center and
        the standard deviation.
    """
    def __init__(self, copy=True):
        self.copy = copy

    def fit(self, X, y=None):
        """Fit RobustStandardScaler to X.

        Sparse input is fitted without mean subtraction (which would break
        sparsity). Dense input is fitted with the mean, after which the mean of
        any feature that is zero in at least 70% of the samples is reset to 0.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to standardize.

        Returns
        -------
        self : RobustStandardScaler
        """
        X = check_array(
            X, accept_sparse=("csr", "csc"), estimator=self, dtype=FLOAT_DTYPES, force_all_finite="allow-nan"
        )
        dense_input = not issparse(X)
        self.scaler_ = StandardScaler(with_mean=dense_input, with_std=True, copy=self.copy)
        self.scaler_.fit(X)
        if self.scaler_.with_mean:
            # Keep the mean only for columns whose nonzero fraction exceeds 30%;
            # mostly-zero columns are scaled without centering.
            nonzero_fraction = np.count_nonzero(X, axis=0) / X.shape[0]
            keep_mean = np.where(nonzero_fraction > 0.3, 1, 0)
            self.scaler_.mean_ = self.scaler_.mean_ * keep_mean
        return self

    def transform(self, X):
        """
        Standardize data by centering and scaling.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data array to transform.

        Returns
        -------
        Xt : array-like, shape (n_samples, n_features)
            The array of transformed input.
        """
        return self.scaler_.transform(X)

    def _more_tags(self):
        return {"allow_nan": True}
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_array, check_is_fitted
from sklearn.preprocessing import QuantileTransformer, quantile_transform
def log_transform(x):
    """Apply a monotonic, log-like transformation.

    When every element of ``x`` is non-negative the plain shifted log
    ``log(x + 1)`` is used. Otherwise a symmetric variant,
    ``sign(x) * log(|x| + 1)``, is applied: the shifted log mirrored around the
    origin, so the domain is all real numbers and the sign of the input is
    preserved.
    """
    has_negative = not np.all(x >= 0)
    if has_negative:
        return np.sign(x) * np.log(np.abs(x) + 1)
    return np.log(x + 1)
def quantile_transform_nonrandom(x):
    """Quantile-transform a single column with a fixed ``random_state`` of 0.

    Maps a column with extreme values onto a uniform distribution using
    ``sklearn.preprocessing.quantile_transform``.
    """
    column = x.reshape((-1, 1))
    return quantile_transform(column, random_state=0, copy=True)
def identity(x):
    """Return the input unchanged (the no-op transform)."""
    return x
class BaseExtremeValueTransformer(BaseEstimator, TransformerMixin):
    """Apply a transformation to columns that contain "extreme" values.

    A value is considered "extreme" if it is greater than the ``quantile``
    percentile or less than the ``100 - quantile`` percentile of the data, and
    lies more than ``threshold_std`` standard deviations away from the mean.
    Heavy-tailed distributions are therefore more likely to be flagged.

    During ``transform`` every flagged column is passed through
    ``transform_function``; all other columns pass through untouched, so the
    output has exactly as many columns as the input. The default
    ``transform_function`` is the identity function.

    Parameters
    ----------
    quantile : int (default = 98)
        Percentile used for the upper cutoff; ``100 - quantile`` is used for
        the lower cutoff. Must be an integer between 0 and 100.

    threshold_std : float (default = 4.0)
        Number of standard deviations away from the mean (in standard units)
        beyond which a quantile cutoff marks a column as containing extreme
        values. The threshold is converted to each column's own units as
        ``threshold_std * np.std(X, axis=0) + np.mean(X, axis=0)``.

    transform_function : callable -> 1D np.array (default = identity)
        Function applied, column by column, to every flagged column during
        ``transform``. No state is kept between ``fit`` and ``transform``; to
        keep state, subclass ``BaseExtremeValueTransformer``.

    Attributes
    ----------
    n_input_features_ : int
        The number of columns in the input dataset.

    quantiles_ : 2D array (2, n_input_features_)
        For each column j, ``quantiles_[0, j]`` is the value of the
        ``(100 - quantile)`` percentile and ``quantiles_[1, j]`` is the value
        of the ``quantile`` percentile.

    cols_to_transform_ : list of int
        Indices of the columns to which ``transform_function`` is applied.

    Notes
    -----
    Accepts only two-dimensional, dense input arrays.

    Subclasses may override ``fit`` and ``_transform_function`` to keep state
    between the two stages; see
    ``sagemaker_sklearn_extension.preprocessing.LogExtremeValuesTransformer``
    and
    ``sagemaker_sklearn_extension.preprocessing.QuantileExtremeValuesTransformer``
    for examples.
    """

    def __init__(self, quantile=98, threshold_std=4.0, transform_function=identity):
        self.quantile = quantile
        self.threshold_std = threshold_std
        self.transform_function = transform_function

    def fit(self, X, y=None):
        """Compute the quantile cutoffs and decide which columns to transform.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data array to fit. Must be numeric, non-sparse, and
            two-dimensional.

        Returns
        -------
        self : BaseExtremeValueTransformer
        """
        if not 0 <= self.quantile <= 100:
            raise ValueError(
                "Parameter `quantile` {} is invalid. `quantile` must be an integer between 0 and 100".format(
                    self.quantile
                )
            )
        X = check_array(X)
        self.n_input_features_ = X.shape[1]
        self.quantiles_ = np.percentile(X, [100 - self.quantile, self.quantile], axis=0)
        # Express the std-deviation threshold in each column's own units.
        spread = self.threshold_std * np.std(X, axis=0)
        center = np.mean(X, axis=0)
        upper_bound = spread + center
        lower_bound = center - spread
        # A column is flagged when either cutoff quantile escapes the band.
        self.cols_to_transform_ = [
            j
            for j in range(self.n_input_features_)
            if self.quantiles_[0, j] < lower_bound[j] or self.quantiles_[1, j] > upper_bound[j]
        ]
        return self

    def transform(self, X, y=None):
        """Apply ``transform_function`` to the columns flagged during ``fit``.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The data array to transform. Must be numeric, non-sparse, and
            two-dimensional.

        Returns
        -------
        Xt : np.ndarray, shape (n_samples, n_features)
            The array of transformed input.
        """
        check_is_fitted(self, ["quantiles_", "cols_to_transform_"])
        X = check_array(X)
        if X.shape[1] != self.n_input_features_:
            raise ValueError("X shape does not match training shape.")
        flagged = set(self.cols_to_transform_)
        columns = []
        for j in range(self.n_input_features_):
            column = X[:, j]
            columns.append(self._transform_function(column, j) if j in flagged else column)
        return np.column_stack(columns)

    def _transform_function(self, x, idx=None):
        """Apply ``self.transform_function`` to a single column ``x``.

        ``idx`` (the column's position in the 2D input) is ignored here but is
        provided so that subclasses can look up per-column state.
        """
        return self.transform_function(x)
class LogExtremeValuesTransformer(BaseExtremeValueTransformer):
    """Log-transform columns that contain "extreme" values.

    Flagged columns (see ``BaseExtremeValueTransformer``) are transformed with
    ``log(x + 1)`` when the column contained only non-negative values during
    ``fit``, and otherwise with the symmetric variant
    ``sign(x) * log(|x| + 1)`` -- the shifted log mirrored around the origin,
    so the domain is all real numbers and the sign of the input is preserved.

    A value is considered "extreme" if it is greater than the ``quantile``
    percentile or less than the ``100 - quantile`` percentile of the data, and
    lies more than ``threshold_std`` standard deviations away from the mean.
    The output has exactly as many columns as the input.

    Parameters
    ----------
    quantile : int (default = 98)
        Percentile used for the upper cutoff; ``100 - quantile`` is used for
        the lower cutoff. Must be an integer between 0 and 100.

    threshold_std : float (default = 4.0)
        Number of standard deviations away from the mean (in standard units)
        beyond which a quantile cutoff marks a column as containing extreme
        values.

    Attributes
    ----------
    n_input_features_ : int
        The number of columns in the input dataset.

    quantiles_ : 2D array (2, n_input_features_)
        For each column j, ``quantiles_[0, j]`` is the value of the
        ``(100 - quantile)`` percentile and ``quantiles_[1, j]`` is the value
        of the ``quantile`` percentile.

    cols_to_transform_ : list of int
        Indices of the columns to which the log transform is applied.

    nonnegative_cols_ : list of int
        Indices of the columns that contained only non-negative values.

    Notes
    -----
    Accepts only two-dimensional, dense input arrays.

    This class inherits from
    ``sagemaker_sklearn_extension.preprocessing.BaseExtremeValueTransformer``.
    """

    def __init__(self, quantile=98, threshold_std=4.0):
        super().__init__(quantile=quantile, threshold_std=threshold_std)

    def fit(self, X, y=None):
        """Compute quantile cutoffs, columns to transform, and non-negative columns.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data array to fit. Must be numeric, non-sparse, and
            two-dimensional.

        Returns
        -------
        self : LogExtremeValuesTransformer
        """
        super().fit(X)
        X = check_array(X)
        is_nonnegative = np.all(X >= 0, axis=0)
        self.nonnegative_cols_ = [j for j in range(self.n_input_features_) if is_nonnegative[j]]
        return self

    def _transform_function(self, x, idx=None):
        """Log-transform one column, using the sign information gathered in ``fit``.

        Columns that were entirely non-negative during ``fit`` get the plain
        shifted log; all other columns get the symmetric, sign-preserving
        variant.
        """
        if idx not in self.nonnegative_cols_:
            return np.sign(x) * np.log(np.abs(x) + 1)
        return np.log(x + 1)
class QuantileExtremeValuesTransformer(BaseExtremeValueTransformer):
    """Applies a quantile transformation to columns which have "extreme" values.

    The quantile transformation is ``sklearn.preprocessing.QuantileTransformer``, which maps columns with extreme
    values onto a uniform distribution. Per-column quantiles are computed during the ``fit`` stage and stored as
    state, which is then used in ``transform``.

    A value is considered "extreme" if it is greater than ``quantile`` or less than 100 - ``quantile`` percent of
    the data, and is more than ``threshold_std`` many standard deviations away from the mean. Heavy-tailed
    distributions are therefore more likely to have "extreme" values.

    Number of output columns is the same as number of input columns: each column is either transformed or not.

    Parameters
    ----------
    quantile : int (default = 98)
        Used to calculate the lower and upper cutoff quantiles for a value to be considered "extreme".
        This must be an integer between 0 and 100.

    threshold_std : float (default = 4.0)
        Number of standard deviations away from the mean (in standard units). For a given column, if the magnitude
        of the quantile cutoffs is greater than the threshold_std cutoff, then that column contains an extreme
        value. ``threshold_std`` is converted to nonstandard units:
        ``nonstandard_thresholds = standard_threshold * np.std(X, axis=0) + np.mean(X, axis=0)``.

    Attributes
    ----------
    n_input_features_ : int
        The number of columns in the input dataset.

    quantiles_ : 2D array (2, n_input_features_)
        For each column j, ``quantiles_[0, j]`` is the value of the ``(100 - quantile)`` percentile and
        ``quantiles_[1, j]`` is the value of the ``quantile`` percentile.

    cols_to_transform_ : list of int
        List of column indices to determine which columns to apply the transformation to.

    quantile_transformer_ : ``sklearn.preprocessing.QuantileTransformer``
        Fitted instance holding the per-column quantiles used by ``_transform_function``.

    Notes
    -----
    Accepts only two-dimensional, dense input arrays.

    This class inherits from ``sagemaker_sklearn_extension.preprocessing.BaseExtremeValueTransformer``.
    """

    def __init__(self, quantile=98, threshold_std=4.0):
        super().__init__(quantile=quantile, threshold_std=threshold_std)

    def fit(self, X, y=None):
        """Compute the lower and upper quantile cutoffs, columns to transform, and each column's quantiles.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data array to fit. Must be numeric, non-sparse, and two-dimensional.

        Returns
        -------
        self : QuantileExtremeValuesTransformer
        """
        super().fit(X)
        self.quantile_transformer_ = QuantileTransformer(random_state=0, copy=True)
        self.quantile_transformer_.fit(X)
        return self

    def _transform_function(self, x, idx=None):
        """Apply a single-column quantile transform from ``sklearn.preprocessing.QuantileTransformer``.

        Uses the per-column quantiles computed during ``fit`` when a column index is given; when ``idx`` is None
        the quantiles are estimated from the input ``x`` itself.
        """
        # BUG FIX: the previous check was `if idx:`, which is falsy for column index 0,
        # so the first column silently ignored the quantiles fitted on the training data
        # and re-estimated them from the transform-time input instead.
        if idx is not None:
            return self.quantile_transformer_._transform_col(  # pylint: disable=protected-access
                x, self.quantile_transformer_.quantiles_[:, idx], False
            )
        return quantile_transform_nonrandom(x)
class RemoveConstantColumnsTransformer(BaseEstimator, TransformerMixin):
    """Remove columns that hold a single value.

    A column is dropped when all its entries are NaN, or when it contains
    exactly one distinct value.

    Examples
    --------
    >>> X = [[0, 1, 2, np.nan], [0, np.nan, 2, np.nan], [0, 1, 3, np.nan]]
    >>> selector = RemoveConstantColumnsTransformer()
    >>> selector.fit_transform(X)
    array([[ 1.,  2.],
           [nan,  2.],
           [ 1.,  3.]])

    Attributes
    ----------
    n_input_features_ : int
        The number of columns in the input dataset.

    cols_to_transform_ : boolean array of shape [n_input_features_, ]
        Mask that is True for every column holding only one value.
    """

    def fit(self, X, y=None):
        """Determine which columns are constant in X.

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            Input samples from which to check uniqueness.

        Returns
        -------
        self
        """
        X = check_array(X, force_all_finite=False)
        self.n_input_features_ = X.shape[1]
        # A column with a single distinct value is constant; an all-NaN column
        # is also treated as constant (np.unique may keep several NaNs apart).
        single_valued = np.array(
            [np.unique(X[:, j]).size == 1 for j in range(self.n_input_features_)]
        )
        self.cols_to_transform_ = single_valued | np.all(np.isnan(X), axis=0)
        return self

    def transform(self, X):
        """Drop the constant columns identified during ``fit``.

        Parameters
        ----------
        X : array of shape [n_samples, n_input_features_]
            The input samples.

        Returns
        -------
        X_t : array of shape [n_samples, n_selected_features]
            The input samples without the constant columns.
        """
        check_is_fitted(self, "cols_to_transform_")
        X = check_array(X, force_all_finite=False)
        if X.shape[1] != self.n_input_features_:
            raise ValueError("X shape does not match training shape.")
        # Special case kept from the original implementation: when every
        # column is constant, return an empty (0, n_input_features_) array.
        if self.cols_to_transform_.all():
            return np.empty((0, self.n_input_features_), dtype=X.dtype)
        return X[:, ~self.cols_to_transform_]

    def _more_tags(self):
        return {"allow_nan": True}
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import MissingIndicator, SimpleImputer
from sklearn.utils.validation import check_array, check_is_fitted
def is_finite_numeric(arr):
    """Elementwise check of whether values can be converted to a finite float.

    Parameters
    ----------
    arr : np.array
        Array of arbitrary values (numbers, strings, None, ...).

    Returns
    -------
    np.array of bool
        True where the value converts to a finite float; False otherwise,
        including non-numeric strings, inf/nan, and values such as None that
        ``float`` rejects outright.
    """

    def _is_finite_numeric(val):
        try:
            f = float(val)
            return np.isfinite(f)
        # TypeError covers values like None that float() rejects outright;
        # previously only ValueError was caught, which crashed on such values.
        except (TypeError, ValueError):
            return False

    # otypes=[bool] keeps np.vectorize from failing on empty input arrays.
    return np.vectorize(_is_finite_numeric, otypes=[bool])(arr)
def _get_mask(X, vectorized_mask_function):
"""Compute boolean mask of X for vectorized_mask_function(X) == False
"""
return np.logical_not(vectorized_mask_function(X).astype("bool"))
def _apply_mask(X, mask):
X[mask] = np.nan
return X
class RobustImputer(BaseEstimator, TransformerMixin):
    """Imputer for completing missing values.

    Similar to sklearn.impute.SimpleImputer with added functionality:

    - RobustImputer uses a custom mask_function to determine values to impute.
      The default mask_function is sagemaker_sklearn_extension.impute.is_finite_numeric,
      which checks if a value can be converted into a float.
    - RobustImputer can perform multi-column imputation with different values
      for each column (strategy=="constant").

    Parameters
    ----------
    dtype : string, type, list of types or None (default=None)
        Data type for output.
        - If left to default, numeric imputation strategies ("median" and "mean")
          produce a floating point output array dtype. Otherwise it will be
          np.dtype('O').

    strategy : string, optional (default='median')
        The imputation strategy.
        - If "mean", then replace missing values using the mean along
          each column. Can only be used with numeric data.
        - If "median", then replace missing values using the median along
          each column. Can only be used with numeric data.
        - If "most_frequent", then replace missing using the most frequent
          value along each column. Can be used with strings or numeric data.
        - If "constant", then replace missing values with fill_values.
          fill_values can be a singular value or a list of values equal to
          number of columns. Can be used with strings or numeric data.
          If fill_values is not set, fill_value will be 0 when imputing numerical
          data and "missing_value" for strings or object data types.

    fill_values : string, numerical value, or list, optional (default=None)
        When strategy=="constant", fill_values is used to replace all
        values that should be imputed.
        - If string or numerical value, that one value will be used to replace
          all values that should be imputed.
        - If list, fill_values must be equal to number of columns of input. Each
          column will be imputed with the corresponding value in fill_values:
          fill_values[i] will replace the ith column (X[:,i]).
        - If left to the default, fill_value will be 0 when imputing numerical
          data and "missing_value" for strings or object data types.

    mask_function : callable -> np.array, dtype('bool') (default=None)
        A vectorized python function that accepts an np.array and returns an
        np.array with dtype('bool').
        For each value, if mask_function(val) == False, that value will
        be imputed. mask_function is used to create a boolean mask that determines
        which values in the input to impute.
        Use np.vectorize to vectorize singular python functions.
        If left to default, mask_function will be
        sagemaker_sklearn_extension.impute.is_finite_numeric.

    Notes
    -----
    Only accepts 2D, non-sparse inputs.
    """
    def __init__(self, dtype=None, strategy="median", fill_values=None, mask_function=None):
        self.dtype = dtype
        self.strategy = strategy
        self.fill_values = fill_values
        self.mask_function = mask_function
    def _validate_input(self, X):
        """Validate X: check the fill_values length, reject complex data, and coerce via check_array."""
        if self._is_constant_multicolumn_imputation():
            # Per-column fill values only make sense with exactly one value per feature.
            if len(self.fill_values) != X.shape[1]:
                raise ValueError(
                    "'fill_values' should have length equal to number of features in X {num_features}, "
                    "got {fill_values_length}".format(num_features=X.shape[1], fill_values_length=len(self.fill_values))
                )
        # Validate with object dtype by default so arbitrary (e.g. string) values survive masking.
        dtype = self.dtype or np.dtype("O")
        if hasattr(X, "dtype") and X.dtype is not None and hasattr(X.dtype, "kind") and X.dtype.kind == "c":
            raise ValueError("Complex data not supported\n{}\n".format(X))
        return check_array(X, dtype=dtype, copy=True, force_all_finite=False, ensure_2d=True)
    def _is_constant_multicolumn_imputation(self):
        """True when strategy is "constant" and a per-column sequence of fill values was given."""
        return self.strategy == "constant" and isinstance(self.fill_values, (list, tuple, np.ndarray))
    def fit(self, X, y=None):
        """Fit the imputer on X.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Input data, where ``n_samples`` is the number of samples and
            ``n_features`` is the number of features.

        Returns
        -------
        self : RobustImputer
        """
        X = self._validate_input(X)
        self.vectorized_mask_function_ = self.mask_function or is_finite_numeric
        # Convert every value the mask rejects into NaN so SimpleImputer treats it as missing.
        X = _apply_mask(X, _get_mask(X, self.vectorized_mask_function_))
        if self._is_constant_multicolumn_imputation():
            # fill_value is left to its default here; the per-column values are
            # written directly into statistics_ below.
            self.simple_imputer_ = SimpleImputer(strategy=self.strategy)
        else:
            self.simple_imputer_ = SimpleImputer(strategy=self.strategy, fill_value=self.fill_values)
        self.simple_imputer_.fit(X)
        # set "SimpleImputer.statistics_" for multicolumn imputations with different column fill values;
        # SimpleImputer itself cannot perform multicolumn imputation with different column fill values
        if self._is_constant_multicolumn_imputation():
            self.simple_imputer_.statistics_ = np.asarray(self.fill_values)
        return self
    def transform(self, X):
        """Impute all missing values in X.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            The input data to complete.

        Returns
        -------
        Xt : {ndarray}, shape (n_samples, n_features)
            The imputed input data. The data type of ``Xt``
            will depend on your input dtype.
        """
        check_is_fitted(self, ["simple_imputer_", "vectorized_mask_function_"])
        X = self._validate_input(X)
        if X.shape[1] != self.simple_imputer_.statistics_.shape[0]:
            raise ValueError(
                "'transform' input X has {transform_dim} features per sample, "
                "expected {fit_dim} from 'fit' input".format(
                    transform_dim=X.shape[1], fit_dim=self.simple_imputer_.statistics_.shape[0]
                )
            )
        X = _apply_mask(X, _get_mask(X, self.vectorized_mask_function_))
        # NOTE(review): .astype(None) lets numpy pick the default float dtype, so with
        # dtype=None the output is float64; for non-numeric results (e.g. strategy
        # "most_frequent" on strings) that cast would raise -- confirm callers pass an
        # explicit dtype for non-numeric data.
        return self.simple_imputer_.transform(X).astype(self.dtype)
    def _more_tags(self):
        return {"allow_nan": True}
class RobustMissingIndicator(BaseEstimator, TransformerMixin):
    """Binary indicators for missing values.

    Note that this component typically should not be used in a vanilla
    :class:`sklearn.pipeline.Pipeline` consisting of transformers and a
    classifier, but rather could be added using a
    :class:`sklearn.pipeline.FeatureUnion` or
    :class:`sklearn.compose.ColumnTransformer`.

    Similar to sklearn.impute.MissingIndicator with added functionality:
    RobustMissingIndicator uses a custom mask_function to determine the boolean
    mask. The default mask_function is
    sagemaker_sklearn_extension.impute.is_finite_numeric, which checks whether
    or not a value can be converted into a float.

    Parameters
    ----------
    features : str, optional (default="all")
        Whether the imputer mask should represent all or a subset of features.
        - If "missing-only", the imputer mask will only represent
          features containing missing values during fit time.
        - If "all" (default), the imputer mask will represent all features.

    error_on_new : boolean, optional (default=True)
        If True (default), transform will raise an error when there are
        features with missing values in transform that have no missing values
        in fit. This is applicable only when ``features="missing-only"``.

    mask_function : callable -> np.array, dtype('bool') (default=None)
        A vectorized python function that accepts an np.array and returns an
        np.array with dtype('bool'). For each value, if
        mask_function(val) == False, that value is treated as missing.
        Use np.vectorize to vectorize singular python functions.
        By default, mask_function will be
        sagemaker_sklearn_extension.impute.is_finite_numeric.

    Notes
    -----
    Only accepts 2D, non-sparse inputs.
    """

    def __init__(self, features="all", error_on_new=True, mask_function=None):
        self.features = features
        self.error_on_new = error_on_new
        self.mask_function = mask_function

    def _validate_input(self, X):
        """Reject complex data and coerce X to a copied 2D object array."""
        dtype_kind = getattr(getattr(X, "dtype", None), "kind", None)
        if dtype_kind == "c":
            raise ValueError("Complex data not supported\n{}\n".format(X))
        return check_array(X, dtype=np.dtype("O"), copy=True, force_all_finite=False, ensure_2d=True)

    def fit(self, X, y=None):
        """Fit the transformer on X.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Input data, where ``n_samples`` is the number of samples and
            ``n_features`` is the number of features.

        Returns
        -------
        self : RobustMissingIndicator
        """
        X = self._validate_input(X)
        self.vectorized_mask_function_ = self.mask_function or is_finite_numeric
        # Values rejected by the mask become NaN so MissingIndicator sees them as missing.
        masked = _apply_mask(X, _get_mask(X, self.vectorized_mask_function_))
        self.missing_indicator_ = MissingIndicator(features=self.features, error_on_new=self.error_on_new)
        self.missing_indicator_.fit(masked)
        return self

    def transform(self, X):
        """Generate the missing-values indicator for X.

        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            The input data to complete.

        Returns
        -------
        Xt : {ndarray}, shape (n_samples, n_features)
            The missing indicator for input data. The data type of ``Xt``
            will be boolean.
        """
        check_is_fitted(self, ["missing_indicator_", "vectorized_mask_function_"])
        X = self._validate_input(X)
        masked = _apply_mask(X, _get_mask(X, self.vectorized_mask_function_))
        return self.missing_indicator_.transform(masked)

    def _more_tags(self):
        return {"allow_nan": True}
import json
import logging
import os
from typing import Any
logger = logging.getLogger(__name__)
# Records at or below this level are routed to stdout; anything above to stderr.
STDOUT_LEVEL = logging.INFO
class JSONFormatter(logging.Formatter):
    def format(self, record: logging.LogRecord) -> str:
        """Render *record* as one JSON object per message line.

        CloudWatch does not separate log streams, so each object carries a
        "source" field ("stdout"/"stderr") derived from the record level, and
        an "internal" flag that distinguishes logs from this application from
        logs produced by the invocation subprocess.
        """
        formatted = super().format(record=record)
        # CloudWatch unifies the streams, so annotate each entry with its source.
        stream = "stdout" if record.levelno <= STDOUT_LEVEL else "stderr"
        task_pk = getattr(getattr(record, "task", None), "pk", None)
        entries = []
        for line in formatted.splitlines():
            entries.append(
                json.dumps(
                    {
                        "log": line,
                        "level": record.levelname,
                        "source": stream,
                        "internal": getattr(record, "internal", True),
                        "task": task_pk,
                    }
                )
            )
        return "\n".join(entries)
class StdStreamFilter(logging.Filter):
    """Split stdout and stderr streams by record severity."""

    def __init__(self, *args: Any, stdout: bool, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.__stdout = stdout

    def filter(self, record: logging.LogRecord) -> bool:
        """Return True when the record belongs on this filter's stream."""
        # stdout carries STDOUT_LEVEL and below; stderr carries everything above.
        goes_to_stdout = record.levelno <= STDOUT_LEVEL
        return goes_to_stdout if self.__stdout else not goes_to_stdout
# dictConfig-style logging setup: JSON-formatted output, with records routed
# to stdout or stderr by severity via the StdStreamFilter pair.
LOGGING_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,
    "filters": {
        # One filter per stream so every record appears on exactly one of them.
        "stdout": {
            "()": StdStreamFilter,
            "stdout": True,
        },
        "stderr": {
            "()": StdStreamFilter,
            "stdout": False,
        },
    },
    "formatters": {
        "json": {
            "()": JSONFormatter,
        },
    },
    "handlers": {
        "console_stdout": {
            "class": "logging.StreamHandler",
            "formatter": "json",
            "stream": "ext://sys.stdout",
            "filters": ["stdout"],
        },
        "console_stderr": {
            "class": "logging.StreamHandler",
            "formatter": "json",
            "stream": "ext://sys.stderr",
            "filters": ["stderr"],
        },
    },
    "root": {
        # Defaults to INFO; override via the LOG_LEVEL environment variable.
        "level": os.environ.get("LOG_LEVEL", "INFO").upper(),
        "handlers": ["console_stdout", "console_stderr"],
    },
}
import logging
import re
import zipfile
from os.path import commonpath
from pathlib import Path
from sagemaker_shim.exceptions import ZipExtractionError
from sagemaker_shim.vendor.werkzeug.security import safe_join
logger = logging.getLogger(__name__)
def _filter_members(members: list[zipfile.ZipInfo]) -> list[dict[str, str]]:
"""Filter common prefixes and uninteresting files from a zip archive"""
members = [
m
for m in members
if not m.is_dir()
and re.search(r"(__MACOSX|\.DS_Store|desktop.ini)", m.filename) is None
]
# Remove any common parent directories
if len(members) == 1:
path = str(Path(members[0].filename).parent)
path = "" if path == "." else path
else:
path = commonpath([m.filename for m in members])
if path:
sliced_path = slice(len(path) + 1, None, None)
else:
sliced_path = slice(None, None, None)
return [
{"src": m.filename, "dest": m.filename[sliced_path]} for m in members
]
def safe_extract(*, src: Path, dest: Path) -> None:
    """Safely extract the zip file *src* into the existing directory *dest*.

    Any common prefixes and system files are removed, and every destination
    path is validated with ``safe_join`` so that archive entries cannot escape
    *dest* (zip-slip protection).

    Raises:
        RuntimeError: if *dest* does not exist.
        ZipExtractionError: if the archive contains an invalid path.
    """
    if not dest.exists():
        raise RuntimeError("The destination must exist")
    with src.open("rb") as f:
        with zipfile.ZipFile(f) as zf:
            members = _filter_members(zf.infolist())
            for member in members:
                file_dest = safe_join(str(dest), member["dest"])
                if file_dest is None:
                    raise ZipExtractionError("Zip file contains invalid paths")
                # We know that the dest is within the prefix as safe_join is
                # used, and the destination is already created, so it is ok to
                # create the parents here.
                file_dest.parent.mkdir(exist_ok=True, parents=True)
                logger.info(
                    f"Extracting {member['src']=} from {src} to {file_dest}"
                )
                # Stream in 8 KiB chunks to keep memory bounded for large files.
                with zf.open(member["src"], "r") as fs, open(
                    file_dest, "wb"
                ) as fd:
                    while chunk := fs.read(8192):
                        fd.write(chunk)
from typing import Union
import requests
import socket
from ssl import get_server_certificate, SSLError
from sagemaker_studio_analytics_extension.utils.string_utils import *
from sagemaker_studio_analytics_extension.utils.constants import (
VerifyCertificateArgument,
)
def check_host_and_port(host, port):
    """
    Check if the port is alive for designated host.
    :param host: host name
    :param port: port to check
    :return: True or False indicate if port is alive
    """
    if is_blank(host):
        # Plain string (no f-prefix needed: there are no placeholders).
        print("[Error] Host must not be empty.")
        return False
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        # Fail fast (5 second timeout) instead of hanging on unreachable hosts.
        sock.settimeout(5)
        try:
            result = sock.connect_ex((host, port))
            if result == 0:
                return True
            # connect_ex returns an error indicator instead of raising an
            # exception; print the information so it is displayed in cell
            # output when used in a notebook.
            print(
                f"Host: {host} port: {port} is not connectible via socket with {result} returned."
            )
            return False
        except OSError as msg:
            # use print directly as for jupyter env the error message will be
            # displayed in cell output
            print(
                f"[Error] Failed to check host and port [{host}:{port}]. Error message: {msg}"
            )
            return False
def is_ssl_enabled(host, port):
    """
    Check if the host/port is SSL enabled.
    :param host: host name
    :param port: port to check
    :return: True or False indicate if SSL is enabled or not
    """
    try:
        return get_server_certificate((host, port)) is not None
    except SSLError:
        # Only SSL failures mean "not SSL enabled"; other error types propagate.
        return False
def verify_ssl_cert(
    host, port, cert_arg: VerifyCertificateArgument
) -> Union[Exception, None]:
    """
    Attempts to connect to https://host:port using the provided cert setting to
    verify that the cert is valid.

    If a PathToCert is provided, we expect to get the following exceptions:
    1. SSLError -- If we could not connect to the host:port via SSL
    2. OSError -- If the provided path is invalid

    :param host: host name
    :param port: port to connect to
    :param cert_arg: VerifyCertificateArgument | Value can be True (Use public cert) / False (Do not validate cert) / PathToCert (Path to local cert)
    :return: the exception raised during verification, or None on success
    """
    try:
        requests.get("https://{}:{}".format(host, port), verify=cert_arg.value)
    except Exception as e:
        # Catch every exception (Exception already subsumes the SSLError and
        # OSError cases listed above) so that this code path doesn't fail
        # execution. Most likely, the same exception will also be raised when
        # we actually connect to the given host:port; failure is relegated to
        # that code path.
        return e
    return None
=====================================
SageMaker TensorFlow Training Toolkit
=====================================
The SageMaker TensorFlow Training Toolkit is an open source library for making the
TensorFlow framework run on `Amazon SageMaker <https://aws.amazon.com/documentation/sagemaker/>`__.
This repository also contains Dockerfiles which install this library, TensorFlow, and dependencies
for building SageMaker TensorFlow images.
For information on running TensorFlow jobs on SageMaker:
- `SageMaker Python SDK documentation <https://sagemaker.readthedocs.io/en/stable/using_tf.html>`__
- `SageMaker Notebook Examples <https://github.com/awslabs/amazon-sagemaker-examples>`__
Table of Contents
-----------------
#. `Getting Started <#getting-started>`__
#. `Building your Image <#building-your-image>`__
#. `Running the tests <#running-the-tests>`__
Getting Started
---------------
Prerequisites
~~~~~~~~~~~~~
Make sure you have installed all of the following prerequisites on your
development machine:
- `Docker <https://www.docker.com/>`__
For Testing on GPU
^^^^^^^^^^^^^^^^^^
- `Nvidia-Docker <https://github.com/NVIDIA/nvidia-docker>`__
Recommended
^^^^^^^^^^^
- A Python environment management tool. (e.g.
`PyEnv <https://github.com/pyenv/pyenv>`__,
`VirtualEnv <https://virtualenv.pypa.io/en/stable/>`__)
Building your Image
-------------------
`Amazon SageMaker <https://aws.amazon.com/documentation/sagemaker/>`__
utilizes Docker containers to run all training jobs & inference endpoints.
The Docker images are built from the Dockerfiles specified in
`docker/ <https://github.com/aws/sagemaker-tensorflow-containers/tree/master/docker>`__.
The Dockerfiles are grouped based on TensorFlow version and separated
based on Python version and processor type.
The Dockerfiles for TensorFlow 2.0+ are available in the
`tf-2 <https://github.com/aws/sagemaker-tensorflow-container/tree/tf-2>`__ branch.
To build the images, first copy the files under
`docker/build_artifacts/ <https://github.com/aws/sagemaker-tensorflow-container/tree/tf-2/docker/build_artifacts>`__
to the folder containing the Dockerfile you wish to build.
::
# Example for building a TF 2.1 image with Python 3
cp docker/build_artifacts/* docker/2.1.0/py3/.
After that, go to the directory containing the Dockerfile you wish to build,
and run ``docker build`` to build the image.
::
# Example for building a TF 2.1 image for CPU with Python 3
cd docker/2.1.0/py3
docker build -t tensorflow-training:2.1.0-cpu-py3 -f Dockerfile.cpu .
Don't forget the period at the end of the ``docker build`` command!
Running the tests
-----------------
Running the tests requires installation of the SageMaker TensorFlow Training Toolkit code and its test
dependencies.
::
git clone https://github.com/aws/sagemaker-tensorflow-container.git
cd sagemaker-tensorflow-container
pip install -e .[test]
Tests are defined in
`test/ <https://github.com/aws/sagemaker-tensorflow-container/tree/master/test>`__
and include unit, integration and functional tests.
Unit Tests
~~~~~~~~~~
If you want to run unit tests, then use:
::
# All test instructions should be run from the top level directory
pytest test/unit
Integration Tests
~~~~~~~~~~~~~~~~~
Running integration tests require `Docker <https://www.docker.com/>`__ and `AWS
credentials <https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html>`__,
as the integration tests make calls to a couple AWS services. The integration and functional
tests require configurations specified within their respective
`conftest.py <https://github.com/aws/sagemaker-tensorflow-containers/blob/master/test/integration/conftest.py>`__. Make sure to update the account-id and region at a minimum.
Integration tests on GPU require `Nvidia-Docker <https://github.com/NVIDIA/nvidia-docker>`__.
Before running integration tests:
#. Build your Docker image.
#. Pass in the correct pytest arguments to run tests against your Docker image.
If you want to run local integration tests, then use:
::
# Required arguments for integration tests are found in test/integ/conftest.py
pytest test/integration --docker-base-name <your_docker_image> \
--tag <your_docker_image_tag> \
--framework-version <tensorflow_version> \
--processor <cpu_or_gpu>
::
# Example
pytest test/integration --docker-base-name preprod-tensorflow \
--tag 1.0 \
--framework-version 1.4.1 \
--processor cpu
Functional Tests
~~~~~~~~~~~~~~~~
Functional tests are removed from the current branch, please see them in older branch `r1.0 <https://github.com/aws/sagemaker-tensorflow-container/tree/r1.0#functional-tests>`__.
Contributing
------------
Please read
`CONTRIBUTING.md <https://github.com/aws/sagemaker-tensorflow-containers/blob/master/CONTRIBUTING.md>`__
for details on our code of conduct, and the process for submitting pull
requests to us.
License
-------
SageMaker TensorFlow Containers is licensed under the Apache 2.0 License. It is copyright 2018
Amazon.com, Inc. or its affiliates. All Rights Reserved. The license is available at:
http://aws.amazon.com/apache2.0/
| /sagemaker_tensorflow_training-20.4.1.tar.gz/sagemaker_tensorflow_training-20.4.1/README.rst | 0.932576 | 0.769037 | README.rst | pypi |
from __future__ import absolute_import
import json
import logging
import multiprocessing
import os
import subprocess
import time
from sagemaker_training import entry_point, environment, mapping, runner
import tensorflow as tf
from sagemaker_tensorflow_container import s3_utils
logger = logging.getLogger(__name__)
# Keys under which the SageMaker Python SDK passes distribution settings in the
# additional framework parameters.
SAGEMAKER_PARAMETER_SERVER_ENABLED = "sagemaker_parameter_server_enabled"
SAGEMAKER_DISTRIBUTED_DATAPARALLEL_ENABLED = "sagemaker_distributed_dataparallel_enabled"
SAGEMAKER_MULTI_WORKER_MIRRORED_STRATEGY_ENABLED = (
    "sagemaker_multi_worker_mirrored_strategy_enabled"
)
# Standard SageMaker container location where the model artifacts are saved.
MODEL_DIR = "/opt/ml/model"
def _is_host_master(hosts, current_host):
return current_host == hosts[0]
def _build_tf_config_for_ps(hosts, current_host, ps_task=False):
"""Builds a dictionary containing cluster information based on number of hosts and number of
parameter servers.
Args:
hosts (list[str]): List of host names in the cluster
current_host (str): Current host name
ps_task (bool): Set to True if this config is built for a parameter server process
(default: False)
Returns:
dict[str: dict]: A dictionary describing the cluster setup for distributed training.
For more information regarding TF_CONFIG:
https://cloud.google.com/ml-engine/docs/tensorflow/distributed-training-details
"""
# Assign the first host as the master. Rest of the hosts if any will be worker hosts.
# The first ps_num hosts will also have a parameter task assign to them.
masters = hosts[:1]
workers = hosts[1:]
ps = hosts if len(hosts) > 1 else None
def host_addresses(hosts, port=2222):
return ["{}:{}".format(host, port) for host in hosts]
tf_config = {"cluster": {"master": host_addresses(masters)}, "environment": "cloud"}
if ps:
tf_config["cluster"]["ps"] = host_addresses(ps, port="2223")
if workers:
tf_config["cluster"]["worker"] = host_addresses(workers)
if ps_task:
if ps is None:
raise ValueError(
"Cannot have a ps task if there are no parameter servers in the cluster"
)
task_type = "ps"
task_index = ps.index(current_host)
elif _is_host_master(hosts, current_host):
task_type = "master"
task_index = 0
else:
task_type = "worker"
task_index = workers.index(current_host)
tf_config["task"] = {"index": task_index, "type": task_type}
return tf_config
def _build_tf_config_for_mwms(hosts, current_host):
"""Builds a dictionary containing cluster information based on number of workers
for Multi Worker Mirrored distribution strategy.
Args:
hosts (list[str]): List of host names in the cluster
current_host (str): Current host name
Returns:
dict[str: dict]: A dictionary describing the cluster setup for distributed training.
For more information regarding TF_CONFIG:
https://cloud.google.com/ml-engine/docs/tensorflow/distributed-training-details
"""
workers = hosts
def host_addresses(hosts, port=8890):
return ["{}:{}".format(host, port) for host in hosts]
tf_config = {"cluster": {}, "environment": "cloud"}
tf_config["cluster"]["worker"] = host_addresses(workers)
tf_config["task"] = {"index": workers.index(current_host), "type": "worker"}
return tf_config
def _run_ps(env, cluster):
    """Start a TensorFlow parameter server for ``cluster`` in a background process.

    Args:
        env (sagemaker_training.environment.Environment): container environment;
            ``env.hosts``/``env.current_host`` determine this host's ps task index.
        cluster (dict): the "cluster" section of the TF_CONFIG built for
            parameter-server training.
    """
    logger.info("Running distributed training job with parameter servers")
    cluster_spec = tf.train.ClusterSpec(cluster)
    task_index = env.hosts.index(env.current_host)
    # Force parameter server to run on cpu. Running multiple TensorFlow processes on the same
    # GPU is not safe:
    # https://stackoverflow.com/questions/46145100/is-it-unsafe-to-run-multiple-tensorflow-processes-on-the-same-gpu
    no_gpu_config = tf.compat.v1.ConfigProto(device_count={"GPU": 0})
    server = tf.distribute.Server(
        cluster_spec, job_name="ps", task_index=task_index, config=no_gpu_config
    )
    # server.join() blocks forever, so it runs in a child process. NOTE(review):
    # the lambda target is only picklable under the default "fork" start method
    # (Linux) -- TODO confirm if other start methods are ever in play.
    multiprocessing.Process(target=lambda: server.join()).start()
def _run_worker(env, cmd_args, tf_config):
    """Run the user entry point as a TensorFlow worker with TF_CONFIG exported."""
    worker_env_vars = env.to_env_vars()
    # The TF_CONFIG environment variable tells TensorFlow its role in the cluster.
    worker_env_vars["TF_CONFIG"] = json.dumps(tf_config)
    entry_point.run(
        uri=env.module_dir,
        user_entry_point=env.user_entry_point,
        args=cmd_args,
        env_vars=worker_env_vars,
        capture_error=True,
    )
def _wait_until_master_is_down(master):
    """Block until the master host stops accepting connections on port 2222."""
    while True:
        try:
            # curl exits non-zero once the master's port is no longer reachable.
            subprocess.check_call(
                ["curl", "{}:2222".format(master)], stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
        except subprocess.CalledProcessError:
            logger.info("master {} is down, stopping parameter server".format(master))
            return
        logger.info("master {} is still up, waiting for it to exit".format(master))
        time.sleep(10)
def train(env, cmd_args):
    """Get training job environment from env and run the training job.

    Args:
        env (sagemaker_training.environment.Environment): Instance of Environment class
        cmd_args (list): command-line arguments derived from the user hyperparameters
    """
    parameter_server_enabled = (
        env.additional_framework_parameters.get(SAGEMAKER_PARAMETER_SERVER_ENABLED, False)
        and len(env.hosts) > 1
    )
    multi_worker_mirrored_strategy_enabled = env.additional_framework_parameters.get(
        SAGEMAKER_MULTI_WORKER_MIRRORED_STRATEGY_ENABLED, False
    )
    sagemaker_distributed_dataparallel_enabled = env.additional_framework_parameters.get(
        SAGEMAKER_DISTRIBUTED_DATAPARALLEL_ENABLED, False
    )
    env_vars = env.to_env_vars()

    # Default runner. Previously this was only assigned inside the setup branch
    # below, which raised NameError for hosts outside the distribution instance
    # groups.
    runner_type = runner.ProcessRunnerType

    # Setup
    if env.current_instance_group in env.distribution_instance_groups:
        if parameter_server_enabled:
            tf_config = _build_tf_config_for_ps(
                hosts=env.distribution_hosts, current_host=env.current_host
            )
            logger.info("Running distributed training job with parameter servers")
        elif multi_worker_mirrored_strategy_enabled:
            env_vars["TF_CONFIG"] = json.dumps(
                _build_tf_config_for_mwms(
                    hosts=env.distribution_hosts, current_host=env.current_host
                )
            )
            logger.info("Running distributed training job with multi_worker_mirrored_strategy setup")

    # Run
    if parameter_server_enabled:
        # NOTE(review): assumes every host belongs to the distribution instance
        # groups when parameter servers are enabled, so tf_config was built in
        # the setup branch above -- confirm for heterogeneous-cluster jobs.
        logger.info("Launching parameter server process")
        _run_ps(env, tf_config["cluster"])
        logger.info("Launching worker process")
        _run_worker(env, cmd_args, tf_config)

        # Parameter servers never exit on their own; non-master hosts wait for
        # the master to finish before shutting down.
        if not _is_host_master(env.hosts, env.current_host):
            _wait_until_master_is_down(env.hosts[0])
    else:
        if env.current_instance_group in env.distribution_instance_groups:
            mpi_enabled = env.additional_framework_parameters.get("sagemaker_mpi_enabled")
            if mpi_enabled:
                runner_type = runner.MPIRunnerType
            elif sagemaker_distributed_dataparallel_enabled:
                runner_type = runner.SMDataParallelRunnerType
        entry_point.run(
            uri=env.module_dir,
            user_entry_point=env.user_entry_point,
            args=cmd_args,
            env_vars=env_vars,
            capture_error=True,
            runner_type=runner_type,
        )
def _log_model_missing_warning(model_dir):
    """Warn when the training job did not save a servable TensorFlow model.

    Walks ``model_dir`` and warns if (a) no files were saved at all, (b) no
    SavedModel protobuf was found, or (c) the SavedModel is not under a
    numerically named version directory as TF Serving requires.
    Uses logger.warning (logger.warn is a deprecated alias).
    """
    pb_file_exists = False
    file_exists = False
    for dirpath, dirnames, filenames in os.walk(model_dir):
        if filenames:
            file_exists = True
        for f in filenames:
            if "saved_model.pb" in f or "saved_model.pbtxt" in f:
                pb_file_exists = True
                # TF Serving expects SavedModels under a numeric version folder.
                path, direct_parent_dir = os.path.split(dirpath)
                if not str.isdigit(direct_parent_dir):
                    logger.warning(
                        "Your model will NOT be servable with SageMaker TensorFlow Serving containers. "
                        'The SavedModel bundle is under directory "{}", not a numeric name.'.format(
                            direct_parent_dir
                        )
                    )
    if not file_exists:
        logger.warning(
            "No model artifact is saved under path {}."
            " Your training job will not save any model files to S3.\n"
            "For details of how to construct your training script see:\n"
            "https://sagemaker.readthedocs.io/en/stable/using_tf.html#adapting-your-local-tensorflow-script".format(
                model_dir
            )
        )
    elif not pb_file_exists:
        logger.warning(
            "Your model will NOT be servable with SageMaker TensorFlow Serving container. "
            "The model artifact was not saved in the TensorFlow SavedModel directory structure:\n"
            "https://www.tensorflow.org/guide/saved_model#structure_of_a_savedmodel_directory"
        )
def _model_dir_with_training_job(model_dir, job_name):
if model_dir and model_dir.startswith("/opt/ml"):
return model_dir
else:
return "{}/{}/model".format(model_dir, job_name)
def main():
    """Training entry point: configure the environment and launch training."""
    hyperparameters = environment.read_hyperparameters()
    env = environment.Environment(hyperparameters=hyperparameters)

    user_hyperparameters = env.hyperparameters

    # If the training job is part of the multiple training jobs for tuning, we need
    # to append the training job name to model_dir in case they read from/write to
    # the same object
    if "_tuning_objective_metric" in hyperparameters:
        model_dir = _model_dir_with_training_job(hyperparameters.get("model_dir"), env.job_name)
        logger.info("Appending the training job name to model_dir: {}".format(model_dir))
        user_hyperparameters["model_dir"] = model_dir

    s3_utils.configure(user_hyperparameters.get("model_dir"), os.environ.get("SAGEMAKER_REGION"))
    train(env, mapping.to_cmd_args(user_hyperparameters))
    # Warn (after training) if no servable model was written to the model dir.
    _log_model_missing_warning(MODEL_DIR)
from __future__ import absolute_import
import errno
import json
import os
import tensorflow as tf
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import dtypes
def _load_plugin():
    """Load the PipeModeOp TensorFlow plugin shipped next to this module.

    Uses os.path to locate the shared object; the previous manual string
    splitting produced a doubled leading slash and mislocated the plugin for
    relative ``__file__`` values.
    """
    tf_plugin_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "libPipeModeOp.so")
    return tf.load_op_library(tf_plugin_path)
class PipeModeDatasetException(Exception):
    """An error using a PipeModeDataset."""
class PipeModeDataset(dataset_ops.Dataset):
    """A SageMaker Pipe Mode TensorFlow Dataset."""

    # Loaded once at class-definition time; all instances share the plugin ops.
    _tf_plugin = _load_plugin()

    def __init__(self, channel, record_format='RecordIO',
                 state_dir='/opt/ml/pipe_state', pipe_dir='/opt/ml/input/data',
                 config_dir='/opt/ml/input/config', benchmark=False, benchmark_records_interval=0,
                 max_corrupted_records_to_skip=0):
        """Create a Dataset for reading from a SageMaker PipeMode channel.

        Supports records encoded using either RecordIO, TFRecord, or new line text encoding.

        Args:
            record_format: The record format to use. One of 'RecordIO', 'TFRecord', or 'TextLine'
            channel: The name of the SageMaker channel.
            pipe_dir: The directory to read SageMaker Channels from.
            state_dir: The directory where pipe index state is persisted.
            config_dir: The path for SageMaker input data config.
            benchmark: Controls whether to emit timing and throughput metrics after closing an Iterator created from
                this Dataset. If True, timing and throughput metrics will be emitted to stdout after an Iterator
                created from this Dataset is destroyed.
            benchmark_records_interval: Controls whether to emit timing and throughput metrics while records are being
                read from this Dataset. Defines the number of records per interval to emit timing and throughput
                metrics. If zero, no metrics will be emitted while records are being read from this Dataset.
                Metrics are emitted to stdout.
            max_corrupted_records_to_skip: the number of corrupted records encountered in sequence that it's ok to
                skip. Only applicable for record_format='TFRecord'.
        """
        try:
            os.makedirs(state_dir)
        except OSError as e:
            # The state directory may already exist from a previous run.
            if e.errno != errno.EEXIST:
                raise
        self.record_format = record_format
        self.channel = channel
        self.pipe_dir = pipe_dir
        self.state_dir = state_dir
        self.benchmark = benchmark
        self.benchmark_records_interval = benchmark_records_interval
        self.max_corrupted_records_to_skip = max_corrupted_records_to_skip
        with open(os.path.join(config_dir, 'inputdataconfig.json')) as f:
            self.input_data_config = json.load(f)
        self._validate_input_data_config()
        if self.max_corrupted_records_to_skip > 0 and record_format != 'TFRecord':
            raise PipeModeDatasetException("max_corrupted_records_to_skip can only be set for record_format='TFRecord'")
        super(PipeModeDataset, self).__init__(variant_tensor=self._as_variant_tensor())

    def _as_variant_tensor(self):
        # Delegate to the native PipeModeOp plugin to build the dataset tensor.
        return self._tf_plugin.pipe_mode_dataset(self.benchmark, self.record_format, self.state_dir, self.channel,
                                                 self.pipe_dir, self.benchmark_records_interval,
                                                 self.max_corrupted_records_to_skip)

    def _inputs(self):
        # This dataset is a source: it has no upstream input datasets.
        return []

    def _validate_input_data_config(self):
        """Raise PipeModeDatasetException unless the channel exists and is in Pipe mode."""
        if self.channel not in self.input_data_config:
            raise PipeModeDatasetException("Channel {} not found in Training Job InputDataConfig".format(self.channel))
        if self.input_data_config[self.channel].get('TrainingInputMode', "").lower() != "pipe":
            raise PipeModeDatasetException("Channel {} is not a PipeMode channel".format(self.channel))

    @property
    def output_classes(self):
        """The return type of this Dataset."""
        return ops.Tensor

    @property
    def output_shapes(self):
        """The shape of the output Tensor."""
        return tensor_shape.TensorShape([])

    @property
    def output_types(self):
        """The type of data stored in the output Tensor."""
        return dtypes.string

    @property
    def element_spec(self):
        """Type specification of a dataset element: a scalar string tensor."""
        return tensor_spec.TensorSpec(
            shape=self.output_shapes,
            dtype=self.output_types,
            name=self.channel,
        )

# SageMaker Training Toolkit
[](https://pypi.python.org/pypi/sagemaker-training) [](https://pypi.python.org/pypi/sagemaker-training) [](https://github.com/python/black)
Train machine learning models within a Docker container using Amazon SageMaker.
## :books: Background
[Amazon SageMaker](https://aws.amazon.com/sagemaker/) is a fully managed service for data science and machine learning (ML) workflows.
You can use Amazon SageMaker to simplify the process of building, training, and deploying ML models.
To train a model, you can include your training script and dependencies in a [Docker container](https://www.docker.com/resources/what-container) that runs your training code.
A container provides an effectively isolated environment, ensuring a consistent runtime and reliable training process.
The **SageMaker Training Toolkit** can be easily added to any Docker container, making it compatible with SageMaker for [training models](https://aws.amazon.com/sagemaker/train/).
If you use a [prebuilt SageMaker Docker image for training](https://docs.aws.amazon.com/sagemaker/latest/dg/pre-built-containers-frameworks-deep-learning.html), this library may already be included.
For more information, see the Amazon SageMaker Developer Guide sections on [using Docker containers for training](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html).
## :hammer_and_wrench: Installation
To install this library in your Docker image, add the following line to your [Dockerfile](https://docs.docker.com/engine/reference/builder/):
``` dockerfile
RUN pip3 install sagemaker-training
```
## :computer: Usage
The following are brief how-to guides.
For complete, working examples of custom training containers built with the SageMaker Training Toolkit, please see [the example notebooks](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/advanced_functionality/custom-training-containers).
### Create a Docker image and train a model
1. Write a training script (eg. `train.py`).
2. [Define a container with a Dockerfile](https://docs.docker.com/get-started/part2/#define-a-container-with-dockerfile) that includes the training script and any dependencies.
The training script must be located in the `/opt/ml/code` directory.
The environment variable `SAGEMAKER_PROGRAM` defines which file inside the `/opt/ml/code` directory to use as the training entry point.
When training starts, the interpreter executes the entry point defined by `SAGEMAKER_PROGRAM`.
Python and shell scripts are both supported.
``` docker
FROM yourbaseimage:tag
# install the SageMaker Training Toolkit
RUN pip3 install sagemaker-training
# copy the training script inside the container
COPY train.py /opt/ml/code/train.py
# define train.py as the script entry point
ENV SAGEMAKER_PROGRAM train.py
```
3. Build and tag the Docker image.
``` shell
docker build -t custom-training-container .
```
4. Use the Docker image to start a training job using the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk).
``` python
from sagemaker.estimator import Estimator
estimator = Estimator(image_name="custom-training-container",
role="SageMakerRole",
train_instance_count=1,
train_instance_type="local")
estimator.fit()
```
To train a model using the image on SageMaker, [push the image to ECR](https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-push-ecr-image.html) and start a SageMaker training job with the image URI.
### Pass arguments to the entry point using hyperparameters
Any hyperparameters provided by the training job are passed to the entry point as script arguments.
The SageMaker Python SDK uses this feature to pass special hyperparameters to the training job, including `sagemaker_program` and `sagemaker_submit_directory`.
The complete list of SageMaker hyperparameters is available [here](https://github.com/aws/sagemaker-training-toolkit/blob/master/src/sagemaker_training/params.py).
1. Implement an argument parser in the entry point script. For example, in a Python script:
``` python
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--learning-rate", type=int, default=1)
parser.add_argument("--batch-size", type=int, default=64)
parser.add_argument("--communicator", type=str)
parser.add_argument("--frequency", type=int, default=20)
args = parser.parse_args()
...
```
2. Start a training job with hyperparameters.
``` python
{"HyperParameters": {"batch-size": 256, "learning-rate": 0.0001, "communicator": "pure_nccl"}}
```
### Read additional information using environment variables
An entry point often needs additional information not available in `hyperparameters`.
The SageMaker Training Toolkit writes this information as environment variables that are available from within the script.
For example, this training job includes the channels `training` and `testing`:
``` python
from sagemaker.pytorch import PyTorch
estimator = PyTorch(entry_point="train.py", ...)
estimator.fit({"training": "s3://bucket/path/to/training/data",
"testing": "s3://bucket/path/to/testing/data"})
```
The environment variables `SM_CHANNEL_TRAINING` and `SM_CHANNEL_TESTING` provide the paths to the channels:
``` python
import argparse
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser()
...
# reads input channels training and testing from the environment variables
parser.add_argument("--training", type=str, default=os.environ["SM_CHANNEL_TRAINING"])
parser.add_argument("--testing", type=str, default=os.environ["SM_CHANNEL_TESTING"])
args = parser.parse_args()
...
```
When training starts, SageMaker Training Toolkit will print all available environment variables. Please see the [reference on environment variables](https://github.com/aws/sagemaker-training-toolkit/blob/master/ENVIRONMENT_VARIABLES.md) for a full list of provided environment variables.
### Get information about the container environment
To get information about the container environment, initialize an `Environment` object.
`Environment` provides access to aspects of the environment relevant to training jobs, including hyperparameters, system characteristics, filesystem locations, environment variables and configuration settings.
It is a read-only snapshot of the container environment during training, and it doesn't contain any form of state.
``` python
from sagemaker_training import environment
env = environment.Environment()
# get the path of the channel "training" from the `inputdataconfig.json` file
training_dir = env.channel_input_dirs["training"]
# get the hyperparameter "training_data_file" from `hyperparameters.json` file
file_name = env.hyperparameters["training_data_file"]
# get the folder where the model should be saved
model_dir = env.model_dir
# train the model
data = np.load(os.path.join(training_dir, file_name))
x_train, y_train = data["features"], keras.utils.to_categorical(data["labels"])
model = ResNet50(weights="imagenet")
...
model.fit(x_train, y_train)
#save the model to the model_dir at the end of training
model.save(os.path.join(model_dir, "saved_model"))
```
### Execute the entry point
To execute the entry point, call `entry_point.run()`.
``` python
from sagemaker_training import entry_point, environment
env = environment.Environment()
# read hyperparameters as script arguments
args = env.to_cmd_args()
# get the environment variables
env_vars = env.to_env_vars()
# execute the entry point
entry_point.run(uri=env.module_dir,
user_entry_point=env.user_entry_point,
args=args,
env_vars=env_vars)
```
If the entry point execution fails, `trainer.train()` will write the error message to `/opt/ml/output/failure`. Otherwise, it will write to the file `/opt/ml/output/success`.
## :scroll: License
This library is licensed under the [Apache 2.0 License](http://aws.amazon.com/apache2.0/).
For more details, please take a look at the [LICENSE](https://github.com/aws/sagemaker-training-toolkit/blob/master/LICENSE) file.
## :handshake: Contributing
Contributions are welcome!
Please read our [contributing guidelines](https://github.com/aws/sagemaker-training-toolkit/blob/master/CONTRIBUTING.md)
if you'd like to open an issue or submit a pull request.
| /sagemaker_training-4.7.0.tar.gz/sagemaker_training-4.7.0/README.md | 0.859487 | 0.960435 | README.md | pypi |
"""This module contains utility functions used to generate recordio-protobuf format."""
import struct
import sys
import numpy as np
from scipy.sparse import issparse
from sagemaker_training.record_pb2 import Record
def _resolve_type(dtype):
"""Return the type string corresponding to the numpy.dtype.
Args:
dtype (numpy.dtype or str): numpy.dtype object.
Returns:
(str): String corresponding to the dtype.
"""
if dtype == np.dtype(int):
return "Int32"
if dtype == np.dtype(float):
return "Float64"
if dtype == np.dtype("float32"):
return "Float32"
raise ValueError("Unsupported dtype {} on array".format(dtype))
def _write_feature_tensor(resolved_type, record, vector):
"""Write the feature tensor in the record based on the resolved type.
Args:
resolved_type (str): String representing the feature type.
record (Record object): Record object to write to.
vector (np.array or csr_matrix): Represents the row (1D Array).
"""
if resolved_type == "Int32":
record.features["values"].int32_tensor.values.extend(vector)
elif resolved_type == "Float64":
record.features["values"].float64_tensor.values.extend(vector)
elif resolved_type == "Float32":
record.features["values"].float32_tensor.values.extend(vector)
def _write_label_tensor(resolved_type, record, scalar):
"""Writes the label to record based on the resolved type.
Args:
resolved_type (str): String representing the feature type.
record (Record object): Record object.
scalar (int or float32 or float64): Label value.
"""
if resolved_type == "Int32":
record.label["values"].int32_tensor.values.extend([scalar])
elif resolved_type == "Float64":
record.label["values"].float64_tensor.values.extend([scalar])
elif resolved_type == "Float32":
record.label["values"].float32_tensor.values.extend([scalar])
def _write_keys_tensor(resolved_type, record, vector):
"""Write the keys entries in the Record object.
Args:
resolved_type (str): Representing the type of key entry.
record (Record object): Record to which the key will be added.
vector (array): Array of keys to be added.
"""
if resolved_type == "Int32":
record.features["values"].int32_tensor.keys.extend(vector)
elif resolved_type == "Float64":
record.features["values"].float64_tensor.keys.extend(vector)
elif resolved_type == "Float32":
record.features["values"].float32_tensor.keys.extend(vector)
def _write_shape(resolved_type, record, scalar):
"""Writes the shape entry in the Record.
Args:
resolved_type (str): Representing the type of key entry.
record (Record object): Record to which the key will be added.
scalar (int or float32 or float64): The shape to added to the record.
"""
if resolved_type == "Int32":
record.features["values"].int32_tensor.shape.extend([scalar])
elif resolved_type == "Float64":
record.features["values"].float64_tensor.shape.extend([scalar])
elif resolved_type == "Float32":
record.features["values"].float32_tensor.shape.extend([scalar])
def _write_numpy_to_dense_tensor(file, array, labels=None):
    """Serialize every row of a 2-D numpy array as a dense recordio record.

    Args:
        file (file-like object): File-like object where the
                                 records will be written.
        array (numpy array): Numpy array containing the features.
        labels (numpy array): Numpy array containing the labels.

    Raises:
        ValueError: If ``array`` is not 2-D, ``labels`` is not 1-D, or the
            label count does not match either array dimension.
    """
    # Validate shapes and resolve the tensor types before writing anything.
    if len(array.shape) != 2:
        raise ValueError("Array must be a Matrix")
    resolved_label_type = None
    if labels is not None:
        if len(labels.shape) != 1:
            raise ValueError("Labels must be a Vector")
        if labels.shape[0] not in array.shape:
            raise ValueError(
                "Label shape {} not compatible with array shape {}".format(
                    labels.shape, array.shape
                )
            )
        resolved_label_type = _resolve_type(labels.dtype)
    resolved_type = _resolve_type(array.dtype)

    # One Record per row; the Record instance is reused via Clear().
    record = Record()
    for row_index, row in enumerate(array):
        record.Clear()
        _write_feature_tensor(resolved_type, record, row)
        if labels is not None:
            _write_label_tensor(resolved_label_type, record, labels[row_index])
        _write_recordio(file, record.SerializeToString())
def _write_spmatrix_to_sparse_tensor(file, array, labels=None):
    """Serialize every row of a scipy sparse matrix as a sparse recordio record.

    Args:
        file (file-like object): File-like object where the
                                 records will be written.
        array (array-like): A sparse matrix containing features.
        labels (numpy array): Numpy array containing the labels.

    Raises:
        TypeError: If ``array`` is not a scipy sparse matrix.
        ValueError: If shapes are incompatible (see the dense writer).
    """
    if not issparse(array):
        raise TypeError("Array must be sparse")
    # Validate shapes and resolve the tensor types before writing anything.
    if len(array.shape) != 2:
        raise ValueError("Array must be a Matrix")
    resolved_label_type = None
    if labels is not None:
        if len(labels.shape) != 1:
            raise ValueError("Labels must be a Vector")
        if labels.shape[0] not in array.shape:
            raise ValueError(
                "Label shape {} not compatible with array shape {}".format(
                    labels.shape, array.shape
                )
            )
        resolved_label_type = _resolve_type(labels.dtype)
    resolved_type = _resolve_type(array.dtype)

    # CSR gives O(1) row slicing; each row becomes one Record.
    csr_array = array.tocsr()
    n_rows, n_cols = csr_array.shape
    record = Record()
    for row_idx in range(n_rows):
        record.Clear()
        row = csr_array.getrow(row_idx)
        # Values, then column indices (keys), then optional label, then shape.
        _write_feature_tensor(resolved_type, record, row.data)
        _write_keys_tensor(resolved_type, record, row.indices.astype(np.uint64))
        if labels is not None:
            _write_label_tensor(resolved_label_type, record, labels[row_idx])
        _write_shape(resolved_type, record, n_cols)
        _write_recordio(file, record.SerializeToString())
# MXNet requires recordio records have length in bytes that's a multiple of 4
# This sets up padding bytes to append to the end of the record, for different
# amounts of padding required.
padding = {}
for amount in range(4):
    if sys.version_info >= (3,):
        padding[amount] = bytes([0x00 for _ in range(amount)])
    else:
        padding[amount] = bytearray([0x00 for _ in range(amount)])
# Magic number written at the start of every recordio record.
_kmagic = 0xCED7230A
def _write_recordio(f, data):
    """Frame ``data`` with the RecordIO magic/length header and write it out.

    Args:
        f (file-like object): The file-like object to which the data point will be written.
        data (bytes): Serialized record payload.
    """
    length = len(data)
    # Round the payload length up to the next multiple of 4 bytes.
    pad_amount = (((length + 3) >> 2) << 2) - length
    f.write(struct.pack("I", _kmagic))
    f.write(struct.pack("I", length))
    f.write(data)
    f.write(padding[pad_amount])
def _read_recordio(f):
    """Generator that yields the payload of each RecordIO record in ``f``.

    Iteration stops cleanly at end of file (when the 4-byte magic can no
    longer be unpacked).

    Args:
        f: File like object.
    """
    while True:
        header = f.read(4)
        try:
            (magic,) = struct.unpack("I", header)
        except struct.error:
            # Short/empty read at EOF: no more records.
            return
        assert magic == _kmagic
        (record_length,) = struct.unpack("I", f.read(4))
        yield f.read(record_length)
        # Skip the padding that rounds the record up to a 4-byte boundary.
        pad_amount = (((record_length + 3) >> 2) << 2) - record_length
        if pad_amount:
            f.read(pad_amount)
from __future__ import absolute_import
import os
import socket
import sys
from retrying import retry
from sagemaker_training import _entry_point_type, environment, files, modules, runner
def run(
    uri,
    user_entry_point,
    args,
    env_vars=None,
    wait=True,
    capture_error=False,
    runner_type=runner.ProcessRunnerType,
    extra_opts=None,
):
    """Fetch, prepare and execute a user entry point.

    Downloads (or copies) the entry point from ``uri``, installs it, writes the
    requested environment variables, waits until every host in the cluster
    resolves via DNS, and then runs the entry point with ``args``.

    Depending on what the entry point is, execution looks like:
        - A Python package: env_vars python -m module_name + args
        - A Python script:  env_vars python module_name + args
        - Anything else:    env_vars /bin/sh -c ./module_name + args

    Args:
        uri (str): The location of the module or script. This can be an S3 uri, a path to
                   a local directory, or a path to a local tarball.
        user_entry_point (str): Name of the user provided entry point.
        args ([str]): A list of program arguments.
        env_vars (dict(str,str)): A map containing the environment variables to be written
                                  (default: None).
        wait (bool): If the user entry point should be run to completion before this method
                     returns (default: True).
        capture_error (bool): Default false. If True, the running process captures the
                              stderr, and appends it to the returned Exception message in
                              case of errors.
        runner_type (sagemaker_training.runner.RunnerType): The type of runner object to
                                                            be created (default:
                                                            sagemaker_training.runner.ProcessRunnerType).
        extra_opts (dict(str,str)): Additional options for running the entry point
                                    (default: None). Currently, this only applies for MPI.

    Returns:
        sagemaker_training.process.ProcessRunner: The runner object responsible for
                                                  executing the entry point.
    """
    # Copy so the caller's dict is never mutated downstream.
    environment_variables = dict(env_vars) if env_vars else {}

    files.download_and_extract(uri=uri, path=environment.code_dir)
    install(name=user_entry_point, path=environment.code_dir, capture_error=capture_error)
    environment.write_env_vars(environment_variables)

    # Cluster hosts may not be resolvable immediately after boot; block until they are.
    _wait_hostname_resolution()

    entry_point_runner = runner.get(
        runner_type, user_entry_point, args, environment_variables, extra_opts
    )
    return entry_point_runner.run(wait, capture_error)
def install(name, path=environment.code_dir, capture_error=False):
    """Prepare the user entry point for execution.

    Prepends the entry point's directory to ``sys.path``, installs the entry
    point (or its requirements) when it is Python code, and makes it
    executable when it is a shell command.

    Args:
        name (str): Name of the script or module.
        path (str): Path to directory where the entry point will be installed.
        capture_error (bool): Default false. If True, the running process captures the
                              stderr, and appends it to the returned Exception message in
                              case of errors.
    """
    if path not in sys.path:
        sys.path.insert(0, path)

    kind = _entry_point_type.get(path, name)
    if kind is _entry_point_type.PYTHON_PACKAGE:
        modules.install(path, capture_error)
    elif kind is _entry_point_type.PYTHON_PROGRAM and modules.has_requirements(path):
        modules.install_requirements(path, capture_error)

    if kind is _entry_point_type.COMMAND:
        # 0o777 == 511: make the command executable by everyone.
        os.chmod(os.path.join(path, name), 0o777)
@retry(stop_max_delay=1000 * 60 * 15, wait_exponential_multiplier=100, wait_exponential_max=30000)
def _dns_lookup(host):
    """Retrying DNS lookup on host.

    Retries with exponential backoff (100 ms base, capped at 30 s per wait)
    for up to 15 minutes before the last exception propagates.

    Args:
        host (str): Hostname to resolve.

    Returns:
        str: The IP address returned by ``socket.gethostbyname``.
    """
    return socket.gethostbyname(host)
def _wait_hostname_resolution():
    """Wait for the hostname resolution of the container. This is known behavior as the cluster
    boots up and has been documented here:
     https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo-running-container.html#your-algorithms-training-algo-running-container-dist-training
    """
    # Block (with retries, see _dns_lookup) until every host in the cluster resolves.
    for host in environment.Environment().hosts:
        _dns_lookup(host)
"""This module contains functionality related to distributed training using
PT-XLA (PyTorch - Accelerated Linear Algebra)."""
from __future__ import absolute_import
import os
from sagemaker_training import (
_entry_point_type,
environment,
errors,
logging_config,
process,
)
logger = logging_config.get_logger()
class PyTorchXLARunner(process.ProcessRunner):
    """Responsible for PT-XLA distributed training."""

    # TCP port of the XRT mesh service started on the master host (multi-host only).
    MESH_SERVICE_PORT = 53957
    # TCP port each XRT worker listens on.
    WORKER_PORT = 43857

    def __init__(
        self,
        user_entry_point,
        args,
        env_vars,
        processes_per_host,
        master_hostname,
        current_host,
        hosts,
        num_gpus,
    ):
        """Initialize a PyTorchXLARunner, which is responsible for distributed
        training with PT-XLA.

        Args:
            user_entry_point (str): The name of the user entry point.
            args ([str]): A list of arguments to include when executing the entry point.
            env_vars (dict(str,str)): A dictionary of environment variables.
            processes_per_host (int): Number of processes to launch per host.
            master_hostname (str): The master hostname.
            current_host (str): The current hostname.
            hosts ([str]): A list of hosts.
            num_gpus (int): The number of GPUs available per host.
        """
        super(PyTorchXLARunner, self).__init__(user_entry_point, args, env_vars, processes_per_host)
        self._master_hostname = master_hostname
        self._current_host = current_host
        self._hosts = hosts
        self._num_gpus = num_gpus
        self._num_hosts = len(self._hosts)
        # This host's ordinal is its position in the host list; assumes the
        # list order is identical on every host — TODO confirm.
        self._rank = self._hosts.index(self._current_host)

    def _setup(self):  # type: () -> None
        """Export the NCCL/EFA and XRT environment variables for this host."""
        logger.info("Starting distributed training through PT-XLA Runtime.")
        self._check_compatibility()
        # Set NCCL logging to info to debug customer issues
        os.environ["NCCL_DEBUG"] = "info"
        # Use `simple` protocol to handle the out-of-order data delivery from EFA
        os.environ["NCCL_PROTO"] = "simple"
        # Use GPU RDMA when available (available only in p4d.24xlarge)
        os.environ["FI_EFA_USE_DEVICE_RDMA"] = "1"
        # Use multiple connections per GPU to better saturate the EFA bandwidth
        os.environ["OFI_NCCL_NIC_DUP_CONNS"] = str(self._num_gpus)
        # Set cluster configuration for XLA runtime
        os.environ["XRT_HOST_ORDINAL"] = str(self._rank)
        os.environ["XRT_SHARD_WORLD_SIZE"] = str(self._num_hosts)
        # One worker spec per host, formatted "localservice:<ordinal>;<host>:<port>".
        address = "localservice:{};{}:" + str(self.WORKER_PORT)
        os.environ["XRT_WORKERS"] = "|".join(
            [address.format(i, host) for i, host in enumerate(self._hosts)]
        )
        os.environ["GPU_NUM_DEVICES"] = str(self._num_gpus)
        if self._num_hosts > 1:
            # The mesh service address is only needed for multi-host training.
            os.environ[
                "XRT_MESH_SERVICE_ADDRESS"
            ] = f"{self._master_hostname}:{self.MESH_SERVICE_PORT}"
        logger.info("Completed environment setup for distributed training through PT-XLA Runtime.")

    def _create_command(self):
        """Build the launch command for the entry point via torch_xla's spawner.

        Returns:
            list: The full command line to execute.

        Raises:
            errors.SMTrainingCompilerConfigurationError: If the entry point is
                not a plain Python script.
        """
        entrypoint_type = _entry_point_type.get(environment.code_dir, self._user_entry_point)
        if entrypoint_type is _entry_point_type.PYTHON_PACKAGE:
            raise errors.SMTrainingCompilerConfigurationError(
                "Distributed Training through PT-XLA is not supported for Python packages. "
                "Please use a python script as the entry-point"
            )
        if entrypoint_type is _entry_point_type.PYTHON_PROGRAM:
            return self._pytorch_xla_command() + [self._user_entry_point] + self._args
        else:
            raise errors.SMTrainingCompilerConfigurationError(
                "Distributed Training through PT-XLA is only supported for Python scripts. "
                "Please use a python script as the entry-point"
            )

    def _pytorch_xla_command(self):
        """Return the interpreter + xla_spawn launcher prefix of the command."""
        return self._python_command() + [
            "-m",
            "torch_xla.distributed.xla_spawn",
            "--num_gpus",
            str(self._num_gpus),
        ]

    def _check_compatibility(self):
        """Validate that the environment can run PT-XLA distributed training."""
        self._check_processor_compatibility()
        self._check_for_torch_xla()
        self._check_for_sagemaker_integration()

    def _check_for_sagemaker_integration(self):
        """Ensure the SageMaker-specific xla_spawn module is importable.

        Raises:
            errors.SMTrainingCompilerConfigurationError: If the module is missing.
        """
        # pylint: disable=no-self-use
        try:
            import torch_xla.distributed.xla_spawn  # pylint: disable=unused-import # noqa: F401
        except ModuleNotFoundError as exception:
            raise errors.SMTrainingCompilerConfigurationError(
                "Unable to find SageMaker integration code in PT-XLA. "
                "AWS SageMaker adds custom code on top of open source "
                "PT-XLA to provide platform specific "
                "optimizations. These SageMaker specific binaries are"
                " shipped as part of our Deep Learning Containers."
                " Please refer to "
                "https://github.com/aws/deep-learning-containers"
                "/blob/master/available_images.md"
            ) from exception

    def _check_for_torch_xla(self):
        """Ensure the torch_xla package is importable.

        Raises:
            errors.SMTrainingCompilerConfigurationError: If torch_xla is missing.
        """
        # pylint: disable=no-self-use
        try:
            import torch_xla  # pylint: disable=unused-import # noqa: F401
        except ModuleNotFoundError as exception:
            raise errors.SMTrainingCompilerConfigurationError(
                "Unable to find PT-XLA in the execution environment. "
                "This distribution mechanism requires PT-XLA to be available"
                " in the execution environment. "
                "SageMaker Training Compiler provides ready-to-use containers with PT-XLA. "
                "Please refer to https://github.com/aws/deep-learning-containers"
                "/blob/master/available_images.md "
            ) from exception

    def _check_processor_compatibility(self):
        """Ensure at least one GPU is available.

        Raises:
            errors.SMTrainingCompilerConfigurationError: If no GPUs are present.
        """
        if not self._num_gpus > 0:
            raise errors.SMTrainingCompilerConfigurationError(
                "Distributed training through PT-XLA is only supported for GPUs."
            )
from __future__ import absolute_import
import contextlib
import json
import os
import shutil
import tarfile
import tempfile
import boto3
from six.moves.urllib import parse
from sagemaker_training import environment, logging_config, params
logger = logging_config.get_logger()
def write_success_file():  # type: () -> None
    """Create a file 'success' when training is successful. This file doesn't need to
    have any content.
    See: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html
    """
    # An empty marker file in the output directory signals success.
    write_file(os.path.join(environment.output_dir, "success"), "")
def write_failure_file(failure_msg):  # type: (str) -> None
    """Create a file 'failure' if training fails after all algorithm output (for example,
    logging) completes, the failure description should be written to this file. In a
    DescribeTrainingJob response, Amazon SageMaker returns the first 1024 characters from
    this file as FailureReason.
    See: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html

    Args:
        failure_msg: The description of failure.
    """
    failure_path = os.path.join(environment.output_dir, "failure")
    # Only the first failure is recorded; later calls are no-ops.
    if os.path.exists(failure_path):
        logger.info("Failure file exists. Skipping creation....")
    else:
        write_file(failure_path, failure_msg)
@contextlib.contextmanager
def tmpdir(suffix="", prefix="tmp", directory=None):  # type: (str, str, str) -> str
    """Create a temporary directory with a context manager. The directory is deleted
    when the context exits, even if the body raises an exception.

    The prefix, suffix, and dir arguments are the same as for mkstemp().

    Args:
        suffix (str): If suffix is specified, the file name will end with that suffix,
                      otherwise there will be no suffix.
        prefix (str): If prefix is specified, the file name will begin with that prefix;
                      otherwise, a default prefix is used.
        directory (str): If directory is specified, the file will be created in that directory;
                         otherwise, a default directory is used.

    Yields:
        str: Path to the directory.
    """
    tmp = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=directory)
    try:
        yield tmp
    finally:
        # Previously the cleanup was skipped when the body raised, leaking the
        # temp dir; finally guarantees removal on both paths.
        shutil.rmtree(tmp)
def write_file(path, data, mode="w"):  # type: (str, str, str) -> None
    """Write ``data`` to the file at ``path``.

    Args:
        path (str): Path to the file.
        data (str): Data to be written to the file.
        mode (str): Mode which the file will be open (e.g. "w" to overwrite, "a" to append).
    """
    with open(path, mode) as file_object:
        file_object.write(data)
def read_file(path, mode="r"):
    """Return the entire contents of the file at ``path``.

    Args:
        path (str): Path to the file.
        mode (str): Mode which the file will be open.

    Returns:
        The file contents (str for text modes, bytes for binary modes).
    """
    with open(path, mode) as file_object:
        return file_object.read()
def read_json(path):  # type: (str) -> dict
    """Parse the JSON file at ``path``.

    Args:
        path (str): Path to the file.

    Returns:
        (dict[object, object]): A dictionary representation of the JSON file.
    """
    with open(path, "r") as json_file:
        return json.load(json_file)
def download_and_extract(uri, path):  # type: (str, str) -> None
    """Download, prepare and install a compressed tar file from S3 or local directory as
    an entry point.

    SageMaker Python SDK saves the user provided entry points as compressed tar files in S3.

    Args:
        uri (str): the location of the entry point: an s3:// uri, a local directory,
            a local tarball, or a plain local file.
        path (str): The path where the script will be installed. It will not download and
            install the entry point if the path already has the user entry point.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    # A non-empty target directory means the entry point is already installed.
    if not os.listdir(path):
        with tmpdir() as tmp:
            if uri.startswith("s3://"):
                # Download the tarball to a temp location, then extract into path.
                dst = os.path.join(tmp, "tar_file")
                s3_download(uri, dst)
                # NOTE(review): extractall() trusts the archive's member paths;
                # for untrusted tarballs members should be validated first.
                with tarfile.open(name=dst, mode="r:gz") as t:
                    t.extractall(path=path)
            elif os.path.isdir(uri):
                # Local directory: replace path with a copy of it (no-op if identical).
                if uri == path:
                    return
                if os.path.exists(path):
                    shutil.rmtree(path)
                shutil.copytree(uri, path)
            elif tarfile.is_tarfile(uri):
                # Local tarball: extract in place.
                with tarfile.open(name=uri, mode="r:gz") as t:
                    t.extractall(path=path)
            else:
                # Single local file: copy it into the target directory.
                shutil.copy2(uri, path)
def s3_download(url, dst):  # type: (str, str) -> None
    """Download a file from S3.

    Args:
        url (str): the s3 url of the file.
        dst (str): the destination where the file will be saved.

    Raises:
        ValueError: If ``url`` does not use the ``s3`` scheme.
    """
    # Keep the original string for the error message; previously `url` was
    # rebound to the ParseResult, so the message showed the parse-result repr
    # instead of the URL itself.
    parsed_url = parse.urlparse(url)
    if parsed_url.scheme != "s3":
        raise ValueError("Expecting 's3' scheme, got: %s in %s" % (parsed_url.scheme, url))
    bucket, key = parsed_url.netloc, parsed_url.path.lstrip("/")

    # AWS_REGION takes precedence over the SageMaker region env var.
    region = os.environ.get("AWS_REGION", os.environ.get(params.REGION_NAME_ENV))
    endpoint_url = os.environ.get(params.S3_ENDPOINT_URL, None)
    s3 = boto3.resource("s3", region_name=region, endpoint_url=endpoint_url)

    s3.Bucket(bucket).download_file(key, dst)
from __future__ import absolute_import
import collections
import collections.abc
import itertools
import json
import six
SplitResultSpec = collections.namedtuple("SplitResultSpec", "included excluded")
def to_env_vars(mapping):  # type: (dict) -> dict
    """Render a mapping as SageMaker-style environment variables.

    Keys are upper-cased and prefixed with ``SM_``; values become strings
    (byte strings are decoded, ``None`` becomes "", strings pass through,
    everything else is serialized as compact JSON).

    Example:
        >>>env_vars = mapping.to_env_vars({'model_dir': '/opt/ml/model', 'batch_size': 25})
        >>>
        >>>print(args)
        ['MODEL_DIR', '/opt/ml/model', 'BATCH_SIZE', 25]

    Args:
        mapping (dict[str, object]): A Python mapping.

    Returns:
        (dict): Dictionary of env vars.
    """

    def _env_name(key):
        # Empty/falsy keys map to the empty string, mirroring the value rule.
        return "SM_%s" % str(key).upper() if key else ""

    def _env_value(value):
        if six.PY3 and isinstance(value, six.binary_type):
            # transforms a byte string (b'') in unicode
            return value.decode("latin1")
        if value is None:
            return ""
        if isinstance(value, six.string_types):
            return str(value)
        return json.dumps(value, sort_keys=True, separators=(",", ":"), ensure_ascii=True)

    return {_env_name(key): _env_value(value) for key, value in mapping.items()}
def to_cmd_args(mapping):  # type: (dict) -> list
    """Render a mapping as a flat, sorted list of command-line arguments.

    Example:
        >>>args = mapping.to_cmd_args({'model_dir': '/opt/ml/model', 'batch_size': 25})
        >>>
        >>>print(args)
        ['--model_dir', '/opt/ml/model', '--batch_size', 25]

    Args:
        mapping (dict[str, object]): A Python mapping.

    Returns:
        (list): List of cmd arguments.
    """
    ordered_keys = sorted(mapping.keys())

    def _flag(name):
        decoded = _decode(name)
        if not decoded:
            return ""
        # Single-character names get a short flag, everything else a long flag.
        return ("--%s" if len(decoded) > 1 else "-%s") % decoded

    def _argument_value(raw):
        if hasattr(raw, "items"):
            # Nested mappings collapse to "k1=v1,k2=v2" in sorted key order.
            return ",".join("%s=%s" % entry for entry in sorted(raw.items()))
        return _decode(raw)

    flags = [_flag(key) for key in ordered_keys]
    values = [_argument_value(mapping[key]) for key in ordered_keys]
    return list(itertools.chain.from_iterable(zip(flags, values)))
def _decode(obj):  # type: (bytes or str or unicode or object) -> unicode  # noqa ignore=F821
    """Decode an object to unicode.

    Args:
        obj (bytes or str or unicode or anything serializable): Object to be decoded.

    Returns:
        Object decoded in unicode; ``None`` decodes to the empty string.
    """
    if obj is None:
        return ""
    if six.PY3 and isinstance(obj, six.binary_type):
        # transforms a byte string (b'') in unicode
        return obj.decode("latin1")
    elif six.PY3:
        # PY3 strings are unicode.
        return str(obj)
    elif isinstance(obj, six.text_type):
        # returns itself if it is unicode
        return obj
    else:
        # decodes Py2 string to unicode
        return str(obj).decode("utf-8")
def split_by_criteria(
    dictionary, keys=None, prefix=None
):  # type: (dict, set or list or tuple) -> SplitResultSpec
    """Partition a dictionary into the entries matching the criteria and the rest.

    An entry matches when its key is in ``keys`` or starts with ``prefix``.

    Args:
        dictionary (dict[str, object]): A Python dictionary.
        keys (sequence [str]): A sequence of keys which will be added the split criteria.
        prefix (str): A prefix which will be added the split criteria.

    Returns:
        `SplitResultSpec` : A collections.namedtuple with the following attributes:

            * Args:
                included (dict[str, object]: A dictionary with the keys included in the criteria.
                excluded (dict[str, object]: A dictionary with the keys not included in the
                                             criteria.
    """
    criteria_keys = set(keys or [])

    def _matches(key):
        return key in criteria_keys or bool(prefix and key.startswith(prefix))

    included = {key: value for key, value in dictionary.items() if _matches(key)}
    excluded = {key: value for key, value in dictionary.items() if key not in included}
    return SplitResultSpec(included=included, excluded=excluded)
class MappingMixin(collections.abc.Mapping):
"""A mixin class that allows for the creation of a dictionary like object,
with any built-in function that works with a dictionary. This is used by the
environment._Env base class.
"""
def properties(self): # type: () -> list
"""
Returns:
(list[str]) List of public properties.
"""
_type = type(self)
return [_property for _property in dir(_type) if self._is_property(_property)]
def _is_property(self, _property):
return isinstance(getattr(type(self), _property), property)
def __getitem__(self, k):
"""Built-in method override."""
if not self._is_property(k):
raise KeyError("Trying to access non property %s" % k)
return getattr(self, k)
def __len__(self):
"""Built-in method override."""
return len(self.properties())
def __iter__(self):
"""Built-in method override."""
items = {_property: getattr(self, _property) for _property in self.properties()}
return iter(items)
def __str__(self):
"""Built-in method override."""
return str(dict(self)) | /sagemaker_training-4.7.0.tar.gz/sagemaker_training-4.7.0/src/sagemaker_training/mapping.py | 0.905279 | 0.221793 | mapping.py | pypi |
"""This module contains utilities to encode and decode different content types."""
from __future__ import absolute_import
import csv
import io
import json
import numpy as np
from scipy.sparse import issparse
from six import BytesIO, StringIO
from sagemaker_training import content_types, errors
from sagemaker_training.recordio import (
_write_numpy_to_dense_tensor,
_write_spmatrix_to_sparse_tensor,
)
def array_to_npy(array_like):
    """Serialize an array-like object to the NPY binary format.

    To understand what an array-like object is, please see:
    https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays

    Args:
        array_like (np.array or Iterable or int or float): Array-like object to be converted to NPY.

    Returns:
        (bytes): The NPY-encoded payload.
    """
    output = BytesIO()
    np.save(output, array_like)
    return output.getvalue()
def npy_to_numpy(npy_array):
    """Deserialize an NPY payload into a numpy array.

    Args:
        npy_array (bytes): NPY-encoded bytes to be converted.

    Returns:
        (np.array): Converted numpy array.
    """
    return np.load(BytesIO(npy_array), allow_pickle=True)
def array_to_json(array_like):
    """Serialize an array-like object to JSON.

    To understand what an array-like object is, please see:
    https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays

    Args:
        array_like (np.array or Iterable or int or float): Array-like object to be
                                                           converted to JSON.

    Returns:
        (str): Object serialized to JSON.
    """

    def _serialize_unknown(obj):
        # numpy arrays (and scalars) expose tolist(); anything else falls back
        # to the stock encoder, which raises TypeError for unserializable input.
        if hasattr(obj, "tolist"):
            return obj.tolist()
        return json.JSONEncoder().default(obj)

    return json.dumps(array_like, default=_serialize_unknown)
def json_to_numpy(string_like, dtype=None):
    """Deserialize a JSON string into a numpy array.

    Args:
        string_like (str): JSON string.
        dtype (dtype, optional): Data type of the resulting array. If None,
                                 the dtypes will be determined by the
                                 contents of each column, individually.
                                 This argument can only be used to
                                 'upcast' the array. For downcasting,
                                 use the .astype(t) method.

    Returns:
        (np.array): Numpy array.
    """
    return np.array(json.loads(string_like), dtype=dtype)
def csv_to_numpy(string_like, dtype=None):
    """Convert a CSV object to a numpy array.

    Args:
        string_like (str): CSV string.
        dtype (dtype, optional): Data type of the resulting array. If None, the
                                 dtypes will be determined by the contents of
                                 each column, individually. This argument can
                                 only be used to 'upcast' the array. For
                                 downcasting, use the .astype(t) method.

    Returns:
        (np.array): Numpy array.

    Raises:
        errors.ClientError: If the CSV cannot be parsed, or cannot be cast when
            an explicit ``dtype`` was requested.
    """
    try:
        stream = StringIO(string_like)
        reader = csv.reader(stream, delimiter=",", quotechar='"', doublequote=True, strict=True)
        # squeeze() removes the extra dimension for single-row/single-column input.
        array = np.array([row for row in reader]).squeeze()
        # NOTE(review): when dtype is None, numpy treats astype(None) as a cast
        # to float64; if that cast raises ValueError (non-numeric text), the
        # handler below swallows it and the un-cast string array is returned —
        # confirm this best-effort behavior is intended.
        array = array.astype(dtype)
    except ValueError as e:
        # Only surface the cast failure when the caller explicitly asked for a dtype.
        if dtype is not None:
            raise errors.ClientError(
                "Error while writing numpy array: {}. dtype is: {}".format(e, dtype)
            )
    except Exception as e:
        raise errors.ClientError("Error while decoding csv: {}".format(e))
    return array
def array_to_csv(array_like):
    """Serialize an array-like object to a CSV string.

    To understand what an array-like object is, please see:
    https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays

    Args:
        array_like (np.array or Iterable or int or float): Array-like object to be converted to CSV.

    Returns:
        (str): Object serialized to CSV.
    """
    data = np.array(array_like)
    if data.ndim == 1:
        # A 1-D vector is written as a single column, one value per line.
        data = data.reshape((data.shape[0], 1))

    try:
        buffer = StringIO()
        writer = csv.writer(
            buffer, lineterminator="\n", delimiter=",", quotechar='"', doublequote=True, strict=True
        )
        writer.writerows(data)
        return buffer.getvalue()
    except csv.Error as e:
        raise errors.ClientError("Error while encoding csv: {}".format(e))
def array_to_recordio_protobuf(array_like, labels=None):
    """Serialize an array-like object to the recordio-protobuf format.

    To understand what an array-like object is, please see:
    https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays

    Args:
        array_like (np.array or scipy.sparse.csr_matrix): Array-like object to be
            converted to recordio-protobuf; a 1-D input is treated as one row.
        labels (np.array or scipy.sparse.csr_matrix): Array-like object representing
            the labels to be encoded.

    Returns:
        buffer: Bytes buffer recordio-protobuf.
    """
    data = array_like
    if len(data.shape) == 1:
        # Promote a 1-D vector to a single-row 2-D matrix.
        data = data.reshape(1, data.shape[0])
    assert len(data.shape) == 2, "Expecting a 1 or 2 dimensional array"

    buffer = io.BytesIO()
    # Sparse and dense inputs use different tensor writers.
    writer = _write_spmatrix_to_sparse_tensor if issparse(data) else _write_numpy_to_dense_tensor
    writer(buffer, data, labels)
    buffer.seek(0)
    return buffer.getvalue()
# Default encoders, keyed by content type. Used by ``encode`` to serialize an
# array-like object into the requested wire format.
encoders_map = {
    content_types.NPY: array_to_npy,
    content_types.CSV: array_to_csv,
    content_types.JSON: array_to_json,
}
# Default decoders, keyed by content type. Used by ``decode`` to turn a
# serialized payload back into a numpy array.
_decoders_map = {
    content_types.NPY: npy_to_numpy,
    content_types.CSV: csv_to_numpy,
    content_types.JSON: json_to_numpy,
}
def decode(obj, content_type):
    """Decode an object of one of the default content types to a numpy array.

    Args:
        obj (object): Object to be decoded.
        content_type (str): Content type to be used.

    Returns:
        np.array: Decoded object.

    Raises:
        errors.UnsupportedFormatError: If no decoder is registered for
            ``content_type``.
    """
    try:
        # An unknown content type surfaces as a KeyError on the map lookup.
        return _decoders_map[content_type](obj)
    except KeyError:
        raise errors.UnsupportedFormatError(content_type)
def encode(array_like, content_type):
    """Encode an array-like object into the serialized form for content_type.

    To understand what an array-like object is, please see:
    https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays

    Args:
        array_like (np.array or Iterable or int or float): Array-like object to be
            converted to numpy.
        content_type (str): Content type to be used.

    Returns:
        (np.array): Object converted as numpy array.

    Raises:
        errors.UnsupportedFormatError: If no encoder is registered for
            ``content_type``.
    """
    try:
        # An unknown content type surfaces as a KeyError on the map lookup.
        return encoders_map[content_type](array_like)
    except KeyError:
        raise errors.UnsupportedFormatError(content_type)
"""This module contains custom exceptions."""
from __future__ import absolute_import
import textwrap
import six
class ClientError(Exception):
    """Error class used to separate framework and user errors.

    Subclasses represent failures attributable to user code or input
    (e.g. script execution, module import, package installation).
    """
class SMTrainingCompilerConfigurationError(Exception):
    """Error class used to separate SageMaker Training Compiler configuration errors."""
class _CalledProcessError(ClientError):
    """Raised when a process run by check_call() or check_output() returns
    a non-zero exit status.

    Attributes:
        cmd: the command that was executed.
        return_code (str): the process exit status, stored as a string.
        output: captured process output (str or bytes), if any.
        extra_info: optional additional context included in the message.
    """

    def __init__(self, cmd, return_code=None, output=None, info=None):
        self.return_code = str(return_code)
        self.cmd = cmd
        self.output = output
        self.extra_info = info
        super(_CalledProcessError, self).__init__()

    def __str__(self):
        if six.PY3 and self.output:
            if isinstance(self.output, bytes):
                # errors="replace" keeps __str__ from raising UnicodeDecodeError
                # when the process emitted non-UTF-8 bytes.
                error_msg = "%s" % self.output.decode("utf-8", errors="replace")
            else:
                error_msg = "%s" % self.output
        elif self.output:
            error_msg = "%s" % self.output
        else:
            error_msg = ""

        if self.extra_info is None:
            message = '%s:\nExitCode %s\nErrorMessage "%s"\nCommand "%s"' % (
                type(self).__name__,
                self.return_code,
                error_msg,
                self.cmd,
            )
        else:
            message = '%s:\nExitCode %s\nErrorMessage "%s"\nExtraInfo "%s"\nCommand "%s"' % (
                type(self).__name__,
                self.return_code,
                error_msg,
                self.extra_info,
                self.cmd,
            )
        return message.strip()
class InstallModuleError(_CalledProcessError):
    """Error class indicating a module failed to install."""
class InstallRequirementsError(_CalledProcessError):
    """Error class indicating a requirements file failed to install."""
class ImportModuleError(ClientError):
    """Error class indicating a module failed to import."""
class ExecuteUserScriptError(_CalledProcessError):
    """Error class indicating a user script failed to execute."""
class ChannelDoesNotExistError(Exception):
    """Raised when a requested input data channel does not exist."""

    def __init__(self, channel_name):
        message = "Channel %s is not a valid channel" % channel_name
        super(ChannelDoesNotExistError, self).__init__(message)
class UnsupportedFormatError(Exception):
    """Error class indicating a content type is not supported by the current framework."""

    def __init__(self, content_type, **kwargs):
        # Build the message with explicit line breaks. The previous
        # textwrap.dedent() call was a no-op: the literal's first line had no
        # leading whitespace, so the common prefix was empty and the
        # continuation lines kept their source indentation in the rendered
        # message. Also fixes the "to to" typo in the user-facing text.
        self.message = (
            "Content type %s is not supported by this framework.\n"
            "Please implement input_fn to deserialize the request data or an output_fn to\n"
            "serialize the response. For more information, see the SageMaker Python SDK README."
            % content_type
        )
        super(UnsupportedFormatError, self).__init__(self.message, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.