repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/import_utils.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import platform
from cuml.internals.safe_imports import gpu_only_import, UnavailableError
from distutils.version import LooseVersion
numba = gpu_only_import("numba")
def has_dask():
try:
import dask # NOQA
import dask.distributed # NOQA
import dask.dataframe # NOQA
return True
except ImportError:
return False
def has_dask_cudf():
    """Return True if the dask_cudf package can be imported."""
    try:
        import dask_cudf  # NOQA
    except ImportError:
        return False
    return True
def has_dask_sql():
    """Return True if the dask_sql package can be imported."""
    try:
        import dask_sql  # NOQA
    except ImportError:
        return False
    return True
def has_cupy():
    """Return True if the cupy package can be imported."""
    try:
        import cupy  # NOQA
    except ImportError:
        return False
    return True
def has_ucp():
    """Return True if the ucp (UCX-Py) package can be imported."""
    try:
        import ucp  # NOQA
    except ImportError:
        return False
    return True
def has_umap():
    """Return True if umap-learn is importable.

    Always reports False on aarch64, where umap is treated as unsupported.
    """
    if platform.processor() == "aarch64":
        return False
    try:
        import umap  # NOQA
    except ImportError:
        return False
    return True
def has_lightgbm():
    """Return True if the lightgbm package can be imported."""
    try:
        import lightgbm  # NOQA
    except ImportError:
        return False
    return True
def has_xgboost():
    """Return True if xgboost imports cleanly.

    A present-but-broken xgboost installation (any non-ImportError raised
    during import) emits a warning and is treated as unavailable.
    """
    try:
        import xgboost  # NOQA
    except ImportError:
        return False
    except Exception as ex:
        import warnings

        warnings.warn(
            (
                "The XGBoost library was found but raised an exception during "
                "import. Importing xgboost will be skipped. "
                "Error message:\n{}"
            ).format(str(ex))
        )
        return False
    return True
def has_pytest_benchmark():
    """Return True if the pytest_benchmark plugin can be imported."""
    try:
        import pytest_benchmark  # NOQA
    except ImportError:
        return False
    return True
def check_min_dask_version(version):
try:
import dask
return LooseVersion(dask.__version__) >= LooseVersion(version)
except ImportError:
return False
def check_min_numba_version(version):
    """Return True if numba is available with version >= ``version``.

    ``numba`` is a lazy gpu_only_import proxy; touching ``__version__`` raises
    UnavailableError when numba is absent, which maps to False here.
    """
    try:
        installed = LooseVersion(str(numba.__version__))
    except UnavailableError:
        return False
    return installed >= LooseVersion(version)
def check_min_cupy_version(version):
    """Return True if cupy is importable with version >= ``version``."""
    if not has_cupy():
        return False
    import cupy

    return LooseVersion(str(cupy.__version__)) >= LooseVersion(version)
def has_scipy(raise_if_unavailable=False):
    """Return True if scipy is importable.

    When unavailable, returns False, or raises ImportError if
    ``raise_if_unavailable`` is True.
    """
    try:
        import scipy  # NOQA
    except ImportError:
        if raise_if_unavailable:
            raise ImportError("Scipy is not available.")
        return False
    return True
def has_sklearn():
    """Return True if scikit-learn can be imported."""
    try:
        import sklearn  # NOQA
    except ImportError:
        return False
    return True
def has_hdbscan(raise_if_unavailable=False):
    """Return True if hdbscan is importable.

    When unavailable, returns False, or raises ImportError if
    ``raise_if_unavailable`` is True.
    """
    try:
        import hdbscan  # NOQA
    except ImportError:
        if raise_if_unavailable:
            raise ImportError(
                "hdbscan is not available. Please install hdbscan."
            )
        return False
    return True
def has_shap(min_version="0.37"):
    """Return True if shap is importable (and >= ``min_version`` if given)."""
    try:
        import shap  # noqa
    except ImportError:
        return False
    if min_version is None:
        return True
    return LooseVersion(str(shap.__version__)) >= LooseVersion(min_version)
def has_daskglm(min_version=None):
    """Return True if dask_glm is importable (and >= ``min_version`` if given)."""
    try:
        import dask_glm  # noqa
    except ImportError:
        return False
    if min_version is None:
        return True
    return LooseVersion(str(dask_glm.__version__)) >= LooseVersion(min_version)
def dummy_function_always_false(*args, **kwargs):
    """Stand-in callable: accepts anything, always returns False."""
    return False
class DummyClass:
    """Inert placeholder class used where a real implementation is absent."""
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/callbacks_implems.h | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <Python.h>
#include <cuml/common/callback.hpp>
#include <iostream>
namespace ML {
namespace Internals {
class DefaultGraphBasedDimRedCallback : public GraphBasedDimRedCallback {
 public:
  /**
   * Wrap the raw embeddings pointer as a numba device array by delegating to
   * the Python callback object's `get_numba_matrix(ptr, (n, n_components),
   * typestr)` method. `n`, `n_components` and `isFloat` are populated by the
   * GraphBasedDimRedCallback base class. Returns a new reference, or nullptr
   * with a Python exception set on failure.
   */
  PyObject* get_numba_matrix(void* embeddings)
  {
    PyObject* pycl       = (PyObject*)this->pyCallbackClass;
    const char* typestr  = isFloat ? "float32" : "float64";
    return PyObject_CallMethod(
      pycl, "get_numba_matrix", "(l(ll)s)", embeddings, n, n_components, typestr);
  }

  void on_preprocess_end(void* embeddings) override { invoke("on_preprocess_end", embeddings); }
  void on_epoch_end(void* embeddings) override { invoke("on_epoch_end", embeddings); }
  void on_train_end(void* embeddings) override { invoke("on_train_end", embeddings); }

 public:
  // Borrowed reference to the Python-side callback object; set from Cython.
  PyObject* pyCallbackClass = nullptr;

 private:
  /**
   * Shared implementation for the three hooks: build the numba matrix and
   * forward it to the named Python method.
   *
   * BUGFIX: the previous code called Py_DECREF on the results of
   * PyObject_CallMethod unconditionally; both calls return NULL when the
   * Python side raises, and Py_DECREF(NULL) crashes the process. Use NULL
   * checks / Py_XDECREF and surface the Python error instead.
   */
  void invoke(const char* method, void* embeddings)
  {
    PyObject* numba_matrix = get_numba_matrix(embeddings);
    if (numba_matrix == nullptr) {
      PyErr_Print();
      return;
    }
    PyObject* res = PyObject_CallMethod(this->pyCallbackClass, method, "(O)", numba_matrix);
    Py_DECREF(numba_matrix);
    if (res == nullptr) {
      PyErr_Print();
      return;
    }
    Py_DECREF(res);
  }
};
} // namespace Internals
} // namespace ML
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/internals.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import gpu_only_import_from
from_cuda_array_interface = gpu_only_import_from(
'numba.cuda.api',
'from_cuda_array_interface'
)
cdef extern from "Python.h":
cdef cppclass PyObject
IF GPUBUILD == 1:
from libc.stdint cimport uintptr_t
cdef extern from "callbacks_implems.h" namespace "ML::Internals":
cdef cppclass Callback:
pass
cdef cppclass DefaultGraphBasedDimRedCallback(Callback):
void setup(int n, int d) except +
void on_preprocess_end(void* embeddings) except +
void on_epoch_end(void* embeddings) except +
void on_train_end(void* embeddings) except +
PyObject* pyCallbackClass
cdef class PyCallback:
    """Base class for Python callbacks that view raw device memory.

    Provides a helper that wraps a raw device pointer (passed from the C++
    side) as a numba device array without copying.
    """

    def get_numba_matrix(self, embeddings, shape, typestr):
        # embeddings: integer device pointer; shape: (rows, cols) tuple;
        # typestr: "float32" or "float64" (anything else is treated as 8-byte).
        sizeofType = 4 if typestr == "float32" else 8
        # Hand-built __cuda_array_interface__ descriptor (C-order strides).
        # NOTE(review): the CAI spec describes 'data' as a (ptr, read_only)
        # tuple; a single-element list is used here — confirm numba's
        # from_cuda_array_interface accepts this form.
        desc = {
            'shape': shape,
            'strides': (shape[1]*sizeofType, sizeofType),
            'typestr': typestr,
            'data': [embeddings],
            'order': 'C',
            'version': 1
        }
        return from_cuda_array_interface(desc)
cdef class GraphBasedDimRedCallback(PyCallback):
    """
    Usage
    -----
    class CustomCallback(GraphBasedDimRedCallback):
        def on_preprocess_end(self, embeddings):
            print(embeddings.copy_to_host())
        def on_epoch_end(self, embeddings):
            print(embeddings.copy_to_host())
        def on_train_end(self, embeddings):
            print(embeddings.copy_to_host())
    reducer = UMAP(n_components=2, callback=CustomCallback())
    """
    # C++ callback object embedded by value in this extension type; its
    # lifetime is tied to the Python object.
    cdef DefaultGraphBasedDimRedCallback native_callback

    def __init__(self):
        # Store a borrowed pointer to this Python object so the C++ side can
        # call back into the on_* methods. Safe only while this object lives.
        self.native_callback.pyCallbackClass = <PyObject *><void*>self

    def get_native_callback(self):
        """Return the address of the embedded C++ callback as an integer."""
        # A NULL pyCallbackClass means a subclass overrode __init__ without
        # chaining to it, so the C++ side would have no target to call.
        if self.native_callback.pyCallbackClass == NULL:
            raise ValueError(
                "You need to call `super().__init__` in your callback."
            )
        return <uintptr_t>&(self.native_callback)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/constants.py | #
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Attribute name set on callables wrapped by cuml's API decoration machinery
# (presumably checked to avoid double-wrapping — verify at usage sites).
CUML_WRAPPED_FLAG = "__cuml_is_wrapped"
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/array.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import operator
import pickle
from cuml.internals.global_settings import GlobalSettings
from cuml.internals.logger import debug
from cuml.internals.mem_type import MemoryType, MemoryTypeError
from cuml.internals.memory_utils import class_with_cupy_rmm, with_cupy_rmm
from cuml.internals.safe_imports import (
cpu_only_import,
cpu_only_import_from,
gpu_only_import,
gpu_only_import_from,
null_decorator,
return_false,
safe_import,
safe_import_from,
)
from typing import Tuple
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
np = cpu_only_import("numpy")
rmm = gpu_only_import("rmm")
host_xpy = safe_import("numpy", alt=cp)
cuda = gpu_only_import_from("numba", "cuda")
cached_property = safe_import_from(
"functools", "cached_property", alt=null_decorator
)
CudfBuffer = gpu_only_import_from("cudf.core.buffer", "Buffer")
CudfDataFrame = gpu_only_import_from("cudf", "DataFrame")
CudfIndex = gpu_only_import_from("cudf", "Index")
CudfSeries = gpu_only_import_from("cudf", "Series")
DaskCudfDataFrame = gpu_only_import_from("dask_cudf.core", "DataFrame")
DaskCudfSeries = gpu_only_import_from("dask_cudf.core", "Series")
DaskDataFrame = gpu_only_import_from("dask.dataframe", "DataFrame")
DaskSeries = gpu_only_import_from("dask.dataframe", "Series")
DeviceBuffer = gpu_only_import_from("rmm", "DeviceBuffer")
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
PandasDataFrame = cpu_only_import_from("pandas", "DataFrame")
PandasIndex = cpu_only_import_from("pandas", "Index")
PandasSeries = cpu_only_import_from("pandas", "Series")
is_numba_array = gpu_only_import_from(
"numba.cuda", "is_cuda_array", alt=return_false
)
cp_ndarray = gpu_only_import_from("cupy", "ndarray")
np_ndarray = cpu_only_import_from("numpy", "ndarray")
numba_devicearray = gpu_only_import_from("numba.cuda", "devicearray")
_specific_supported_types = (
np_ndarray,
cp_ndarray,
CudfSeries,
CudfDataFrame,
PandasSeries,
PandasDataFrame,
)
def _order_to_strides(order, shape, dtype):
"""
Given memory order, shape and dtype, return expected strides
"""
dtype = host_xpy.dtype(dtype)
if order == "C":
strides = (
host_xpy.append(
host_xpy.cumprod(host_xpy.array(shape[:0:-1]))[::-1], 1
)
* dtype.itemsize
)
elif order == "F":
strides = (
host_xpy.cumprod(host_xpy.array([1, *shape[:-1]])) * dtype.itemsize
)
else:
raise ValueError(
"Must specify strides or order, and order must"
' be one of "C" or "F"'
)
return strides
def _determine_memory_order(shape, strides, dtype, default="C"):
"""
Given strides, shape and dtype for an array, return memory order
If order is neither C nor F contiguous, return None. If array is both C and
F contiguous, return default if given or 'C' otherwise.
"""
if strides is None:
return "C"
if len(shape) < 2:
return "C" if default in (None, "K") else default
shape = host_xpy.array(shape)
strides = host_xpy.array(strides)
itemsize = host_xpy.dtype(dtype).itemsize
c_contiguous = False
f_contiguous = False
if strides[-1] == itemsize:
if host_xpy.all(strides[:-1] == shape[1:] * strides[1:]):
c_contiguous = True
if strides[0] == itemsize:
if host_xpy.all(strides[1:] == shape[:-1] * strides[:-1]):
f_contiguous = True
if c_contiguous and f_contiguous:
return "C" if default in (None, "K") else default
elif c_contiguous:
return "C"
elif f_contiguous:
return "F"
return None
@class_with_cupy_rmm(ignore_pattern=["serialize"])
class CumlArray:
"""
Array represents an abstracted array allocation. It can be instantiated by
itself or can be instantiated by ``__cuda_array_interface__`` or
``__array_interface__`` compliant arrays, in which case it'll keep a
reference to that data underneath. Also can be created from a pointer,
specifying the characteristics of the array, in that case the owner of the
data referred to by the pointer should be specified explicitly.
Parameters
----------
data : rmm.DeviceBuffer, cudf.Buffer, array_like, int, bytes, bytearray or\
memoryview
An array-like object or integer representing a
device or host pointer to pre-allocated memory.
owner : object, optional
Python object to which the lifetime of the memory
allocation is tied. If provided, a reference to this
object is kept in this Buffer.
dtype : data-type, optional
Any object that can be interpreted as a numpy or cupy data type.
shape : int or tuple of ints, optional
Shape of created array.
order: string, optional
Whether to create a F-major or C-major array.
mem_type: {'host', 'device'}, optional
Whether data are on host or device.
validate: bool, default=None
Whether or not to check final array attributes against input options.
If None, validation will occur only for CumlArray input and input that
does not implement the array interface protocol and for which
additional options were explicitly specified.
Attributes
----------
ptr : int
Pointer to the data
size : int
Size of the array data in bytes
_owner : Python Object
Object that owns the data of the array
shape : tuple of ints
Shape of the array
order : {'F', 'C'}
'F' or 'C' to indicate Fortran-major or C-major order of the array
strides : tuple of ints
Strides of the data
mem_type : MemoryType
Memory type for how data are stored
__array_interface__ : dictionary
``__array_interface__`` to interop with other libraries. This
attribute is only present if data are host-accessible.
__cuda_array_interface__ : dictionary
``__cuda_array_interface__`` to interop with other libraries. This
attribute is only present if data are device-accessible.
Notes
-----
cuml Array is not meant as an end-user array library. It is meant for
cuML/RAPIDS developer consumption. Therefore it contains the minimum
functionality. Its functionality is hidden by base.pyx to provide
automatic output format conversion so that the users see the important
attributes in whatever format they prefer.
Todo: support cuda streams in the constructor. See:
https://github.com/rapidsai/cuml/issues/1712
https://github.com/rapidsai/cuml/pull/1396
"""
@nvtx_annotate(
message="internals.CumlArray.__init__",
category="utils",
domain="cuml_python",
)
def __init__(
self,
data=None,
index=None,
owner=None,
dtype=None,
shape=None,
order=None,
strides=None,
mem_type=None,
validate=None,
):
if dtype is not None:
dtype = GlobalSettings().xpy.dtype(dtype)
self._index = index
if mem_type is not None:
mem_type = MemoryType.from_str(mem_type)
self._mem_type = mem_type
if hasattr(data, "__cuda_array_interface__"):
# using CuPy allows processing delayed array wrappers
# like cumlarray without added complexity
data = cp.asarray(data)
# need to reshape if user requests specific shape
if shape is not None:
data = data.reshape(shape)
self._array_interface = data.__cuda_array_interface__
if mem_type in (None, MemoryType.mirror):
self._mem_type = MemoryType.device
self._owner = data
else: # Not a CUDA array object
if hasattr(data, "__array_interface__"):
self._array_interface = data.__array_interface__
self._mem_type = MemoryType.host
self._owner = data
else: # Must construct array interface
if dtype is None:
if hasattr(data, "dtype"):
dtype = data.dtype
else:
raise ValueError(
"Must specify dtype when data is passed as a"
" {}".format(type(data))
)
if isinstance(data, (CudfBuffer, DeviceBuffer)):
self._mem_type = MemoryType.device
elif mem_type is None:
if GlobalSettings().memory_type in (
None,
MemoryType.mirror,
):
raise ValueError(
"Must specify mem_type when data is passed as a"
" {}".format(type(data))
)
self._mem_type = GlobalSettings().memory_type
try:
data = data.ptr
if shape is None:
shape = (data.size,)
self._owner = data
except AttributeError: # Not a buffer object
pass
if isinstance(data, int):
self._owner = owner
else:
if self._mem_type is None:
cur_xpy = GlobalSettings().xpy
else:
cur_xpy = self._mem_type.xpy
# Assume integers are pointers. For everything else,
# convert it to an array and retry
try:
new_data = cur_xpy.frombuffer(data, dtype=dtype)
except TypeError:
new_data = cur_xpy.asarray(data, dtype=dtype)
if shape is not None:
new_order = order if order is not None else "C"
new_data = cur_xpy.reshape(
new_data, shape, order=new_order
)
if index is None:
try:
self._index = data.index
except AttributeError:
pass
return self.__init__(
data=new_data,
index=self._index,
owner=owner,
dtype=dtype,
shape=shape,
order=order,
mem_type=mem_type,
)
if shape is None:
raise ValueError(
"shape must be specified when data is passed as a"
" pointer"
)
if strides is None:
try:
if len(shape) == 0:
strides = None
elif len(shape) == 1:
strides == (dtype.itemsize,)
except TypeError: # Shape given as integer
strides = (dtype.itemsize,)
if strides is None:
strides = _order_to_strides(order, shape, dtype)
self._array_interface = {
"shape": shape,
"strides": strides,
"typestr": dtype.str,
"data": (data, False),
"version": 3,
}
# Derive any information required for attributes that has not
# already been derived
if mem_type in (None, MemoryType.mirror):
if self._mem_type in (None, MemoryType.mirror):
raise ValueError(
"Could not infer memory type from input data. Pass"
" mem_type explicitly."
)
mem_type = self._mem_type
if self._array_interface["strides"] is None:
try:
self._array_interface["strides"] = data.strides
except AttributeError:
self._array_interface["strides"] = strides
if (
isinstance(data, CumlArray)
or not (
hasattr(data, "__array_interface__")
or hasattr(data, "__cuda_array_interface__")
)
) and (dtype is not None and shape is not None and order is not None):
self._array_interface["shape"] = shape
self._array_interface["strides"] = strides
else:
if validate is None:
validate = True
array_strides = self._array_interface["strides"]
if array_strides is not None:
array_strides = host_xpy.array(array_strides)
if (
array_strides is None
or len(array_strides) == 1
or host_xpy.all(array_strides[1:] == array_strides[:-1])
) and order not in ("K", None):
self._order = order
else:
self._order = _determine_memory_order(
self._array_interface["shape"],
self._array_interface["strides"],
self._array_interface["typestr"],
default=order,
)
# Validate final data against input arguments
if validate:
if mem_type != self._mem_type:
raise MemoryTypeError(
"Requested mem_type inconsistent with input data object"
)
if (
dtype is not None
and dtype.str != self._array_interface["typestr"]
):
raise ValueError(
"Requested dtype inconsistent with input data object"
)
if owner is not None and self._owner is not owner:
raise ValueError(
"Specified owner object does not seem to match data"
)
if shape is not None:
shape_arr = host_xpy.array(shape)
if len(shape_arr.shape) == 0:
shape_arr = host_xpy.reshape(shape_arr, (1,))
if not host_xpy.array_equal(
host_xpy.array(self._array_interface["shape"]), shape_arr
):
raise ValueError(
"Specified shape inconsistent with input data object"
)
if (
strides is not None
and self._array_interface["strides"] is not None
and not host_xpy.array_equal(
host_xpy.array(self._array_interface["strides"]),
host_xpy.array(strides),
)
):
raise ValueError(
"Specified strides inconsistent with input data object"
)
if order is not None and order != "K" and self._order != order:
raise ValueError(
"Specified order inconsistent with array stride"
)
    @property
    def ptr(self):
        # Raw data pointer (first element of the array-interface 'data' pair).
        return self._array_interface["data"][0]

    @cached_property
    def dtype(self):
        # dtype object built by the numpy/cupy module matching the memory type.
        return self._mem_type.xpy.dtype(self._array_interface["typestr"])

    @property
    def mem_type(self):
        # MemoryType enum value: where the data lives (host or device).
        return self._mem_type

    @property
    def is_device_accessible(self):
        # True when the data can be read from device code.
        return self._mem_type.is_device_accessible

    @property
    def is_host_accessible(self):
        # True when the data can be read from host code.
        return self._mem_type.is_host_accessible
@cached_property
def size(self):
return (
host_xpy.product(self._array_interface["shape"])
* host_xpy.dtype(self._array_interface["typestr"]).itemsize
)
    @property
    def order(self):
        # 'C', 'F', or None (non-contiguous), as derived in __init__.
        return self._order

    @property
    def strides(self):
        # Byte strides per dimension; may be None for packed C-order data.
        return self._array_interface["strides"]

    @property
    def shape(self):
        return self._array_interface["shape"]

    @property
    def ndim(self):
        # Number of dimensions.
        return len(self._array_interface["shape"])

    @cached_property
    def is_contiguous(self):
        # Contiguous iff the layout resolved to a definite C or F order.
        return self.order in ("C", "F")

    # We use the index as a property to allow for validation/processing
    # in the future if needed
    @property
    def index(self):
        # Optional pandas/cudf-style index carried alongside the data.
        return self._index

    @index.setter
    def index(self, index):
        self._index = index

    @property
    def __cuda_array_interface__(self):
        # Expose CAI only for device-accessible data, per the protocol.
        if not self._mem_type.is_device_accessible:
            raise AttributeError(
                "Host-only array does not have __cuda_array_interface__"
            )
        return self._array_interface

    @property
    def __array_interface__(self):
        # Expose the NumPy array interface only for host-accessible data.
        if not self._mem_type.is_host_accessible:
            raise AttributeError(
                "Device-only array does not have __array_interface__"
            )
        return self._array_interface
    @with_cupy_rmm
    def __getitem__(self, slice):
        # Materialize as an xpy array, slice, and re-wrap as CumlArray.
        return CumlArray(
            data=self._mem_type.xpy.asarray(self).__getitem__(slice)
        )

    @with_cupy_rmm
    def __iter__(self):
        # Iterate over the first axis of the materialized array.
        arr = self._mem_type.xpy.asarray(self)
        yield from arr

    def __setitem__(self, slice, value):
        # In-place write via a zero-copy xpy view of this array's memory.
        self._mem_type.xpy.asarray(self).__setitem__(slice, value)

    def __len__(self):
        # Length of the first dimension; 0-d arrays report length 0.
        try:
            return self.shape[0]
        except IndexError:
            return 0

    def _operator_overload(self, other, fn):
        # Common implementation for the operators below: apply ``fn`` to the
        # materialized array and wrap the result in a new CumlArray.
        return CumlArray(fn(self.to_output("array"), other))

    def __add__(self, other):
        return self._operator_overload(other, operator.add)

    def __sub__(self, other):
        return self._operator_overload(other, operator.sub)

    def __lt__(self, other):
        return self._operator_overload(other, operator.lt)

    def __le__(self, other):
        return self._operator_overload(other, operator.le)

    def __gt__(self, other):
        return self._operator_overload(other, operator.gt)

    def __ge__(self, other):
        return self._operator_overload(other, operator.ge)

    def __eq__(self, other):
        # NOTE: returns an element-wise CumlArray (numpy semantics), not a
        # bool, except when comparison is unsupported. Defining __eq__ also
        # makes instances unhashable (implicit __hash__ = None).
        try:
            return self._operator_overload(other, operator.eq)
        except TypeError:
            return False

    def __or__(self, other):
        return self._operator_overload(other, operator.or_)

    def any(self):
        return self.to_output("array").any()

    def all(self):
        return self.to_output("array").all()

    def item(self):
        # Scalar extraction for single-element arrays.
        return self._mem_type.xpy.asarray(self).item()
    @nvtx_annotate(
        message="common.CumlArray.to_output",
        category="utils",
        domain="cuml_python",
    )
    def to_output(
        self, output_type="array", output_dtype=None, output_mem_type=None
    ):
        """
        Convert array to output format
        Parameters
        ----------
        output_type : string
            Format to convert the array to. Acceptable formats are:
            - 'array' - to cupy/numpy array depending on memory type
            - 'numba' - to numba device array
            - 'dataframe' - to cuDF/Pandas DataFrame depending on memory type
            - 'series' - to cuDF/Pandas Series depending on memory type
            - 'df_obj' - to cuDF/Pandas Series if array is single
                dimensional, to cuDF/Pandas Dataframe otherwise
            - 'cupy' - to cupy array
            - 'numpy' - to numpy array
            - 'cudf' - to cuDF Series/DataFrame depending on shape of data
            - 'pandas' - to Pandas Series/DataFrame depending on shape of data
        output_mem_type : {'host, 'device'}, optional
            Optionally convert array to given memory type. If `output_type`
            already indicates a specific memory type, `output_type` takes
            precedence. If the memory type is not otherwise indicated, the data
            are kept on their current device.
        output_dtype : string, optional
            Optionally cast the array to a specified dtype, creating
            a copy if necessary.
        """
        # Normalize the library-specific aliases to (generic type, mem_type).
        if output_type == "cupy":
            output_type = "array"
            output_mem_type = MemoryType.device
        elif output_type == "numpy":
            output_type = "array"
            output_mem_type = MemoryType.host
        elif output_type == "cudf":
            output_type = "df_obj"
            output_mem_type = MemoryType.device
        elif output_type == "pandas":
            output_type = "df_obj"
            output_mem_type = MemoryType.host
        if output_dtype is None:
            output_dtype = self.dtype
        if output_mem_type is None:
            output_mem_type = self._mem_type
        else:
            output_mem_type = MemoryType.from_str(output_mem_type)
            if output_mem_type == MemoryType.mirror:
                output_mem_type = self._mem_type
        # 'df_obj' resolves to series for 1-d (or n x 1) data, else dataframe.
        if output_type == "df_obj":
            if len(self.shape) == 1:
                output_type = "series"
            elif len(self.shape) == 2 and self.shape[1] == 1:
                # It is convenient to coerce 2D arrays with second
                # dimension 1 to series, but we will not extend this to higher
                # dimensions
                output_type = "series"
            else:
                output_type = "dataframe"
        if output_type == "array":
            if output_mem_type == MemoryType.host:
                if self._mem_type == MemoryType.host:
                    # Already host data: zero-copy view where possible.
                    return np.asarray(
                        self, dtype=output_dtype, order=self.order
                    )
                # Device -> host: go through cupy, preferring the owner
                # object when it is not a known array/frame type (avoids
                # re-wrapping exotic owners through the interface).
                if isinstance(
                    self._owner, _specific_supported_types
                ) or "cuml" in str(type(self._owner)):
                    cp_arr = cp.asarray(
                        self, dtype=output_dtype, order=self.order
                    )
                else:
                    if self._owner is not None:
                        cp_arr = cp.asarray(
                            self._owner, dtype=output_dtype, order=self.order
                        )
                    else:
                        cp_arr = cp.asarray(
                            self, dtype=output_dtype, order=self.order
                        )
                return cp.asnumpy(
                    cp_arr,
                    order=self.order,
                )
            return output_mem_type.xpy.asarray(
                self, dtype=output_dtype, order=self.order
            )
        elif output_type == "numba":
            # numba device arrays are created from a cupy view of the data.
            return cuda.as_cuda_array(
                cp.asarray(self, dtype=output_dtype, order=self.order)
            )
        elif output_type == "series":
            # Flatten n x 1 data to 1-d before building a Series.
            if len(self.shape) == 2 and self.shape[1] == 1:
                arr = CumlArray(
                    self,
                    dtype=self.dtype,
                    order=self.order,
                    shape=(self.shape[0],),
                )
            else:
                arr = self
            if len(arr.shape) == 1:
                try:
                    if (
                        output_mem_type == MemoryType.host
                        and arr._mem_type != MemoryType.host
                    ):
                        # Device data to host series: build cudf then convert.
                        return cudf.Series(
                            arr, dtype=output_dtype, index=self.index
                        ).to_pandas()
                    else:
                        return output_mem_type.xdf.Series(
                            arr, dtype=output_dtype, index=self.index
                        )
                except TypeError:
                    raise ValueError("Unsupported dtype for Series")
            else:
                raise ValueError(
                    "Only single dimensional arrays can be transformed to"
                    " Series."
                )
        elif output_type == "dataframe":
            arr = self.to_output(
                output_type="array",
                output_dtype=output_dtype,
                output_mem_type=output_mem_type,
            )
            if len(arr.shape) == 1:
                arr = arr.reshape(arr.shape[0], 1)
            # Move the index to the target memory space if needed.
            if self.index is None:
                out_index = None
            elif (
                output_mem_type.is_device_accessible
                and not self.mem_type.is_device_accessible
            ):
                out_index = cudf.Index.from_pandas(self.index)
            elif (
                output_mem_type.is_host_accessible
                and not self.mem_type.is_host_accessible
            ):
                out_index = self.index.to_pandas()
            else:
                out_index = self.index
            try:
                result = output_mem_type.xdf.DataFrame(arr, index=out_index)
                return result
            except TypeError:
                raise ValueError("Unsupported dtype for DataFrame")
        # Unrecognized output_type: return self unchanged.
        return self
    @nvtx_annotate(
        message="common.CumlArray.host_serialize",
        category="utils",
        domain="cuml_python",
    )
    def host_serialize(self):
        """Serialize with frames guaranteed to live in host memory."""
        mem_type = (
            self.mem_type
            if self.mem_type.is_host_accessible
            else MemoryType.host
        )
        return self.serialize(mem_type=mem_type)

    @classmethod
    def host_deserialize(cls, header, frames):
        """Rebuild a CumlArray from host-memory frames (dask protocol)."""
        typ = pickle.loads(header["type-serialized"])
        assert all(not is_cuda for is_cuda in header["is-cuda"])
        obj = typ.deserialize(header, frames)
        return obj

    @nvtx_annotate(
        message="common.CumlArray.device_serialize",
        category="utils",
        domain="cuml_python",
    )
    def device_serialize(self):
        """Serialize with frames guaranteed to live in device memory."""
        mem_type = (
            self.mem_type
            if self.mem_type.is_device_accessible
            else MemoryType.device
        )
        return self.serialize(mem_type=mem_type)

    @classmethod
    def device_deserialize(cls, header, frames):
        """Rebuild a CumlArray from device-memory frames (dask protocol)."""
        typ = pickle.loads(header["type-serialized"])
        assert all(is_cuda for is_cuda in header["is-cuda"])
        obj = typ.deserialize(header, frames)
        return obj

    @nvtx_annotate(
        message="common.CumlArray.serialize",
        category="utils",
        domain="cuml_python",
    )
    def serialize(self, mem_type=None) -> Tuple[dict, list]:
        """Return a (header, frames) pair in the dask serialization format.

        The single frame is the array converted to ``mem_type`` memory.
        """
        mem_type = self.mem_type if mem_type is None else mem_type
        header = {
            "type-serialized": pickle.dumps(type(self)),
            "constructor-kwargs": {
                "dtype": self.dtype.str,
                "shape": self.shape,
                "mem_type": mem_type.name,
            },
            "desc": self._array_interface,
            "frame_count": 1,
            "is-cuda": [mem_type.is_device_accessible],
            "lengths": [self.size],
        }
        frames = [self.to_output("array", output_mem_type=mem_type)]
        return header, frames
    @classmethod
    def deserialize(cls, header: dict, frames: list):
        """Reconstruct a CumlArray from a (header, frames) pair.

        The result is converted to the currently active global memory type.
        Raises ValueError if the received frame's shape does not match the
        header description.
        """
        assert (
            header["frame_count"] == 1
        ), "Only expecting to deserialize CumlArray with a single frame."
        ary = cls(data=frames[0], **header["constructor-kwargs"])
        if header["desc"]["shape"] != ary._array_interface["shape"]:
            raise ValueError(
                f"Received a `Buffer` with the wrong size."
                f" Expected {header['desc']['shape']}, "
                f"but got {ary._array_interface['shape']}"
            )
        return ary.to_mem_type(GlobalSettings().memory_type)

    def __reduce_ex__(self, protocol):
        # Pickle via host serialization so pickles are device-independent.
        header, frames = self.host_serialize()
        return self.host_deserialize, (header, frames)
@nvtx_annotate(
message="common.CumlArray.to_host_array",
category="utils",
domain="cuml_python",
)
def to_mem_type(self, mem_type):
return self.__class__(
data=self.to_output("array", output_mem_type=mem_type),
index=self.index,
order=self.order,
mem_type=MemoryType.from_str(mem_type),
validate=False,
)
@nvtx_annotate(
message="common.CumlArray.to_host_array",
category="utils",
domain="cuml_python",
)
def to_host_array(self):
return self.to_output("numpy")
@nvtx_annotate(
message="common.CumlArray.to_host_array",
category="utils",
domain="cuml_python",
)
def to_device_array(self):
return self.to_output("cupy")
    @classmethod
    @nvtx_annotate(
        message="common.CumlArray.empty",
        category="utils",
        domain="cuml_python",
    )
    def empty(cls, shape, dtype, order="F", index=None, mem_type=None):
        """
        Create an empty Array with an allocated but uninitialized DeviceBuffer
        Parameters
        ----------
        dtype : data-type, optional
            Any object that can be interpreted as a numpy or cupy data type.
        shape : int or tuple of ints, optional
            Shape of created array.
        order: string, optional
            Whether to create a F-major or C-major array.
        """
        # Fall back to the globally configured memory type when unspecified.
        if mem_type is None:
            mem_type = GlobalSettings().memory_type
        return CumlArray(mem_type.xpy.empty(shape, dtype, order), index=index)
    @classmethod
    @nvtx_annotate(
        message="common.CumlArray.full", category="utils", domain="cuml_python"
    )
    def full(cls, shape, value, dtype, order="F", index=None, mem_type=None):
        """
        Create an Array with an allocated DeviceBuffer initialized to value.
        Parameters
        ----------
        value : scalar
            Fill value for every element of the array.
        dtype : data-type, optional
            Any object that can be interpreted as a numpy or cupy data type.
        shape : int or tuple of ints, optional
            Shape of created array.
        order: string, optional
            Whether to create a F-major or C-major array.
        """
        # Fall back to the globally configured memory type when unspecified.
        if mem_type is None:
            mem_type = GlobalSettings().memory_type
        return CumlArray(
            mem_type.xpy.full(shape, value, dtype, order), index=index
        )
    @classmethod
    @nvtx_annotate(
        message="common.CumlArray.zeros",
        category="utils",
        domain="cuml_python",
    )
    def zeros(
        cls, shape, dtype="float32", order="F", index=None, mem_type=None
    ):
        """
        Create an Array with an allocated DeviceBuffer initialized to zeros.
        Parameters
        ----------
        dtype : data-type, optional
            Any object that can be interpreted as a numpy or cupy data type.
        shape : int or tuple of ints, optional
            Shape of created array.
        order: string, optional
            Whether to create a F-major or C-major array.
        """
        # Thin convenience wrapper over ``full`` with value=0.
        return CumlArray.full(
            value=0,
            shape=shape,
            dtype=dtype,
            order=order,
            index=index,
            mem_type=mem_type,
        )
    @classmethod
    @nvtx_annotate(
        message="common.CumlArray.ones", category="utils", domain="cuml_python"
    )
    def ones(
        cls, shape, dtype="float32", order="F", index=None, mem_type=None
    ):
        """
        Create an Array with an allocated DeviceBuffer initialized to ones.

        Parameters
        ----------
        dtype : data-type, optional
            Any object that can be interpreted as a numpy or cupy data type.
        shape : int or tuple of ints, optional
            Shape of created array.
        order: string, optional
            Whether to create a F-major or C-major array.
        """
        # Delegates to ``full`` with a fill value of one.
        return CumlArray.full(
            value=1,
            shape=shape,
            dtype=dtype,
            order=order,
            index=index,
            mem_type=mem_type,
        )
    @classmethod
    @nvtx_annotate(
        message="common.CumlArray.from_input",
        category="utils",
        domain="cuml_python",
    )
    def from_input(
        cls,
        X,
        order="F",
        deepcopy=False,
        check_dtype=False,
        convert_to_dtype=False,
        check_mem_type=False,
        convert_to_mem_type=None,
        safe_dtype_conversion=True,
        check_cols=False,
        check_rows=False,
        fail_on_order=False,
        force_contiguous=True,
    ):
        """
        Convert input X to CumlArray.

        Acceptable input formats:

        * cuDF Dataframe - returns a deep copy always.
        * cuDF Series - returns by reference or a deep copy depending on
          `deepcopy`.
        * Numpy array - returns a copy in device always
        * cuda array interface compliant array (like Cupy) - returns a
          reference unless `deepcopy`=True.
        * numba device array - returns a reference unless deepcopy=True

        Parameters
        ----------
        X : cuDF.DataFrame, cuDF.Series, NumPy array, Pandas DataFrame, Pandas
            Series or any cuda_array_interface (CAI) compliant array like CuPy,
            Numba or pytorch.
        order: 'F', 'C' or 'K' (default: 'F')
            Whether to return a F-major ('F'), C-major ('C') array or Keep
            ('K') the order of X. Used to check the order of the input. If
            fail_on_order=True, the method will raise ValueError, otherwise it
            will convert X to be of order `order` if needed.
        deepcopy: boolean (default: False)
            Set to True to always return a deep copy of X.
        check_dtype: np.dtype (default: False)
            Set to a np.dtype to throw an error if X is not of dtype
            `check_dtype`.
        convert_to_dtype: np.dtype (default: False)
            Set to a dtype if you want X to be converted to that dtype if it is
            not that dtype already.
        check_mem_type: {'host', 'device'} (default: False)
            Set to a value to throw an error if X is not of memory type
            `check_mem_type`.
        convert_to_mem_type: {'host', 'device'} (default: None)
            Set to a value if you want X to be converted to that memory type if
            it is not that memory type already. Set to False if you do not want
            any memory conversion. Set to None to use
            `cuml.global_settings.memory_type`.
        safe_dtype_conversion: bool (default: True)
            Set to True to check whether a typecasting performed when
            convert_to_dtype is True will cause information loss. This has a
            performance implication that might be significant for very fast
            methods like FIL and linear models inference.
        check_cols: int (default: False)
            Set to an int `i` to check that input X has `i` columns. Set to
            False (default) to not check at all.
        check_rows: int (default: False)
            Set to an int `i` to check that input X has `i` rows. Set to
            False (default) to not check at all.
        fail_on_order: boolean (default: False)
            Set to True if you want the method to raise a ValueError if X is
            not of order `order`.
        force_contiguous: boolean (default: True)
            Set to True to force CumlArray produced to be contiguous. If `X` is
            non contiguous then a contiguous copy will be done.
            If False, and `X` doesn't need to be converted and is not
            contiguous, the underlying memory underneath the CumlArray will be
            non contiguous. Only affects CAI inputs. Only affects CuPy and
            Numba device array views, all other input methods produce
            contiguous CumlArrays.

        Returns
        -------
        arr: CumlArray
            A new CumlArray
        """
        # Resolve the requested memory type: None means "use the global
        # setting"; a string is normalized to a MemoryType; False means
        # "no memory conversion" and is kept as-is.
        if convert_to_mem_type is None:
            convert_to_mem_type = GlobalSettings().memory_type
        else:
            convert_to_mem_type = (
                MemoryType.from_str(convert_to_mem_type)
                if convert_to_mem_type
                else convert_to_mem_type
            )
        if convert_to_dtype:
            convert_to_dtype = host_xpy.dtype(convert_to_dtype)
        # Provide fast-path for CumlArray input
        if (
            isinstance(X, CumlArray)
            and (
                not convert_to_mem_type
                or convert_to_mem_type == MemoryType.mirror
                or convert_to_mem_type == X.mem_type
            )
            and (not convert_to_dtype or convert_to_dtype == X.dtype)
            and (not force_contiguous or X.is_contiguous)
            and (order in ("K", None) or X.order == order)
            and not check_dtype
            and not check_mem_type
            and not check_cols
            and not check_rows
        ):
            if deepcopy:
                return copy.deepcopy(X)
            else:
                return X
        # Dask collections must be materialized before conversion.
        if isinstance(
            X, (DaskCudfSeries, DaskCudfDataFrame, DaskSeries, DaskDataFrame)
        ):
            # TODO: Warn, but not when using dask_sql
            X = X.compute()
        # Preserve any pandas/cudf index, migrating it to match the target
        # memory type where needed.
        index = getattr(X, "index", None)
        if index is not None:
            if convert_to_mem_type is MemoryType.host and isinstance(
                index, CudfIndex
            ):
                index = index.to_pandas()
            elif convert_to_mem_type is MemoryType.device and isinstance(
                index, PandasIndex
            ):
                try:
                    index = CudfIndex.from_pandas(index)
                except TypeError:
                    index = CudfIndex(index)
        if isinstance(X, CudfSeries):
            if X.null_count != 0:
                raise ValueError(
                    "Error: cuDF Series has missing/null values, "
                    "which are not supported by cuML."
                )
        # Unwrap dataframe-like inputs to their underlying dense arrays.
        if isinstance(X, CudfDataFrame):
            X = X.to_cupy(copy=False)
        elif isinstance(X, (PandasDataFrame, PandasSeries)):
            X = X.to_numpy(copy=False)
        elif hasattr(X, "__dataframe__"):
            # temporarily use this codepath to avoid errors, substitute
            # usage of dataframe interchange protocol once ready.
            X = X.to_numpy()
            # to_numpy() already copied, so a further deepcopy is redundant.
            deepcopy = False
        # When fail_on_order is set we must not silently reorder here;
        # the order check below will raise instead.
        requested_order = (order, None)[fail_on_order]
        arr = cls(X, index=index, order=requested_order, validate=False)
        if deepcopy:
            arr = copy.deepcopy(arr)
        if convert_to_mem_type == MemoryType.mirror:
            convert_to_mem_type = arr.mem_type
        if convert_to_dtype:
            convert_to_dtype = arr.mem_type.xpy.dtype(convert_to_dtype)
        conversion_required = (
            convert_to_dtype and (convert_to_dtype != arr.dtype)
        ) or (convert_to_mem_type and (convert_to_mem_type != arr.mem_type))
        make_copy = False
        if conversion_required:
            convert_to_dtype = convert_to_dtype or None
            convert_to_mem_type = convert_to_mem_type or None
            # Optional lossiness check: if the cast is not "safe", verify
            # every value fits in the target dtype's representable range.
            if (
                safe_dtype_conversion
                and convert_to_dtype is not None
                and not arr.mem_type.xpy.can_cast(
                    arr.dtype, convert_to_dtype, casting="safe"
                )
            ):
                try:
                    target_dtype_range = arr.mem_type.xpy.iinfo(
                        convert_to_dtype
                    )
                except ValueError:
                    target_dtype_range = arr.mem_type.xpy.finfo(
                        convert_to_dtype
                    )
                if is_numba_array(X):
                    X = cp.asarray(X)
                if (
                    (X < target_dtype_range.min) | (X > target_dtype_range.max)
                ).any():
                    raise TypeError(
                        "Data type conversion on values outside"
                        " representable range of target dtype"
                    )
            arr = cls(
                arr.to_output(
                    output_dtype=convert_to_dtype,
                    output_mem_type=convert_to_mem_type,
                ),
                order=requested_order,
                index=index,
                validate=False,
            )
            make_copy = force_contiguous and not arr.is_contiguous
        # Reorder and/or force a contiguous copy when needed (but never
        # reorder when the caller asked us to fail on wrong order).
        if (
            not fail_on_order and order != arr.order and order != "K"
        ) or make_copy:
            arr = cls(
                arr.mem_type.xpy.array(
                    arr.to_output("array"), order=order, copy=make_copy
                ),
                index=index,
            )
        n_rows = arr.shape[0]
        if len(arr.shape) > 1:
            n_cols = arr.shape[1]
        else:
            n_cols = 1
        # A 2D vector (single row or column) satisfies any order.
        if (n_cols == 1 or n_rows == 1) and len(arr.shape) == 2:
            order = "K"
        if order != "K" and arr.order != order:
            if order == "F":
                order_str = "column ('F')"
            elif order == "C":
                order_str = "row ('C')"
            else:
                order_str = f"UNKNOWN ('{order}')"
            if fail_on_order:
                raise ValueError(
                    f"Expected {order_str} major order but got something else."
                )
            else:
                debug(
                    f"Expected {order_str} major order but got something else."
                    " Converting data; this will result in additional memory"
                    " utilization."
                )
        # Validation checks requested by the caller.
        if check_dtype:
            try:
                check_dtype = [
                    arr.mem_type.xpy.dtype(dtype) for dtype in check_dtype
                ]
            except TypeError:
                # A single dtype (not iterable) was passed.
                check_dtype = [arr.mem_type.xpy.dtype(check_dtype)]
            if arr.dtype not in check_dtype:
                raise TypeError(
                    f"Expected input to be of type in {check_dtype} but got"
                    f" {arr.dtype}"
                )
        if check_cols:
            if n_cols != check_cols:
                raise ValueError(
                    f"Expected {check_cols} columns but got {n_cols}"
                    " columns."
                )
        if check_rows:
            if n_rows != check_rows:
                raise ValueError(
                    f"Expected {check_rows} rows but got {n_rows}" " rows."
                )
        return arr
def array_to_memory_order(arr, default="C"):
    """
    Determine the memory layout of an array-like object.

    Returns 'C' for C-contiguous input and 'F' for F-contiguous input. If
    arr is neither C nor F contiguous, None is returned. If arr is both C
    and F contiguous, the indicated default is returned; a default of None
    or 'K' resolves to 'C' in that case.
    """
    # Fast path: CumlArray already knows its order.
    try:
        return arr.order
    except AttributeError:
        pass
    # Prefer the CUDA array interface, then the CPU one; as a last resort
    # round-trip through CumlArray, which normalizes arbitrary inputs.
    array_interface = getattr(arr, "__cuda_array_interface__", None)
    if array_interface is None:
        array_interface = getattr(arr, "__array_interface__", None)
    if array_interface is None:
        return array_to_memory_order(CumlArray.from_input(arr, order="K"))
    # A strides entry of None means "contiguous"; fall back to the
    # object's own strides attribute when the interface omits them.
    strides = array_interface.get("strides", None)
    if strides is None:
        strides = getattr(arr, "strides", None)
    return _determine_memory_order(
        array_interface["shape"],
        strides,
        array_interface["typestr"],
        default=default,
    )
def is_array_contiguous(arr):
    """Return True if ``arr`` is C- or F-contiguous."""
    # Fast path for CumlArray, which caches this property.
    try:
        return arr.is_contiguous
    except AttributeError:
        pass
    # Fast path for cupy/numpy arrays via their flags object.
    try:
        flags = arr.flags
        return flags["C_CONTIGUOUS"] or flags["F_CONTIGUOUS"]
    except (AttributeError, KeyError):
        # Generic fallback: a determinable order implies contiguity.
        return array_to_memory_order(arr) is not None
def elements_in_representable_range(arr, dtype):
    """Return True when every element of ``arr`` fits within the value
    range representable by ``dtype``."""
    arr = CumlArray.from_input(arr)
    dtype = arr.mem_type.xpy.dtype(dtype)
    # Integer dtypes expose their bounds via iinfo, floats via finfo.
    try:
        bounds = arr.mem_type.xpy.iinfo(dtype)
    except ValueError:
        bounds = arr.mem_type.xpy.finfo(dtype)
    data = arr.to_output("array")
    out_of_range = ((data < bounds.min) | (data > bounds.max)).any()
    return not out_of_range
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/type_utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import typing
from cuml.internals.safe_imports import gpu_only_import, UnavailableError
cp = gpu_only_import("cupy")
try:
    # Those are the only data types supported by cupyx.scipy.sparse matrices.
    CUPY_SPARSE_DTYPES = [cp.float32, cp.float64, cp.complex64, cp.complex128]
except UnavailableError:
    # cupy is unavailable: attribute access on the gpu_only_import
    # placeholder raises UnavailableError, so fall back to an empty list.
    CUPY_SPARSE_DTYPES = []
# Use _DecoratorType as a type variable for decorators. See:
# https://github.com/python/mypy/pull/8336/files#diff-eb668b35b7c0c4f88822160f3ca4c111f444c88a38a3b9df9bb8427131538f9cR260
# Bounding by Callable lets decorators declare that they return the same
# callable type they received.
_DecoratorType = typing.TypeVar(
    "_DecoratorType", bound=typing.Callable[..., typing.Any]
)
def wraps_typed(
    wrapped: _DecoratorType,
    assigned=("__doc__", "__annotations__"),
    updated=functools.WRAPPER_UPDATES,
) -> typing.Callable[[_DecoratorType], _DecoratorType]:
    """
    Typed version of `functools.wraps`. Allows decorators to retain their
    return type.
    """
    # functools.wraps is itself just a partial application of
    # functools.update_wrapper; spelling that out yields the exact same
    # runtime object while keeping the typed signature above.
    return functools.partial(
        functools.update_wrapper,
        wrapped=wrapped,
        assigned=assigned,
        updated=updated,
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/api_context_managers.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import typing
from collections import deque
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
import cuml.internals.input_utils
import cuml.internals.memory_utils
from cuml.internals.array_sparse import SparseCumlArray
if TYPE_CHECKING:
from cuml.internals.base import Base
from cuml.internals.global_settings import GlobalSettings
from cuml.internals.mem_type import MemoryType
from cuml.internals.safe_imports import (
gpu_only_import_from,
UnavailableNullContext,
)
cupy_using_allocator = gpu_only_import_from(
"cupy.cuda", "using_allocator", alt=UnavailableNullContext
)
rmm_cupy_allocator = gpu_only_import_from(
"rmm.allocators.cupy", "rmm_cupy_allocator"
)
@contextlib.contextmanager
def _using_mirror_output_type():
    """
    Temporarily force ``global_settings.output_type`` to "mirror" for
    internal API handling. A separate helper is needed because
    ``cuml.using_output_type()`` does not accept "mirror".

    Yields
    ------
    string
        The previous value of ``global_settings.output_type``.
    """
    saved_output_type = GlobalSettings().output_type
    try:
        GlobalSettings().output_type = "mirror"
        yield saved_output_type
    finally:
        # Always restore the caller's output type, even on error.
        GlobalSettings().output_type = saved_output_type
def in_internal_api():
    """Return True while a root internal-API context manager is active."""
    root_context = GlobalSettings().root_cm
    return root_context is not None
def set_api_output_type(output_type: str):
    """Record the output type on the active root context manager.

    ``output_type`` may be a type-name string or an array-like object
    whose type is then inferred.
    """
    root = GlobalSettings().root_cm
    assert root is not None
    # Plain string: store it directly.
    if isinstance(output_type, str):
        root.output_type = output_type
        return
    # Otherwise infer the type name from an array-like object.
    inferred_type = cuml.internals.input_utils.determine_array_type(
        output_type
    )
    # Anything that is not None must have been array-like.
    assert output_type is None or inferred_type is not None
    root.output_type = inferred_type
def set_api_memory_type(mem_type):
    """Record the memory type on the active root context manager.

    ``mem_type`` may be a memory-type name or an array-like object whose
    memory type is then inferred.
    """
    assert GlobalSettings().root_cm is not None
    try:
        resolved = MemoryType.from_str(mem_type)
    except ValueError:
        # Not a recognized name; infer from an array-like object instead.
        resolved = cuml.internals.memory_utils.determine_array_memtype(
            mem_type
        )
    GlobalSettings().root_cm.memory_type = resolved
def set_api_output_dtype(output_dtype):
    """Record the output dtype on the active root context manager.

    ``output_dtype`` may be a dtype or an array-like object whose dtype is
    then inferred.
    """
    assert GlobalSettings().root_cm is not None
    is_arraylike = (
        output_dtype is not None
        and cuml.internals.input_utils.is_array_like(output_dtype)
    )
    if is_arraylike:
        # An array-like was passed in; use its dtype instead.
        output_dtype = cuml.internals.input_utils.determine_array_dtype(
            output_dtype
        )
        assert output_dtype is not None
    GlobalSettings().root_cm.output_dtype = output_dtype
class InternalAPIContext(contextlib.ExitStack):
def __init__(self):
super().__init__()
def cleanup():
GlobalSettings().root_cm = None
self.callback(cleanup)
self.enter_context(cupy_using_allocator(rmm_cupy_allocator))
self.prev_output_type = self.enter_context(_using_mirror_output_type())
self._output_type = None
self._memory_type = None
self.output_dtype = None
# Set the output type to the prev_output_type. If "input", set to None
# to allow inner functions to specify the input
self.output_type = (
None if self.prev_output_type == "input" else self.prev_output_type
)
self._count = 0
self.call_stack = {}
GlobalSettings().root_cm = self
@property
def output_type(self):
return self._output_type
@output_type.setter
def output_type(self, value: str):
self._output_type = value
@property
def memory_type(self):
return self._memory_type
@memory_type.setter
def memory_type(self, value):
self._memory_type = MemoryType.from_str(value)
def pop_all(self):
"""Preserve the context stack by transferring it to a new instance."""
new_stack = contextlib.ExitStack()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack
def __enter__(self) -> int:
self._count += 1
return self._count
def __exit__(self, *exc_details):
self._count -= 1
return
@contextlib.contextmanager
def push_output_types(self):
try:
old_output_type = self.output_type
old_output_dtype = self.output_dtype
self.output_type = None
self.output_dtype = None
yield
finally:
self.output_type = (
old_output_type
if old_output_type is not None
else self.output_type
)
self.output_dtype = (
old_output_dtype
if old_output_dtype is not None
else self.output_dtype
)
def get_internal_context() -> InternalAPIContext:
    """Return the current "root" context manager used to control output
    type for external API calls and minimize unnecessary internal output
    conversions, creating it if none is active."""
    settings = GlobalSettings()
    if settings.root_cm is None:
        # InternalAPIContext installs itself on GlobalSettings().root_cm.
        settings.root_cm = InternalAPIContext()
    return settings.root_cm
class ProcessEnter(object):
    """Runs a queue of callbacks when an internal API context is entered."""

    def __init__(self, context: "InternalAPIContextBase"):
        super().__init__()
        self._context = context
        # Callbacks executed, in insertion order, by process_enter().
        self._process_enter_cbs: typing.Deque[typing.Callable] = deque()

    def process_enter(self):
        """Invoke every registered enter-callback in order."""
        for callback in self._process_enter_cbs:
            callback()
class ProcessReturn(object):
    """Threads a wrapped function's return value through a pipeline of
    callbacks when an internal API context exits."""

    def __init__(self, context: "InternalAPIContextBase"):
        super().__init__()
        self._context = context
        # Each callback receives the current value and returns the next.
        self._process_return_cbs: typing.Deque[
            typing.Callable[[typing.Any], typing.Any]
        ] = deque()

    def process_return(self, ret_val):
        """Apply every registered callback, in order, to ``ret_val``."""
        result = ret_val
        for callback in self._process_return_cbs:
            result = callback(result)
        return result
# Type variables binding the enter/return processors that parameterize
# InternalAPIContextBase (see its __class_getitem__).
EnterT = typing.TypeVar("EnterT", bound=ProcessEnter)
ProcessT = typing.TypeVar("ProcessT", bound=ProcessReturn)
class InternalAPIContextBase(
    contextlib.ExitStack, typing.Generic[EnterT, ProcessT]
):
    # Per-call context manager wrapping a decorated API function. Concrete
    # enter/return behavior is supplied by the ProcessEnter/ProcessReturn
    # types bound via subscription (see the *CM aliases below).
    ProcessEnter_Type: typing.Type[EnterT] = None
    ProcessReturn_Type: typing.Type[ProcessT] = None

    def __init__(self, func=None, args=None):
        super().__init__()

        # The wrapped function and the positional args it was called with
        # (args[0] is the estimator instance for Base-method contexts).
        self._func = func
        self._args = args

        self.root_cm = get_internal_context()
        self.is_root = False

        self._enter_obj: ProcessEnter = self.ProcessEnter_Type(self)
        self._process_obj: ProcessReturn = None

    def __enter__(self):
        # Enter the root context to know if we are the root cm
        self.is_root = self.enter_context(self.root_cm) == 1

        # If we are the first, push any callbacks from the root into this CM
        # If we are not the first, this will have no effect
        self.push(self.root_cm.pop_all())

        self._enter_obj.process_enter()

        # Now create the process functions since we know if we are root or not
        self._process_obj = self.ProcessReturn_Type(self)

        return super().__enter__()

    def process_return(self, ret_val):
        # Run the bound return-processing pipeline on the wrapped
        # function's return value.
        return self._process_obj.process_return(ret_val)

    def __class_getitem__(cls: typing.Type["InternalAPIContextBase"], params):
        # Subscripting (Base[EnterType, ReturnType]) manufactures a new
        # subclass with the two processor types bound as class attributes.
        param_names = [
            param.__name__ if hasattr(param, "__name__") else str(param)
            for param in params
        ]

        type_name = f'{cls.__name__}[{", ".join(param_names)}]'

        ns = {
            "ProcessEnter_Type": params[0],
            "ProcessReturn_Type": params[1],
        }

        return type(type_name, (cls,), ns)
class ProcessEnterBaseMixin(ProcessEnter):
    # Mixin for contexts wrapping estimator methods: captures the
    # estimator instance (``self`` of the wrapped call) from the context.
    def __init__(self, context: "InternalAPIContextBase"):
        super().__init__(context)

        # args[0] is the Base estimator the decorated method was called on.
        self.base_obj: Base = self._context._args[0]
class ProcessEnterReturnAny(ProcessEnter):
    """Enter-processing for APIs returning arbitrary values: a no-op."""

    pass
class ProcessEnterReturnArray(ProcessEnter):
    # Enter-processing for APIs returning arrays: pushes a fresh
    # output-type/dtype scope onto the root context on entry.
    def __init__(self, context: "InternalAPIContextBase"):
        super().__init__(context)

        self._process_enter_cbs.append(self.push_output_types)

    def push_output_types(self):
        # The scope is popped automatically when the context exits.
        self._context.enter_context(self._context.root_cm.push_output_types())
class ProcessEnterBaseReturnArray(
    ProcessEnterReturnArray, ProcessEnterBaseMixin
):
    # Enter-processing for estimator methods returning arrays: besides
    # pushing a fresh output-type scope, registers a callback that resolves
    # the final output/memory type from the estimator when the caller did
    # not specify one.
    def __init__(self, context: "InternalAPIContextBase"):
        super().__init__(context)

        # IMPORTANT: Only perform output type processing if
        # `root_cm.output_type` is None. Since we default to using the incoming
        # value if its set, there is no need to do any processing if the user
        # has specified the output type
        if (
            self._context.root_cm.prev_output_type is None
            or self._context.root_cm.prev_output_type == "input"
        ):
            self._process_enter_cbs.append(self.base_output_type_callback)

    def base_output_type_callback(self):
        root_cm = self._context.root_cm

        def set_output_type():
            output_type = root_cm.output_type
            mem_type = root_cm.memory_type

            # Check if output_type is None, can happen if no output type has
            # been set by estimator
            if output_type is None:
                output_type = self.base_obj.output_type
            if mem_type is None:
                mem_type = self.base_obj.output_mem_type
            if output_type == "input":
                # "input" resolves to the type/memtype seen by the
                # estimator during fit.
                output_type = self.base_obj._input_type
                mem_type = self.base_obj._input_mem_type
            if mem_type is None:
                mem_type = GlobalSettings().memory_type
            if output_type != root_cm.output_type:
                set_api_output_type(output_type)
            if mem_type != root_cm.memory_type:
                set_api_memory_type(mem_type)

            assert output_type != "mirror"

        # Registered as an exit callback so the estimator state is final
        # when the output type is resolved.
        self._context.callback(set_output_type)
class ProcessReturnAny(ProcessReturn):
    """Return-processing for APIs returning arbitrary values: a no-op."""

    pass
class ProcessReturnArray(ProcessReturn):
    # Return-processing for APIs that return a single array: convert the
    # raw return value to a CumlArray and, when this is the outermost API
    # call (or output mirroring is off), on to the user-facing type.
    def __init__(self, context: "InternalAPIContextBase"):
        super().__init__(context)

        self._process_return_cbs.append(self.convert_to_cumlarray)

        if self._context.is_root or GlobalSettings().output_type != "mirror":
            self._process_return_cbs.append(self.convert_to_outputtype)

    def convert_to_cumlarray(self, ret_val):
        # Get the output type
        (
            ret_val_type_str,
            is_sparse,
        ) = cuml.internals.input_utils.determine_array_type_full(ret_val)

        # If we are a supported array and not already cuml, convert to cuml
        if ret_val_type_str is not None and ret_val_type_str != "cuml":
            if is_sparse:
                ret_val = SparseCumlArray(
                    ret_val,
                    convert_to_mem_type=GlobalSettings().memory_type,
                    convert_index=False,
                )
            else:
                ret_val = cuml.internals.input_utils.input_to_cuml_array(
                    ret_val,
                    convert_to_mem_type=GlobalSettings().memory_type,
                    order="K",
                ).array

        return ret_val

    def convert_to_outputtype(self, ret_val):
        output_type = GlobalSettings().output_type
        memory_type = GlobalSettings().memory_type

        # "mirror"/"input" are internal sentinels; fall back to the values
        # recorded on the root context manager.
        if (
            output_type is None
            or output_type == "mirror"
            or output_type == "input"
        ):
            output_type = self._context.root_cm.output_type
        if GlobalSettings().memory_type in (None, MemoryType.mirror):
            memory_type = self._context.root_cm.memory_type

        assert (
            output_type is not None
            and output_type != "mirror"
            and output_type != "input"
        ), ("Invalid root_cm.output_type: " "'{}'.").format(output_type)

        return ret_val.to_output(
            output_type=output_type,
            output_dtype=self._context.root_cm.output_dtype,
            output_mem_type=memory_type,
        )
class ProcessReturnSparseArray(ProcessReturnArray):
    """
    Return-processing for APIs that may return sparse arrays.

    NOTE(review): the ``convert_to_cumlarray`` override previously defined
    here was byte-for-byte identical to
    ``ProcessReturnArray.convert_to_cumlarray`` (it already dispatches on
    ``is_sparse``), so the inherited implementation is used instead of
    duplicating it. Behavior is unchanged; if sparse returns ever need
    different handling (e.g. index conversion), reintroduce a real
    override here.
    """
class ProcessReturnGeneric(ProcessReturnArray):
    """Return-processing for APIs that may return nested containers:
    array-like values found anywhere inside tuples, dicts and lists are
    converted via the per-array pipeline."""

    def __init__(self, context: "InternalAPIContextBase"):
        super().__init__(context)

        # Keep the per-array callbacks aside and route everything through
        # the recursive generic processor instead.
        self._single_array_cbs = self._process_return_cbs
        self._process_return_cbs = deque((self.process_generic,))

    def process_single(self, ret_val):
        """Apply the per-array callbacks to one array-like value."""
        for callback in self._single_array_cbs:
            ret_val = callback(ret_val)
        return ret_val

    def process_tuple(self, ret_val: tuple):
        """Process each tuple element, returning a new tuple."""
        return tuple(self.process_generic(item) for item in ret_val)

    def process_dict(self, ret_val):
        """Process each dict value in place."""
        for key in list(ret_val):
            ret_val[key] = self.process_generic(ret_val[key])
        return ret_val

    def process_list(self, ret_val):
        """Process each list element in place."""
        for position, item in enumerate(ret_val):
            ret_val[position] = self.process_generic(item)
        return ret_val

    def process_generic(self, ret_val):
        """Recursively process a value of any supported shape."""
        if cuml.internals.input_utils.is_array_like(ret_val):
            return self.process_single(ret_val)

        for container_type, handler in (
            (tuple, self.process_tuple),
            (dict, self.process_dict),
            (list, self.process_list),
        ):
            if isinstance(ret_val, container_type):
                return handler(ret_val)

        # Anything else passes through untouched.
        return ret_val
class ReturnAnyCM(
    InternalAPIContextBase[ProcessEnterReturnAny, ProcessReturnAny]
):
    """Context manager for free functions whose return value is passed
    through unprocessed."""

    pass
class ReturnArrayCM(
    InternalAPIContextBase[ProcessEnterReturnArray, ProcessReturnArray]
):
    """Context manager for free functions returning a dense array."""

    pass
class ReturnSparseArrayCM(
    InternalAPIContextBase[ProcessEnterReturnArray, ProcessReturnSparseArray]
):
    """Context manager for free functions returning a (possibly sparse)
    array."""

    pass
class ReturnGenericCM(
    InternalAPIContextBase[ProcessEnterReturnArray, ProcessReturnGeneric]
):
    """Context manager for free functions returning arrays possibly nested
    inside tuples/dicts/lists."""

    pass
class BaseReturnAnyCM(
    InternalAPIContextBase[ProcessEnterReturnAny, ProcessReturnAny]
):
    """Context manager for estimator methods whose return value is passed
    through unprocessed."""

    pass
class BaseReturnArrayCM(
    InternalAPIContextBase[ProcessEnterBaseReturnArray, ProcessReturnArray]
):
    """Context manager for estimator methods returning a dense array."""

    pass
class BaseReturnSparseArrayCM(
    InternalAPIContextBase[
        ProcessEnterBaseReturnArray, ProcessReturnSparseArray
    ]
):
    """Context manager for estimator methods returning a (possibly sparse)
    array."""

    pass
class BaseReturnGenericCM(
    InternalAPIContextBase[ProcessEnterBaseReturnArray, ProcessReturnGeneric]
):
    """Context manager for estimator methods returning arrays possibly
    nested inside tuples/dicts/lists."""

    pass
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/__init__.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.available_devices import is_cuda_available
from cuml.internals.base_helpers import BaseMetaClass, _tags_class_and_instance
from cuml.internals.api_decorators import (
_deprecate_pos_args,
api_base_fit_transform,
api_base_return_any_skipall,
api_base_return_any,
api_base_return_array_skipall,
api_base_return_array,
api_base_return_generic_skipall,
api_base_return_generic,
api_base_return_sparse_array,
api_return_any,
api_return_array,
api_return_generic,
api_return_sparse_array,
exit_internal_api,
)
from cuml.internals.api_context_managers import (
in_internal_api,
set_api_output_dtype,
set_api_output_type,
)
if is_cuda_available():
from cuml.internals.internals import GraphBasedDimRedCallback
from cuml.internals.constants import CUML_WRAPPED_FLAG
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/mixins.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
from copy import deepcopy
from cuml.common.doc_utils import generate_docstring
from cuml.internals.api_decorators import api_base_return_any_skipall
from cuml.internals.base_helpers import _tags_class_and_instance
from cuml.internals.api_decorators import enable_device_interop
###############################################################################
# Tag Functionality Mixin #
###############################################################################
# Default tags for estimators inheritting from Base.
# tag system based on experimental tag system from Scikit-learn >=0.21
# https://scikit-learn.org/stable/developers/develop.html#estimator-tags
_default_tags = {
    # cuML specific tags
    # Preferred memory layout of inputs: 'F', 'C', or None (no preference).
    "preferred_input_order": None,
    "X_types_gpu": ["2darray"],
    # Scikit-learn API standard tags
    "allow_nan": False,
    "binary_only": False,
    "multilabel": False,
    "multioutput": False,
    "multioutput_only": False,
    "no_validation": False,
    "non_deterministic": False,
    "pairwise": False,
    "poor_score": False,
    "preserves_dtype": [],
    "requires_fit": True,
    "requires_positive_X": False,
    "requires_positive_y": False,
    "requires_y": False,
    "stateless": False,
    "X_types": ["2darray"],
    "_skip_test": False,
    "_xfail_checks": False,
}
class TagsMixin:
    @_tags_class_and_instance
    def _get_tags(cls):
        """
        Method that collects all the static tags associated to any
        inheritting class. The Base class for cuML's estimators already
        uses this mixin, so most estimators don't need to use this Mixin
        directly.

        - Tags usage:
        In general, inheriting classes can use the appropriate Mixins defined
        in this file. Additional static tags can be defined by the
        `_get_static_tags` method like:

        ```
        @staticmethod
        def _more_static_tags():
            return {
                "requires_y": True
            }
        ```

        The method traverses the MRO in reverse
        order, i.e. the closer the parent to the final class will be
        explored later, so that children classes can overwrite their
        parent tags.

        - Mixin Usage
        If your class is not inheritting from cuml's Base
        then your class can use composition from this Mixin to get the tags
        behavior. If you want your class to have default tags different than
        the ones defined in this file, then implement the `_default_tags`
        method that returns a dictionary, like:

        class BaseClassWithTags(TagMixin)
            @staticmethod
            def _default_tags():
                return {'tag1': True, 'tag2': False}

        Method and code based on scikit-learn 0.21 _get_tags functionality:
        https://scikit-learn.org/stable/developers/develop.html#estimator-tags

        Examples
        --------
        >>> import cuml
        >>>
        >>> cuml.DBSCAN._get_tags()
        {'preferred_input_order': 'C', 'X_types_gpu': ['2darray'],
        'non_deterministic': False, 'requires_positive_X': False,
        'requires_positive_y': False, 'X_types': ['2darray'],
        'poor_score': False, 'no_validation': False, 'multioutput': False,
        'allow_nan': False, 'stateless': False, 'multilabel': False,
        '_skip_test': False, '_xfail_checks': False, 'multioutput_only': False,
        'binary_only': False, 'requires_fit': True, 'requires_y': False,
        'pairwise': False}
        """
        # Start from class-provided defaults when available, otherwise the
        # module-level defaults (deep-copied so updates don't leak back).
        if hasattr(cls, "_default_tags"):
            tags = cls._default_tags()
        else:
            tags = deepcopy(_default_tags)
        # Reverse MRO: subclasses override their parents' static tags.
        for cl in reversed(inspect.getmro(cls)):
            if hasattr(cl, "_more_static_tags"):
                more_tags = cl._more_static_tags()
                tags.update(more_tags)
        return tags

    @_get_tags.instance_method
    def _get_tags(self):
        """
        Method to add dynamic tags capability to objects. Useful for cases
        where a tag depends on a value of an instantiated object. Dynamic tags
        will override class static tags, and can be defined with the
        _more_tags method in inheritting classes like:

        def _more_tags(self):
            return {'no_validation': not self.validate}

        Follows the same logic regarding the MRO as the static _get_tags.
        First it collects all the static tags of the reversed MRO, and then
        collects the dynamic tags and overwrites the corresponding static
        ones.

        Examples
        --------
        >>> import cuml
        >>>
        >>> estimator = cuml.DBSCAN()
        >>> estimator._get_tags()
        {'preferred_input_order': 'C', 'X_types_gpu': ['2darray'],
        'non_deterministic': False, 'requires_positive_X': False,
        'requires_positive_y': False, 'X_types': ['2darray'],
        'poor_score': False, 'no_validation': False, 'multioutput': False,
        'allow_nan': False, 'stateless': False, 'multilabel': False,
        '_skip_test': False, '_xfail_checks': False, 'multioutput_only': False,
        'binary_only': False, 'requires_fit': True, 'requires_y': False,
        'pairwise': False}
        """
        if hasattr(self, "_default_tags"):
            tags = self._default_tags()
        else:
            tags = deepcopy(_default_tags)
        dynamic_tags = {}
        # Collect static tags first, then dynamic ones; dynamic tags win.
        for cl in reversed(inspect.getmro(self.__class__)):
            if hasattr(cl, "_more_static_tags"):
                more_tags = cl._more_static_tags()
                tags.update(more_tags)
            if hasattr(cl, "_more_tags"):
                more_tags = cl._more_tags(self)
                dynamic_tags.update(more_tags)
        tags.update(dynamic_tags)
        return tags
###############################################################################
# Estimator Type Mixins #
# Estimators should only use one of these. #
###############################################################################
class RegressorMixin:
    """
    Mixin class for regression estimators in cuML: provides the standard
    R^2 ``score`` method and the corresponding estimator tags.
    """

    _estimator_type = "regressor"

    @generate_docstring(
        return_values={
            "name": "score",
            "type": "float",
            "description": "R^2 of self.predict(X) wrt. y.",
        }
    )
    @api_base_return_any_skipall
    @enable_device_interop
    def score(self, X, y, **kwargs):
        """
        Scoring function for regression estimators

        Returns the coefficient of determination R^2 of the prediction.
        """
        from cuml.metrics.regression import r2_score

        # Reuse the estimator's handle when it has one.
        handle = getattr(self, "handle", None)
        predictions = self.predict(X, **kwargs)
        return r2_score(y, predictions, handle=handle)

    @staticmethod
    def _more_static_tags():
        # Regression is supervised: target values are required.
        return {"requires_y": True}
class ClassifierMixin:
    """
    Mixin marking a cuML estimator as a classifier and providing a
    mean-accuracy ``score`` method.
    """

    _estimator_type = "classifier"

    @generate_docstring(
        return_values={
            "name": "score",
            "type": "float",
            "description": (
                "Accuracy of self.predict(X) wrt. y "
                "(fraction where y == pred_y)"
            ),
        }
    )
    @api_base_return_any_skipall
    @enable_device_interop
    def score(self, X, y, **kwargs):
        """
        Score this classifier by the mean accuracy of its predictions on X
        against the true labels y.
        """
        from cuml.metrics.accuracy import accuracy_score

        # Reuse the estimator's handle when it has one.
        handle = getattr(self, "handle", None)
        predictions = self.predict(X, **kwargs)
        return accuracy_score(y, predictions, handle=handle)

    @staticmethod
    def _more_static_tags():
        # Classification always requires labels at fit time.
        return dict(requires_y=True)
class ClusterMixin:
    """
    Mixin marking a cuML estimator as a clustering algorithm.
    """

    _estimator_type = "clusterer"

    @staticmethod
    def _more_static_tags():
        # Clustering is unsupervised: no labels required at fit time.
        return dict(requires_y=False)
###############################################################################
# Input Mixins #
# Estimators can use as many of these as needed. #
###############################################################################
class FMajorInputTagMixin:
    """
    Tag mixin: the estimator prefers column-major (Fortran-ordered) inputs.
    """

    @staticmethod
    def _more_static_tags():
        return dict(preferred_input_order="F")
class CMajorInputTagMixin:
    """
    Tag mixin: the estimator prefers row-major (C-ordered) inputs.
    """

    @staticmethod
    def _more_static_tags():
        return dict(preferred_input_order="C")
class SparseInputTagMixin:
    """
    Tag mixin: the estimator accepts dense and sparse inputs, both on GPU
    and on host.
    """

    @staticmethod
    def _more_static_tags():
        supported = ["2darray", "sparse"]
        return {
            "X_types_gpu": list(supported),
            "X_types": list(supported),
        }
class StringInputTagMixin:
    """
    Tag mixin: the estimator accepts array and string inputs, both on GPU
    and on host.
    """

    @staticmethod
    def _more_static_tags():
        supported = ["2darray", "string"]
        return {
            "X_types_gpu": list(supported),
            "X_types": list(supported),
        }
class AllowNaNTagMixin:
    """
    Tag mixin: the estimator tolerates NaN values in its inputs.
    """

    @staticmethod
    def _more_static_tags():
        return dict(allow_nan=True)
###############################################################################
# Other Mixins #
###############################################################################
class StatelessTagMixin:
    """
    Tag mixin: the estimator is stateless (fit stores no learned state).
    """

    @staticmethod
    def _more_static_tags():
        return dict(stateless=True)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/logger.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
IF GPUBUILD == 0:
import logging
IF GPUBUILD == 1:
    import sys
    from libcpp.string cimport string
    from libcpp cimport bool

    # C++ spdlog-backed logging singleton exposed by libcuml.
    cdef extern from "cuml/common/logger.hpp" namespace "ML" nogil:
        cdef cppclass Logger:
            @staticmethod
            Logger& get()
            void setLevel(int level)
            void setPattern(const string& pattern)
            # NOTE(review): both plain and `except *` overloads of
            # setCallback/setFlush are declared — confirm the duplicate
            # declarations are intentional (the `except *` pair lets Cython
            # propagate Python exceptions out of the callbacks).
            void setCallback(void(*callback)(int, char*))
            void setFlush(void(*flush)())
            void setCallback(void(*callback)(int, const char*) except *)
            void setFlush(void(*flush)() except *)
            bool shouldLogFor(int level) const
            int getLevel() const
            string getPattern() const
            void flush()

    # Variadic C logging macros and the numeric level constants they use.
    cdef extern from "cuml/common/logger.hpp" nogil:
        void CUML_LOG_TRACE(const char* fmt, ...)
        void CUML_LOG_DEBUG(const char* fmt, ...)
        void CUML_LOG_INFO(const char* fmt, ...)
        void CUML_LOG_WARN(const char* fmt, ...)
        void CUML_LOG_ERROR(const char* fmt, ...)
        void CUML_LOG_CRITICAL(const char* fmt, ...)

        cdef int CUML_LEVEL_TRACE
        cdef int CUML_LEVEL_DEBUG
        cdef int CUML_LEVEL_INFO
        cdef int CUML_LEVEL_WARN
        cdef int CUML_LEVEL_ERROR
        cdef int CUML_LEVEL_CRITICAL
        cdef int CUML_LEVEL_OFF

    # Python-visible aliases for the C++ level constants.
    # NOTE(review): these names only exist in GPU builds; CPU-only builds
    # (GPUBUILD == 0) have no `level_*` constants in this chunk — confirm a
    # fallback exists elsewhere for callers like logistic_regression.pyx.
    """Enables all log messages upto and including `trace()`"""
    level_trace = CUML_LEVEL_TRACE

    """Enables all log messages upto and including `debug()`"""
    level_debug = CUML_LEVEL_DEBUG

    """Enables all log messages upto and including `info()`"""
    level_info = CUML_LEVEL_INFO

    """Enables all log messages upto and including `warn()`"""
    level_warn = CUML_LEVEL_WARN

    """Enables all log messages upto and include `error()`"""
    level_error = CUML_LEVEL_ERROR

    """Enables only `critical()` messages"""
    level_critical = CUML_LEVEL_CRITICAL

    """Disables all log messages"""
    level_off = CUML_LEVEL_OFF
cdef void _log_callback(int lvl, const char * msg) with gil:
    """
    Default spdlogs callback function to redirect logs correctly to sys.stdout

    Parameters
    ----------
    lvl : int
        Level of the logging message as defined by spdlogs
    msg : char *
        Message to be logged
    """
    # end='' because the formatted message is expected to already carry its
    # trailing newline — TODO confirm against the spdlog pattern in use.
    print(msg.decode('utf-8'), end='')
cdef void _log_flush() with gil:
    """
    Default spdlogs callback function to flush logs
    """
    # sys.stdout may be None (e.g. in some embedded interpreters); guard it.
    if sys.stdout is not None:
        sys.stdout.flush()
class LogLevelSetter:
    """Internal "context manager" object for restoring previous log level"""

    def __init__(self, prev_log_level):
        # Level to restore when the context exits.
        self.prev_log_level = prev_log_level

    def __enter__(self):
        # Nothing to do on entry; set_level() already applied the new level.
        pass

    def __exit__(self, a, b, c):
        IF GPUBUILD == 1:
            # Restore the level that was active before set_level() was called.
            Logger.get().setLevel(<int>self.prev_log_level)
def set_level(level):
    """
    Set logging level. This setting will be persistent from here onwards until
    the end of the process, if left unchanged afterwards.

    Examples
    --------

    .. code-block:: python

        # regular usage of setting a logging level for all subsequent logs
        # in this case, it will enable all logs upto and including `info()`
        logger.set_level(logger.level_info)

        # in case one wants to temporarily set the log level for a code block
        with logger.set_level(logger.level_debug) as _:
            logger.debug("Hello world!")

    Parameters
    ----------
    level : int
        Logging level to be set. \
        It must be one of cuml.internals.logger.level_*

    Returns
    -------
    context_object : LogLevelSetter
        This is useful if one wants to temporarily set a different logging
        level for a code section, as described in the example section above.
    """
    IF GPUBUILD == 1:
        # Capture the current level first so the context manager can restore it.
        cdef int prev = Logger.get().getLevel()
        context_object = LogLevelSetter(prev)
        Logger.get().setLevel(<int>level)
        return context_object
    # NOTE(review): CPU-only builds (GPUBUILD == 0) fall through and return
    # None, so the `with` usage above would fail there — confirm intended.
class PatternSetter:
    """Internal "context manager" object for restoring previous log pattern"""

    def __init__(self, prev_pattern):
        # Pattern string to restore when the context exits.
        self.prev_pattern = prev_pattern

    def __enter__(self):
        # Nothing to do on entry; set_pattern() already applied the pattern.
        pass

    def __exit__(self, a, b, c):
        IF GPUBUILD == 1:
            # Restore the pattern that was active before set_pattern().
            cdef string s = self.prev_pattern.encode("utf-8")
            Logger.get().setPattern(s)
def set_pattern(pattern):
    """
    Set the logging pattern. This setting will be persistent from here onwards
    until the end of the process, if left unchanged afterwards.

    Examples
    --------
    >>> # regular usage of setting a logging pattern for all subsequent logs
    >>> import cuml.internals.logger as logger
    >>> logger.set_pattern("--> [%H-%M-%S] %v")
    <cuml.internals.logger.PatternSetter object at 0x...>
    >>> # in case one wants to temporarily set the pattern for a code block
    >>> with logger.set_pattern("--> [%H-%M-%S] %v") as _:
    ...     logger.info("Hello world!")
    --> [...] Hello world!

    Parameters
    ----------
    pattern : str
        Logging pattern string. Refer to this wiki page for its syntax:
        https://github.com/gabime/spdlog/wiki/3.-Custom-formatting

    Returns
    -------
    context_object : PatternSetter
        This is useful if one wants to temporarily set a different logging
        pattern for a code section, as described in the example section above.
    """
    IF GPUBUILD == 1:
        # Capture the current pattern so the context manager can restore it.
        cdef string prev = Logger.get().getPattern()
        context_object = PatternSetter(prev.decode("UTF-8"))
        cdef string s = pattern.encode("UTF-8")
        Logger.get().setPattern(s)
        return context_object
    # NOTE(review): returns None on CPU-only builds — the context-manager
    # usage documented above would fail there; confirm intended.
def should_log_for(level):
    """
    Check if messages at the given logging level will be logged or not. This is
    a useful check to avoid doing unnecessary logging work.

    Examples
    --------

    .. code-block:: python

        if logger.should_log_for(level_info):
            # which could waste precious CPU cycles
            my_message = construct_message()
            logger.info(my_message)

    Parameters
    ----------
    level : int
        Logging level to be set. \
        It must be one of cuml.common.logger.level_*
    """
    IF GPUBUILD == 1:
        return Logger.get().shouldLogFor(<int>level)
    # NOTE(review): CPU-only builds fall through and return None (falsy), so
    # guarded log statements never fire there even though debug()/info()
    # route to Python logging — confirm this is intended.
def trace(msg):
    """
    Logs a trace message, if it is enabled.

    Examples
    --------

    .. code-block:: python

        logger.trace("Hello world! This is a trace message")

    Parameters
    ----------
    msg : str
        Message to be logged.
    """
    IF GPUBUILD == 1:
        cdef string s = msg.encode("UTF-8")
        CUML_LOG_TRACE(s.c_str())
    ELSE:
        # Python logging has no TRACE level; DEBUG is the closest severity.
        logging.debug(msg)
def debug(msg):
    """
    Logs a debug message, if it is enabled.

    Examples
    --------

    .. code-block:: python

        logger.debug("Hello world! This is a debug message")

    Parameters
    ----------
    msg : str
        Message to be logged.
    """
    IF GPUBUILD == 1:
        cdef string s = msg.encode("UTF-8")
        CUML_LOG_DEBUG(s.c_str())
    ELSE:
        # CPU-only build: route through Python logging.
        logging.debug(msg)
def info(msg):
    """
    Logs an info message, if it is enabled.

    Examples
    --------

    .. code-block:: python

        logger.info("Hello world! This is a info message")

    Parameters
    ----------
    msg : str
        Message to be logged.
    """
    IF GPUBUILD == 1:
        cdef string s = msg.encode("UTF-8")
        CUML_LOG_INFO(s.c_str())
    ELSE:
        # CPU-only build: route through Python logging.
        logging.info(msg)
def warn(msg):
    """
    Logs a warning message, if it is enabled.

    Examples
    --------

    .. code-block:: python

        logger.warn("Hello world! This is a warning message")

    Parameters
    ----------
    msg : str
        Message to be logged.
    """
    IF GPUBUILD == 1:
        cdef string s = msg.encode("UTF-8")
        CUML_LOG_WARN(s.c_str())
    ELSE:
        # CPU-only build: route through Python logging.
        logging.warning(msg)
def error(msg):
    """
    Logs an error message, if it is enabled.

    Examples
    --------

    .. code-block:: python

        logger.error("Hello world! This is a error message")

    Parameters
    ----------
    msg : str
        Message to be logged.
    """
    IF GPUBUILD == 1:
        cdef string s = msg.encode("UTF-8")
        CUML_LOG_ERROR(s.c_str())
    ELSE:
        # CPU-only build: route through Python logging.
        logging.error(msg)
def critical(msg):
    """
    Logs a critical message, if it is enabled.

    Examples
    --------

    .. code-block:: python

        logger.critical("Hello world! This is a critical message")

    Parameters
    ----------
    msg : str
        Message to be logged.
    """
    IF GPUBUILD == 1:
        cdef string s = msg.encode("UTF-8")
        CUML_LOG_CRITICAL(s.c_str())
    ELSE:
        # CPU-only build: route through Python logging.
        logging.critical(msg)
def flush():
    """
    Flush the logs.
    """
    IF GPUBUILD == 1:
        Logger.get().flush()
    # NOTE(review): no-op on CPU-only builds; Python logging handlers are not
    # flushed here — confirm intended.
IF GPUBUILD == 1:
    # Set callback functions to handle redirected sys.stdout in Python
    # (e.g. notebooks, pytest capture) instead of the C++ default sink.
    Logger.get().setCallback(_log_callback)
    Logger.get().setFlush(_log_flush)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/base_return_types.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml.internals
import typing
from cuml.internals.array import CumlArray
from cuml.internals.array_sparse import SparseCumlArray
def _get_base_return_type(class_name, attr):
    """
    Classify ``attr``'s return annotation for cuML's return-type machinery.

    Parameters
    ----------
    class_name : str
        Name of the class currently being defined (used to resolve
        self-referential string annotations).
    attr : callable
        The attribute whose ``return`` annotation is inspected.

    Returns
    -------
    str or None
        One of "array", "sparsearray", "base", the result of
        ``_process_generic`` for generic aliases, or None when the return
        type is absent or unrecognized.
    """
    # Fast exit when the attribute carries no return annotation at all.
    if (
        not hasattr(attr, "__annotations__")
        or "return" not in attr.__annotations__
    ):
        return None

    try:
        type_hints = typing.get_type_hints(attr)
        if "return" in type_hints:
            ret_type = type_hints["return"]
            # NOTE(review): typing._GenericAlias is private API; behavior may
            # shift across Python versions — confirm on upgrades.
            is_generic = isinstance(ret_type, typing._GenericAlias)
            if is_generic:
                return _process_generic(ret_type)
            elif issubclass(ret_type, CumlArray):
                return "array"
            elif issubclass(ret_type, SparseCumlArray):
                return "sparsearray"
            elif issubclass(ret_type, cuml.internals.base.Base):
                return "base"
            else:
                return None
    except NameError:
        # A NameError is raised if the return type is the same as the
        # type being defined (which is incomplete). Check that here and
        # return base if the name matches
        # Cython 3 changed to preferring types rather than strings for
        # annotations. Strings end up wrapped in an extra layer of quotes,
        # which we have to replace here.
        if attr.__annotations__["return"].replace("'", "") == class_name:
            return "base"
    except Exception:
        # Unexpected failure while resolving hints; flagged loudly in debug
        # runs (assert), treated as "unrecognized" under -O.
        assert False, "Shouldn't get here"
        return None

    return None
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/available_devices.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.device_support import GPU_ENABLED
from cuml.internals.safe_imports import gpu_only_import_from, UnavailableError
# functools.cache only exists on Python >= 3.9; emulate it with an unbounded
# lru_cache on older interpreters.
try:
    from functools import cache  # requires Python >= 3.9
except ImportError:
    from functools import lru_cache

    cache = lru_cache(maxsize=None)
def gpu_available_no_context_creation():
    """
    Heuristically report GPU availability without creating a CUDA context:
    CuPy being importable is used as the proxy signal.
    """
    try:
        import cupy  # noqa: F401
    except ImportError:
        return False
    return True
@cache
def is_cuda_available():
    """
    Cached check: True only when this build has GPU support *and* a GPU
    stack (CuPy) is importable. Never raises; an UnavailableError from the
    safe-import machinery is treated as "no GPU".
    """
    try:
        available = GPU_ENABLED and gpu_available_no_context_creation()
    except UnavailableError:
        available = False
    return available
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/internals/mem_type.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum, auto
from cuml.internals.device_support import GPU_ENABLED
from cuml.internals.safe_imports import cpu_only_import, gpu_only_import
cudf = gpu_only_import("cudf")
cp = gpu_only_import("cupy")
cpx_sparse = gpu_only_import("cupyx.scipy.sparse")
np = cpu_only_import("numpy")
pandas = cpu_only_import("pandas")
scipy_sparse = cpu_only_import("scipy.sparse")
class MemoryTypeError(Exception):
    """Raised when requested memory types conflict with one another."""
class MemoryType(Enum):
    """Where an array's buffer lives: device, host, managed, or mirrored
    (mirror follows the build: device on GPU builds, host otherwise)."""

    device = auto()
    host = auto()
    managed = auto()
    mirror = auto()

    @classmethod
    def from_str(cls, memory_type):
        """Coerce a (case-insensitive) string or MemoryType into a member."""
        if isinstance(memory_type, cls):
            return memory_type
        if isinstance(memory_type, str):
            memory_type = memory_type.lower()
        try:
            return cls[memory_type]
        except KeyError:
            raise ValueError(
                'Parameter memory_type must be one of "device", '
                '"host", "managed" or "mirror"'
            )

    @property
    def _resolves_to_host(self):
        # Host-side semantics apply to `host`, and to `mirror` when the
        # build has no GPU support.
        return self is MemoryType.host or (
            self is MemoryType.mirror and not GPU_ENABLED
        )

    @property
    def xpy(self):
        """Array namespace matching this memory type (numpy or cupy)."""
        return np if self._resolves_to_host else cp

    @property
    def xdf(self):
        """DataFrame namespace matching this memory type (pandas or cudf)."""
        return pandas if self._resolves_to_host else cudf

    @property
    def xsparse(self):
        """Sparse namespace (scipy.sparse or cupyx.scipy.sparse)."""
        return scipy_sparse if self._resolves_to_host else cpx_sparse

    @property
    def is_device_accessible(self):
        return self in (MemoryType.device, MemoryType.managed)

    @property
    def is_host_accessible(self):
        return self in (MemoryType.host, MemoryType.managed)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Single-GPU modules are registered via add_module_gpu_default; multi-GPU
# sources are appended to cython_sources below when SINGLEGPU is off.
set(cython_sources "")
# NOTE(review): "base.pyx" is gated on ${elastic_net_algo} while the
# elastic_net module below uses ${elasticnet_algo}; confirm which spelling
# the surrounding build defines — one of the two may never match.
add_module_gpu_default("base.pyx" ${linearregression_algo} ${elastic_net_algo} ${ridge_algo} ${linear_model_algo})
add_module_gpu_default("linear_regression.pyx" ${linearregression_algo} ${linear_model_algo})
add_module_gpu_default("elastic_net.pyx" ${elasticnet_algo} ${linear_model_algo})
add_module_gpu_default("logistic_regression.pyx" ${logisticregression_algo} ${linear_model_algo})
add_module_gpu_default("mbsgd_classifier.pyx" ${mbsgd_classifier_algo} ${linear_model_algo})
add_module_gpu_default("mbsgd_regressor.pyx" ${mbsgd_regressor_algo} ${linear_model_algo})
add_module_gpu_default("ridge.pyx" ${ridge_algo} ${linear_model_algo})

# Multi-GPU (MNMG) variants are only built for multi-GPU-capable builds.
if(NOT SINGLEGPU)
  list(APPEND cython_sources
       base_mg.pyx
       linear_regression_mg.pyx
       logistic_regression_mg.pyx
       ridge_mg.pyx
  )
endif()

rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_mg_libraries}"
  MODULE_PREFIX linear_model_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/logistic_regression.pyx | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
from cuml.internals.safe_imports import gpu_only_import
import pprint
import cuml.internals
from cuml.solvers import QN
from cuml.internals.base import UniversalBase
from cuml.internals.mixins import ClassifierMixin, FMajorInputTagMixin
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.array import CumlArray
from cuml.common.doc_utils import generate_docstring
import cuml.internals.logger as logger
from cuml.common import input_to_cuml_array
from cuml.common import using_output_type
from cuml.internals.api_decorators import device_interop_preparation
from cuml.internals.api_decorators import enable_device_interop
cp = gpu_only_import('cupy')
np = cpu_only_import('numpy')
# Hyper-parameter values accepted by LogisticRegression (validated in
# __init__); only the quasi-Newton solver family is implemented.
supported_penalties = ["l1", "l2", "none", "elasticnet"]
supported_solvers = ["qn"]
class LogisticRegression(UniversalBase,
ClassifierMixin,
FMajorInputTagMixin):
"""
LogisticRegression is a linear model that is used to model probability of
occurrence of certain events, for example probability of success or fail of
an event.
cuML's LogisticRegression can take array-like objects, either in host as
NumPy arrays or in device (as Numba or `__cuda_array_interface__`
compliant), in addition to cuDF objects.
It provides both single-class (using sigmoid loss) and multiple-class
(using softmax loss) variants, depending on the input variables
Only one solver option is currently available: Quasi-Newton (QN)
algorithms. Even though it is presented as a single option, this solver
resolves to two different algorithms underneath:
- Orthant-Wise Limited Memory Quasi-Newton (OWL-QN) if there is l1
regularization
- Limited Memory BFGS (L-BFGS) otherwise.
Note that, just like in Scikit-learn, the bias will not be regularized.
Examples
--------
.. code-block:: python
>>> import cudf
>>> import numpy as np
>>> # Both import methods supported
>>> # from cuml import LogisticRegression
>>> from cuml.linear_model import LogisticRegression
>>> X = cudf.DataFrame()
>>> X['col1'] = np.array([1,1,2,2], dtype = np.float32)
>>> X['col2'] = np.array([1,2,2,3], dtype = np.float32)
>>> y = cudf.Series(np.array([0.0, 0.0, 1.0, 1.0], dtype=np.float32))
>>> reg = LogisticRegression()
>>> reg.fit(X,y)
LogisticRegression()
>>> print(reg.coef_)
0 1
0 0.69861 0.570058
>>> print(reg.intercept_)
0 -2.188...
dtype: float32
>>> X_new = cudf.DataFrame()
>>> X_new['col1'] = np.array([1,5], dtype = np.float32)
>>> X_new['col2'] = np.array([2,5], dtype = np.float32)
>>> preds = reg.predict(X_new)
>>> print(preds)
0 0.0
1 1.0
dtype: float32
Parameters
----------
penalty : 'none', 'l1', 'l2', 'elasticnet' (default = 'l2')
Used to specify the norm used in the penalization.
If 'none' or 'l2' are selected, then L-BFGS solver will be used.
If 'l1' is selected, solver OWL-QN will be used.
If 'elasticnet' is selected, OWL-QN will be used if l1_ratio > 0,
otherwise L-BFGS will be used.
tol : float (default = 1e-4)
Tolerance for stopping criteria.
The exact stopping conditions depend on the chosen solver.
Check the solver's documentation for more details:
* :class:`Quasi-Newton (L-BFGS/OWL-QN)<cuml.QN>`
C : float (default = 1.0)
Inverse of regularization strength; must be a positive float.
fit_intercept : boolean (default = True)
If True, the model tries to correct for the global mean of y.
If False, the model expects that you have centered the data.
class_weight : dict or 'balanced', default=None
By default all classes have a weight one. However, a dictionary
can be provided with weights associated with classes
in the form ``{class_label: weight}``. The "balanced" mode
uses the values of y to automatically adjust weights
inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``. Note that
these weights will be multiplied with sample_weight
(passed through the fit method) if sample_weight is specified.
max_iter : int (default = 1000)
Maximum number of iterations taken for the solvers to converge.
linesearch_max_iter : int (default = 50)
Max number of linesearch iterations per outer iteration used in the
lbfgs and owl QN solvers.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with `0 <= l1_ratio <= 1`
solver : 'qn' (default='qn')
Algorithm to use in the optimization problem. Currently only `qn` is
supported, which automatically selects either L-BFGS or OWL-QN
depending on the conditions of the l1 regularization described
above.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
Attributes
----------
coef_: dev array, dim (n_classes, n_features) or (n_classes, n_features+1)
The estimated coefficients for the logistic regression model.
intercept_: device array (n_classes, 1)
The independent term. If `fit_intercept` is False, will be 0.
Notes
-----
cuML's LogisticRegression uses a different solver than the equivalent
Scikit-learn, except when there is no penalty and `solver=lbfgs` is
used in Scikit-learn. This can cause (smaller) differences in the
coefficients and predictions of the model, similar to
using different solvers in Scikit-learn.
For additional information, see `Scikit-learn's LogisticRegression
<https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html>`_.
"""
# CPU fallback implementation used by the device-interop machinery.
_cpu_estimator_import_path = 'sklearn.linear_model.LogisticRegression'
# Estimator attributes managed as cuML arrays (stored F-ordered).
classes_ = CumlArrayDescriptor(order='F')
class_weight = CumlArrayDescriptor(order='F')
expl_spec_weights_ = CumlArrayDescriptor(order='F')
@device_interop_preparation
def __init__(
    self,
    *,
    penalty="l2",
    tol=1e-4,
    C=1.0,
    fit_intercept=True,
    class_weight=None,
    max_iter=1000,
    linesearch_max_iter=50,
    verbose=False,
    l1_ratio=None,
    solver="qn",
    handle=None,
    output_type=None,
):
    """
    Validate hyper-parameters and construct the underlying QN solver.

    Raises
    ------
    ValueError
        If ``penalty`` or ``solver`` is unsupported, or if ``l1_ratio``
        is missing or out of [0, 1] when ``penalty='elasticnet'``.
    """
    super().__init__(handle=handle,
                     verbose=verbose,
                     output_type=output_type)
    if penalty not in supported_penalties:
        # FIX: original message lacked a space ("l1not supported.").
        raise ValueError("`penalty` " + str(penalty) + " not supported.")

    if solver not in supported_solvers:
        # FIX: collapsed an accidental double space in the message.
        raise ValueError("Only quasi-newton `qn` solver is "
                         "supported, not %s" % solver)
    self.solver = solver
    self.C = C
    self.penalty = penalty
    self.tol = tol
    self.fit_intercept = fit_intercept
    self.max_iter = max_iter
    self.linesearch_max_iter = linesearch_max_iter

    # l1_ratio is only meaningful for elasticnet; otherwise it stays None.
    self.l1_ratio = None
    if self.penalty == "elasticnet":
        if l1_ratio is None:
            # FIX: implicit string concatenation previously produced
            # "forloss='elasticnet'" (missing space).
            raise ValueError(
                "l1_ratio has to be specified for loss='elasticnet'"
            )
        if l1_ratio < 0.0 or l1_ratio > 1.0:
            # FIX: message had no placeholder, so .format() was a no-op;
            # include the offending value in the error.
            msg = "l1_ratio value has to be between 0.0 and 1.0; got {}"
            raise ValueError(msg.format(l1_ratio))
        self.l1_ratio = l1_ratio

    l1_strength, l2_strength = self._get_qn_params()

    # Binary loss by default; fit() switches to softmax for multiclass.
    loss = "sigmoid"

    if class_weight is not None:
        self._build_class_weights(class_weight)
    else:
        self.class_weight = None

    self.solver_model = QN(
        loss=loss,
        fit_intercept=self.fit_intercept,
        l1_strength=l1_strength,
        l2_strength=l2_strength,
        max_iter=self.max_iter,
        linesearch_max_iter=self.linesearch_max_iter,
        tol=self.tol,
        verbose=self.verbose,
        handle=self.handle,
    )

    if logger.should_log_for(logger.level_debug):
        self.verb_prefix = "CY::"
        logger.debug(self.verb_prefix + "Estimator parameters:")
        logger.debug(pprint.pformat(self.__dict__))
    else:
        self.verb_prefix = ""
@generate_docstring(X='dense_sparse')
@cuml.internals.api_base_return_any(set_output_dtype=True)
@enable_device_interop
def fit(self, X, y, sample_weight=None,
        convert_dtype=True) -> "LogisticRegression":
    """
    Fit the model with X and y.
    """
    # Record sklearn-style fitted metadata.
    self.n_features_in_ = X.shape[1] if X.ndim == 2 else 1
    if hasattr(X, 'index'):
        self.feature_names_in_ = X.index

    # Converting y to device array here to use `unique` function
    # since calling input_to_cuml_array again in QN has no cost
    # Not needed to check dtype since qn class checks it already
    y_m, n_rows, _, _ = input_to_cuml_array(y)
    self.classes_ = cp.unique(y_m)
    self._num_classes = len(self.classes_)

    if self._num_classes == 2:
        # Binary classification currently requires labels to be exactly {0, 1}.
        if self.classes_[0] != 0 or self.classes_[1] != 1:
            raise ValueError("Only values of 0 and 1 are"
                             " supported for binary classification.")

    if sample_weight is not None or self.class_weight is not None:
        if sample_weight is None:
            sample_weight = cp.ones(n_rows)

        sample_weight, n_weights, D, _ = input_to_cuml_array(sample_weight)

        # One weight per sample, as a 1-D vector.
        if n_rows != n_weights or D != 1:
            raise ValueError("sample_weight.shape == {}, "
                             "expected ({},)!".format(sample_weight.shape,
                                                      n_rows))

        def check_expl_spec_weights():
            # Verify every explicitly weighted class label occurs in y.
            with cuml.using_output_type("numpy"):
                for c in self.expl_spec_weights_:
                    i = np.searchsorted(self.classes_, c)
                    if i >= self._num_classes or self.classes_[i] != c:
                        msg = "Class label {} not present.".format(c)
                        raise ValueError(msg)

        if self.class_weight is not None:
            if self.class_weight == 'balanced':
                # 'balanced': weights inversely proportional to class frequency.
                class_weight = n_rows / \
                    (self._num_classes *
                        cp.bincount(y_m.to_output('cupy')))
                class_weight = CumlArray(class_weight)
            else:
                check_expl_spec_weights()
                n_explicit = self.class_weight.shape[0]
                if n_explicit != self._num_classes:
                    # Pad classes without an explicit weight with weight 1.
                    class_weight = cp.ones(self._num_classes)
                    class_weight[:n_explicit] = self.class_weight
                    class_weight = CumlArray(class_weight)
                    self.class_weight = class_weight
                else:
                    class_weight = self.class_weight
            # Fold per-class weights into the per-sample weights.
            out = y_m.to_output('cupy')
            sample_weight *= class_weight[out].to_output('cupy')
            sample_weight = CumlArray(sample_weight)

    # Choose the loss from the number of classes found in y.
    if self._num_classes > 2:
        loss = "softmax"
    else:
        loss = "sigmoid"

    if logger.should_log_for(logger.level_debug):
        logger.debug(self.verb_prefix + "Setting loss to " + str(loss))

    self.solver_model.loss = loss

    if logger.should_log_for(logger.level_debug):
        logger.debug(self.verb_prefix + "Calling QN fit " + str(loss))

    self.solver_model.fit(X, y_m, sample_weight=sample_weight,
                          convert_dtype=convert_dtype)

    # coefficients and intercept are contained in the same array
    if logger.should_log_for(logger.level_debug):
        logger.debug(
            self.verb_prefix + "Setting coefficients " + str(loss)
        )

    if logger.should_log_for(logger.level_trace):
        with using_output_type("cupy"):
            logger.trace(self.verb_prefix + "Coefficients: " +
                         str(self.solver_model.coef_))
            if self.fit_intercept:
                logger.trace(
                    self.verb_prefix
                    + "Intercept: "
                    + str(self.solver_model.intercept_)
                )

    return self
@generate_docstring(X='dense_sparse',
                    return_values={'name': 'score',
                                   'type': 'dense',
                                   'description': 'Confidence score',
                                   'shape': '(n_samples, n_classes)'})
@enable_device_interop
def decision_function(self, X, convert_dtype=True) -> CumlArray:
    """
    Gives confidence score for X

    Delegates to the underlying QN solver's raw decision function.
    """
    return self.solver_model._decision_function(
        X,
        convert_dtype=convert_dtype
    )
@generate_docstring(X='dense_sparse',
                    return_values={'name': 'preds',
                                   'type': 'dense',
                                   'description': 'Predicted values',
                                   'shape': '(n_samples, 1)'})
@cuml.internals.api_base_return_array(get_output_dtype=True)
@enable_device_interop
def predict(self, X, convert_dtype=True) -> CumlArray:
    """
    Predicts the y for X.

    Delegates label prediction to the underlying QN solver.
    """
    return self.solver_model.predict(X, convert_dtype=convert_dtype)
@generate_docstring(X='dense_sparse',
                    return_values={'name': 'preds',
                                   'type': 'dense',
                                   'description': 'Predicted class \
                                                   probabilities',
                                   'shape': '(n_samples, n_classes)'})
@enable_device_interop
def predict_proba(self, X, convert_dtype=True) -> CumlArray:
    """
    Predicts the class probabilities for each class in X

    Thin wrapper over ``_predict_proba_impl`` with ``log_proba=False``.
    """
    return self._predict_proba_impl(
        X,
        convert_dtype=convert_dtype,
        log_proba=False
    )
@generate_docstring(X='dense_sparse',
                    return_values={'name': 'preds',
                                   'type': 'dense',
                                   'description': 'Logarithm of predicted '
                                                  'class probabilities',
                                   'shape': '(n_samples, n_classes)'})
@enable_device_interop
def predict_log_proba(self, X, convert_dtype=True) -> CumlArray:
    """
    Predicts the log class probabilities for each class in X

    Thin wrapper over ``_predict_proba_impl`` with ``log_proba=True``.
    """
    # FIX: the advertised description read "Logaright" (typo), now
    # "Logarithm"; also removed the backslash line-continuation that
    # embedded a run of indentation spaces inside the string.
    return self._predict_proba_impl(
        X,
        convert_dtype=convert_dtype,
        log_proba=True
    )
def _predict_proba_impl(self,
                        X,
                        convert_dtype=False,
                        log_proba=False) -> CumlArray:
    # Shared implementation behind predict_proba / predict_log_proba.
    _num_classes = self.classes_.shape[0]

    # Raw decision scores, transposed to (n_samples, n_classes).
    scores = cp.asarray(
        self.decision_function(X, convert_dtype=convert_dtype), order="F"
    ).T
    if _num_classes == 2:
        # Binary case: sigmoid of the single score column.
        proba = cp.zeros((scores.shape[0], 2))
        proba[:, 1] = 1 / (1 + cp.exp(-scores.ravel()))
        proba[:, 0] = 1 - proba[:, 1]
    elif _num_classes > 2:
        # Multinomial case: numerically stable softmax (subtract row max
        # before exponentiating).
        max_scores = cp.max(scores, axis=1).reshape((-1, 1))
        scores -= max_scores
        proba = cp.exp(scores)
        row_sum = cp.sum(proba, axis=1).reshape((-1, 1))
        proba /= row_sum

    if log_proba:
        proba = cp.log(proba)

    return proba
def _get_qn_params(self):
if self.penalty == "none":
l1_strength = 0.0
l2_strength = 0.0
elif self.penalty == "l1":
l1_strength = 1.0 / self.C
l2_strength = 0.0
elif self.penalty == "l2":
l1_strength = 0.0
l2_strength = 1.0 / self.C
else:
strength = 1.0 / self.C
l1_strength = self.l1_ratio * strength
l2_strength = (1.0 - self.l1_ratio) * strength
return l1_strength, l2_strength
def _build_class_weights(self, class_weight):
if class_weight == 'balanced':
self.class_weight = 'balanced'
else:
classes = list(class_weight.keys())
weights = list(class_weight.values())
max_class = sorted(classes)[-1]
class_weight = cp.ones(max_class + 1)
class_weight[classes] = weights
self.class_weight, _, _, _ = input_to_cuml_array(class_weight)
self.expl_spec_weights_, _, _, _ = \
input_to_cuml_array(np.array(classes))
def set_params(self, **params):
super().set_params(**params)
rebuild_params = False
# Remove class-specific parameters
for param_name in ['penalty', 'l1_ratio', 'C']:
if param_name in params:
params.pop(param_name)
rebuild_params = True
if rebuild_params:
# re-build QN solver parameters
l1_strength, l2_strength = self._get_qn_params()
params.update({'l1_strength': l1_strength,
'l2_strength': l2_strength})
if 'class_weight' in params:
# re-build class weight
class_weight = params.pop('class_weight')
self._build_class_weights(class_weight)
# Update solver
self.solver_model.set_params(**params)
return self
    @property
    @cuml.internals.api_base_return_array_skipall
    def coef_(self):
        """Estimated coefficients; stored on the underlying solver model."""
        return self.solver_model.coef_

    @coef_.setter
    def coef_(self, value):
        # Writes pass straight through to the solver model so the two
        # objects never disagree about the fitted coefficients.
        self.solver_model.coef_ = value
    @property
    @cuml.internals.api_base_return_array_skipall
    def intercept_(self):
        """Fitted intercept term; stored on the underlying solver model."""
        return self.solver_model.intercept_

    @intercept_.setter
    def intercept_(self, value):
        # Writes pass straight through to the solver model so the two
        # objects never disagree about the fitted intercept.
        self.solver_model.intercept_ = value
def get_param_names(self):
return super().get_param_names() + [
"penalty",
"tol",
"C",
"fit_intercept",
"class_weight",
"max_iter",
"linesearch_max_iter",
"l1_ratio",
"solver",
]
def __getstate__(self):
state = self.__dict__.copy()
return state
    def __setstate__(self, state):
        """Unpickle support: rebuild runtime state, then restore attributes.

        ``super().__init__`` is called first so that non-picklable runtime
        resources (e.g. the handle) are recreated fresh before the saved
        attribute dict is applied on top.
        """
        super().__init__(handle=None,
                         verbose=state["verbose"])
        self.__dict__.update(state)
def get_attr_names(self):
return ['classes_', 'intercept_', 'coef_', 'n_features_in_',
'feature_names_in_']
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/ridge_mg.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
from libcpp cimport bool
from libc.stdint cimport uintptr_t
from cython.operator cimport dereference as deref
import cuml.internals
from pylibraft.common.handle cimport handle_t
from cuml.common.opg_data_utils_mg cimport *
from cuml.decomposition.utils cimport *
from cuml.linear_model import Ridge
from cuml.linear_model.base_mg import MGFitMixin
cdef extern from "cuml/linear_model/ridge_mg.hpp" namespace "ML::Ridge::opg":

    # Multi-GPU (OPG) ridge fit, float32 overload. Input/label data arrive
    # as per-partition descriptors; coefficients and intercept are written
    # through the provided pointers.
    cdef void fit(handle_t& handle,
                  vector[floatData_t *] input_data,
                  PartDescriptor &input_desc,
                  vector[floatData_t *] labels,
                  float *alpha,
                  int n_alpha,
                  float *coef,
                  float *intercept,
                  bool fit_intercept,
                  bool normalize,
                  int algo,
                  bool verbose) except +

    # float64 overload of the same entry point.
    cdef void fit(handle_t& handle,
                  vector[doubleData_t *] input_data,
                  PartDescriptor &input_desc,
                  vector[doubleData_t *] labels,
                  double *alpha,
                  int n_alpha,
                  double *coef,
                  double *intercept,
                  bool fit_intercept,
                  bool normalize,
                  int algo,
                  bool verbose) except +
class RidgeMG(MGFitMixin, Ridge):
    """Multi-GPU (one-process-per-GPU) variant of Ridge regression.

    MGFitMixin supplies the distributed input plumbing; this class only
    dispatches the per-dtype C++ ``fit`` overload.
    """

    def __init__(self, **kwargs):
        super(RidgeMG, self).__init__(**kwargs)

    @cuml.internals.api_base_return_any_skipall
    def _fit(self, X, y, coef_ptr, input_desc):
        """Run the C++ OPG ridge fit on pre-distributed data.

        ``X``/``y`` are uintptr_t-encoded vectors of per-partition data
        pointers, ``coef_ptr`` the output coefficient buffer, and
        ``input_desc`` the partition descriptor; all are produced by
        MGFitMixin.
        """
        cdef float float_intercept
        cdef double double_intercept
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef float float_alpha
        cdef double double_alpha

        # Only one alpha is supported.
        self.n_alpha = 1

        if self.dtype == np.float32:
            float_alpha = self.alpha

            fit(handle_[0],
                deref(<vector[floatData_t*]*><uintptr_t>X),
                deref(<PartDescriptor*><uintptr_t>input_desc),
                deref(<vector[floatData_t*]*><uintptr_t>y),
                <float*>&float_alpha,
                <int>self.n_alpha,
                <float*><size_t>coef_ptr,
                <float*>&float_intercept,
                <bool>self.fit_intercept,
                <bool>self.normalize,
                <int>self.algo,
                False)

            self.intercept_ = float_intercept
        else:
            double_alpha = self.alpha

            fit(handle_[0],
                deref(<vector[doubleData_t*]*><uintptr_t>X),
                deref(<PartDescriptor*><uintptr_t>input_desc),
                deref(<vector[doubleData_t*]*><uintptr_t>y),
                <double*>&double_alpha,
                <int>self.n_alpha,
                <double*><size_t>coef_ptr,
                <double*>&double_intercept,
                <bool>self.fit_intercept,
                <bool>self.normalize,
                <int>self.algo,
                False)

            self.intercept_ = double_intercept

        # Block until the asynchronous C++ work has finished so outputs
        # are valid when this method returns.
        self.handle.sync()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/linear_regression_mg.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
from libcpp cimport bool
from libc.stdint cimport uintptr_t
from cython.operator cimport dereference as deref
import cuml.internals
from pylibraft.common.handle cimport handle_t
from cuml.common.opg_data_utils_mg cimport *
from cuml.decomposition.utils cimport *
from cuml.linear_model import LinearRegression
from cuml.linear_model.base_mg import MGFitMixin
cdef extern from "cuml/linear_model/ols_mg.hpp" namespace "ML::OLS::opg":

    # Multi-GPU (OPG) ordinary-least-squares fit, float32 overload.
    # Coefficients and intercept are written through the output pointers.
    cdef void fit(handle_t& handle,
                  vector[floatData_t *] input_data,
                  PartDescriptor &input_desc,
                  vector[floatData_t *] labels,
                  float *coef,
                  float *intercept,
                  bool fit_intercept,
                  bool normalize,
                  int algo,
                  bool verbose) except +

    # float64 overload of the same entry point.
    cdef void fit(handle_t& handle,
                  vector[doubleData_t *] input_data,
                  PartDescriptor &input_desc,
                  vector[doubleData_t *] labels,
                  double *coef,
                  double *intercept,
                  bool fit_intercept,
                  bool normalize,
                  int algo,
                  bool verbose) except +
class LinearRegressionMG(MGFitMixin, LinearRegression):
    """Multi-GPU (one-process-per-GPU) variant of LinearRegression.

    MGFitMixin supplies the distributed input plumbing; this class only
    dispatches the per-dtype C++ OLS ``fit`` overload.
    """

    def __init__(self, **kwargs):
        super(LinearRegressionMG, self).__init__(**kwargs)

    @cuml.internals.api_base_return_any_skipall
    def _fit(self, X, y, coef_ptr, input_desc):
        """Run the C++ OPG OLS fit on pre-distributed data.

        ``X``/``y`` are uintptr_t-encoded vectors of per-partition data
        pointers, ``coef_ptr`` the output coefficient buffer, and
        ``input_desc`` the partition descriptor; all are produced by
        MGFitMixin.
        """
        cdef float float_intercept
        cdef double double_intercept
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()

        if self.dtype == np.float32:

            fit(handle_[0],
                deref(<vector[floatData_t*]*><uintptr_t>X),
                deref(<PartDescriptor*><uintptr_t>input_desc),
                deref(<vector[floatData_t*]*><uintptr_t>y),
                <float*><size_t>coef_ptr,
                <float*>&float_intercept,
                <bool>self.fit_intercept,
                <bool>self.normalize,
                <int>self.algo,
                False)

            self.intercept_ = float_intercept
        else:

            fit(handle_[0],
                deref(<vector[doubleData_t*]*><uintptr_t>X),
                deref(<PartDescriptor*><uintptr_t>input_desc),
                deref(<vector[doubleData_t*]*><uintptr_t>y),
                <double*><size_t>coef_ptr,
                <double*>&double_intercept,
                <bool>self.fit_intercept,
                <bool>self.normalize,
                <int>self.algo,
                False)

            self.intercept_ = double_intercept

        # Block until the asynchronous C++ work has finished so outputs
        # are valid when this method returns.
        self.handle.sync()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/base.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import cuml.internals
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
from libc.stdint cimport uintptr_t
from cuml.internals.array import CumlArray
from cuml.internals.input_utils import input_to_cuml_array
from cuml.common.doc_utils import generate_docstring
from cuml.internals.api_decorators import enable_device_interop
IF GPUBUILD == 1:
    from pylibraft.common.handle cimport handle_t
    cdef extern from "cuml/linear_model/glm.hpp" namespace "ML::GLM":
        # Dense linear-model prediction: preds = input @ coef + intercept,
        # float32 overload.
        cdef void gemmPredict(handle_t& handle,
                              const float *input,
                              size_t _n_rows,
                              size_t _n_cols,
                              const float *coef,
                              float intercept,
                              float *preds) except +

        # float64 overload of the same entry point.
        cdef void gemmPredict(handle_t& handle,
                              const double *input,
                              size_t _n_rows,
                              size_t _n_cols,
                              const double *coef,
                              double intercept,
                              double *preds) except +
class LinearPredictMixin:
    @generate_docstring(return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Predicted values',
                                       'shape': '(n_samples, 1)'})
    @cuml.internals.api_base_return_array_skipall
    @enable_device_interop
    def predict(self, X, convert_dtype=True) -> CumlArray:
        """
        Predicts `y` values for `X`.

        Multi-target models are evaluated in Python as
        ``X @ coef + intercept``; single-target models call the C++
        ``gemmPredict`` kernel.
        """
        # Bug fix: check for an unfitted model BEFORE touching
        # `self.coef_.dtype`. Previously the dtype was read first, so an
        # unfitted model raised AttributeError on None and the intended
        # ValueError below was unreachable. The duplicated dtype
        # assignment has also been removed.
        if self.coef_ is None:
            raise ValueError(
                "LinearModel.predict() cannot be called before fit(). "
                "Please fit the model first."
            )

        self.dtype = self.coef_.dtype

        if len(self.coef_.shape) == 2 and self.coef_.shape[0] > 1:
            # Handle multi-target prediction in Python.
            coef_arr = CumlArray.from_input(self.coef_).to_output('array')
            X_arr = CumlArray.from_input(
                X,
                check_dtype=self.dtype,
                convert_to_dtype=(self.dtype if convert_dtype else None),
                check_cols=self.n_features_in_
            ).to_output('array')
            intercept_arr = CumlArray.from_input(
                self.intercept_
            ).to_output('array')
            preds_arr = X_arr @ coef_arr + intercept_arr
            return preds_arr

        # Handle single-target prediction in C++
        X_m, _n_rows, _n_cols, dtype = \
            input_to_cuml_array(X, check_dtype=self.dtype,
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_cols=self.n_features_in_)

        cdef uintptr_t _X_ptr = X_m.ptr
        cdef uintptr_t _coef_ptr = self.coef_.ptr

        preds = CumlArray.zeros(_n_rows, dtype=dtype, index=X_m.index)
        cdef uintptr_t _preds_ptr = preds.ptr

        IF GPUBUILD == 1:
            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            if dtype.type == np.float32:
                gemmPredict(handle_[0],
                            <float*>_X_ptr,
                            <size_t>_n_rows,
                            <size_t>_n_cols,
                            <float*>_coef_ptr,
                            <float>self.intercept_,
                            <float*>_preds_ptr)
            else:
                gemmPredict(handle_[0],
                            <double*>_X_ptr,
                            <size_t>_n_rows,
                            <size_t>_n_cols,
                            <double*>_coef_ptr,
                            <double>self.intercept_,
                            <double*>_preds_ptr)

            self.handle.sync()

        return preds
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/lasso.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.linear_model.elastic_net import ElasticNet
from cuml.internals.api_decorators import device_interop_preparation
class Lasso(ElasticNet):
    """
    Lasso extends LinearRegression by providing L1 regularization on the
    coefficients when predicting response y with a linear combination of the
    predictors in X. It can zero some of the coefficients for feature
    selection and improves the conditioning of the problem.

    cuML's Lasso can take array-like objects, either in host as
    NumPy arrays or in device (as Numba or `__cuda_array_interface__`
    compliant), in addition to cuDF objects. It uses coordinate descent to fit
    a linear model.

    This estimator supports cuML's experimental device selection capabilities.
    It can be configured to run on either the CPU or the GPU.
    To learn more, please see :ref:`device-selection`.

    Examples
    --------

    .. code-block:: python

        >>> import numpy as np
        >>> import cudf
        >>> from cuml.linear_model import Lasso
        >>> ls = Lasso(alpha = 0.1, solver='qn')
        >>> X = cudf.DataFrame()
        >>> X['col1'] = np.array([0, 1, 2], dtype = np.float32)
        >>> X['col2'] = np.array([0, 1, 2], dtype = np.float32)
        >>> y = cudf.Series( np.array([0.0, 1.0, 2.0], dtype = np.float32) )
        >>> result_lasso = ls.fit(X, y)
        >>> print(result_lasso.coef_)
        0    0.425
        1    0.425
        dtype: float32
        >>> print(result_lasso.intercept_)
        0.150000...

        >>> X_new = cudf.DataFrame()
        >>> X_new['col1'] = np.array([3,2], dtype = np.float32)
        >>> X_new['col2'] = np.array([5,5], dtype = np.float32)
        >>> preds = result_lasso.predict(X_new)
        >>> print(preds)
        0    3.549997
        1    3.124997
        dtype: float32

    Parameters
    ----------
    alpha : float (default = 1.0)
        Constant that multiplies the L1 term.
        alpha = 0 is equivalent to an ordinary least square, solved by the
        LinearRegression object.
        For numerical reasons, using alpha = 0 with the Lasso object is not
        advised.
        Given this, you should use the LinearRegression object.
    fit_intercept : boolean (default = True)
        If True, Lasso tries to correct for the global mean of y.
        If False, the model expects that you have centered the data.
    normalize : boolean (default = False)
        If True, the predictors in X will be normalized by dividing by the
        column-wise standard deviation.
        If False, no scaling will be done.
        Note: this is in contrast to sklearn's deprecated `normalize` flag,
        which divides by the column-wise L2 norm; but this is the same as if
        using sklearn's StandardScaler.
    max_iter : int (default = 1000)
        The maximum number of iterations
    tol : float (default = 1e-3)
        The tolerance for the optimization: if the updates are smaller than
        tol, the optimization code checks the dual gap for optimality and
        continues until it is smaller than tol.
    solver : {'cd', 'qn'} (default='cd')
        Choose an algorithm:

          * 'cd' - coordinate descent
          * 'qn' - quasi-newton

        You may find the alternative 'qn' algorithm is faster when the number
        of features is sufficiently large, but the sample size is small.
    selection : {'cyclic', 'random'} (default='cyclic')
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default.
        This (setting to 'random') often leads to significantly faster
        convergence especially when tol is higher than 1e-4.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.

    Attributes
    ----------
    coef_ : array, shape (n_features)
        The estimated coefficients for the linear regression model.
    intercept_ : array
        The independent term. If `fit_intercept` is False, will be 0.

    Notes
    -----
    For additional docs, see `scikitlearn's Lasso
    <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html>`_.
    """

    _cpu_estimator_import_path = "sklearn.linear_model.Lasso"

    @device_interop_preparation
    def __init__(
        self,
        *,
        alpha=1.0,
        fit_intercept=True,
        normalize=False,
        max_iter=1000,
        tol=1e-3,
        solver="cd",
        selection="cyclic",
        handle=None,
        output_type=None,
        verbose=False,
    ):
        # Lasso is just a special case of ElasticNet
        super().__init__(
            l1_ratio=1.0,
            alpha=alpha,
            fit_intercept=fit_intercept,
            normalize=normalize,
            max_iter=max_iter,
            tol=tol,
            solver=solver,
            selection=selection,
            handle=handle,
            output_type=output_type,
            verbose=verbose,
        )

    def get_param_names(self):
        """Return parameter names, excluding the fixed ``l1_ratio``.

        Fix: the previous ``list(set(...) - {...})`` implementation
        returned the names in nondeterministic (hash) order; this
        preserves the parent's ordering.
        """
        return [p for p in super().get_param_names() if p != "l1_ratio"]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/mbsgd_regressor.pyx | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.internals.base import Base
from cuml.internals.mixins import RegressorMixin
from cuml.common.doc_utils import generate_docstring
from cuml.internals.mixins import FMajorInputTagMixin
from cuml.solvers import SGD
class MBSGDRegressor(Base,
                     RegressorMixin,
                     FMajorInputTagMixin):
    """
    Linear regression model fitted by minimizing a
    regularized empirical loss with mini-batch SGD.
    The MBSGD Regressor implementation is experimental and it uses a
    different algorithm than sklearn's SGDClassifier. In order to improve
    the results obtained from cuML's MBSGD Regressor:

    * Reduce the batch size
    * Increase the eta0
    * Increase the number of iterations

    Since cuML is analyzing the data in batches using a small eta0 might
    not let the model learn as much as scikit learn does. Furthermore,
    decreasing the batch size might see an increase in the time required
    to fit the model.

    Examples
    --------

    .. code-block:: python

        >>> import cupy as cp
        >>> import cudf
        >>> from cuml.linear_model import MBSGDRegressor as cumlMBSGDRegressor
        >>> X = cudf.DataFrame()
        >>> X['col1'] = cp.array([1,1,2,2], dtype = cp.float32)
        >>> X['col2'] = cp.array([1,2,2,3], dtype = cp.float32)
        >>> y = cudf.Series(cp.array([1, 1, 2, 2], dtype=cp.float32))
        >>> pred_data = cudf.DataFrame()
        >>> pred_data['col1'] = cp.asarray([3, 2], dtype=cp.float32)
        >>> pred_data['col2'] = cp.asarray([5, 5], dtype=cp.float32)
        >>> cu_mbsgd_regressor = cumlMBSGDRegressor(learning_rate='constant',
        ...                                         eta0=0.05, epochs=2000,
        ...                                         fit_intercept=True,
        ...                                         batch_size=1, tol=0.0,
        ...                                         penalty='l2',
        ...                                         loss='squared_loss',
        ...                                         alpha=0.5)
        >>> cu_mbsgd_regressor.fit(X, y)
        MBSGDRegressor()
        >>> print("cuML intercept : ", cu_mbsgd_regressor.intercept_)
        cuML intercept :  0.725...
        >>> print("cuML coef : ", cu_mbsgd_regressor.coef_)
        cuML coef :  0    0.273...
        1     0.182...
        dtype: float32
        >>> cu_pred = cu_mbsgd_regressor.predict(pred_data)
        >>> print("cuML predictions : ", cu_pred)
        cuML predictions :  0    2.456...
        1     2.183...
        dtype: float32

    Parameters
    ----------
    loss : 'squared_loss' (default = 'squared_loss')
        'squared_loss' uses linear regression
    penalty : 'none', 'l1', 'l2', 'elasticnet' (default = 'l2')
        'none' does not perform any regularization
        'l1' performs L1 norm (Lasso) which minimizes the sum of the abs value
        of coefficients
        'l2' performs L2 norm (Ridge) which minimizes the sum of the square of
        the coefficients
        'elasticnet' performs Elastic Net regularization which is a weighted
        average of L1 and L2 norms
    alpha : float (default = 0.0001)
        The constant value which decides the degree of regularization
    fit_intercept : boolean (default = True)
        If True, the model tries to correct for the global mean of y.
        If False, the model expects that you have centered the data.
    l1_ratio : float (default=0.15)
        The l1_ratio is used only when `penalty = elasticnet`. The value for
        l1_ratio should be `0 <= l1_ratio <= 1`. When `l1_ratio = 0` then the
        `penalty = 'l2'` and if `l1_ratio = 1` then `penalty = 'l1'`
    batch_size : int (default = 32)
        It sets the number of samples that will be included in each batch.
    epochs : int (default = 1000)
        The number of times the model should iterate through the entire dataset
        during training (default = 1000)
    tol : float (default = 1e-3)
        The training process will stop if current_loss > previous_loss - tol
    shuffle : boolean (default = True)
        True, shuffles the training data after each epoch
        False, does not shuffle the training data after each epoch
    eta0 : float (default = 0.001)
        Initial learning rate
    power_t : float (default = 0.5)
        The exponent used for calculating the invscaling learning rate
    learning_rate : {'optimal', 'constant', 'invscaling', 'adaptive'} \
        (default = 'constant')
        `optimal` option will be supported in a future version
        `constant` keeps the learning rate constant
        `adaptive` changes the learning rate if the training loss or the
        validation accuracy does not improve for `n_iter_no_change` epochs.
        The old learning rate is generally divided by 5
    n_iter_no_change : int (default = 5)
        the number of epochs to train without any improvement in the model
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Notes
    -----
    For additional docs, see `scikitlearn's SGDRegressor
    <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html>`_.
    """

    def __init__(self, *, loss='squared_loss', penalty='l2', alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, epochs=1000, tol=1e-3,
                 shuffle=True, learning_rate='constant', eta0=0.001,
                 power_t=0.5, batch_size=32, n_iter_no_change=5, handle=None,
                 verbose=False, output_type=None):
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)
        # Only squared loss is implemented for the regressor; reject
        # anything else up front.
        if loss in ['squared_loss']:
            self.loss = loss
        else:
            msg = "loss {!r} is not supported"
            raise TypeError(msg.format(loss))

        self.penalty = penalty
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.epochs = epochs
        self.tol = tol
        self.shuffle = shuffle
        self.learning_rate = learning_rate
        self.eta0 = eta0
        self.power_t = power_t
        self.batch_size = batch_size
        self.n_iter_no_change = n_iter_no_change
        # All real work is delegated to the SGD solver, configured with
        # this estimator's hyperparameters.
        self.solver_model = SGD(**self.get_params())

    @generate_docstring()
    def fit(self, X, y, convert_dtype=True) -> "MBSGDRegressor":
        """
        Fit the model with X and y.

        Delegates to the underlying SGD solver.
        """
        self.solver_model.fit(X, y, convert_dtype=convert_dtype)
        return self

    @generate_docstring(return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Predicted values',
                                       'shape': '(n_samples, 1)'})
    @cuml.internals.api_base_return_array_skipall
    def predict(self, X, convert_dtype=False) -> CumlArray:
        """
        Predicts the y for X.

        Delegates to the underlying SGD solver.
        """
        preds = self.solver_model.predict(X,
                                          convert_dtype=convert_dtype)
        return preds

    def set_params(self, **params):
        """Set parameters on this estimator and the underlying solver."""
        super().set_params(**params)
        self.solver_model.set_params(**params)
        return self

    def get_param_names(self):
        """Return the hyperparameter names exposed by this estimator."""
        return super().get_param_names() + [
            "loss",
            "penalty",
            "alpha",
            "l1_ratio",
            "fit_intercept",
            "epochs",
            "tol",
            "shuffle",
            "learning_rate",
            "eta0",
            "power_t",
            "batch_size",
            "n_iter_no_change",
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/linear_regression.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
import warnings
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
from libc.stdint cimport uintptr_t
from cuml.internals.array import CumlArray
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.base import UniversalBase
from cuml.internals.mixins import RegressorMixin, FMajorInputTagMixin
from cuml.common.doc_utils import generate_docstring
from cuml.linear_model.base import LinearPredictMixin
from cuml.common import input_to_cuml_array
from cuml.internals.api_decorators import device_interop_preparation
from cuml.internals.api_decorators import enable_device_interop
IF GPUBUILD == 1:
    from libcpp cimport bool
    from pylibraft.common.handle cimport handle_t
    from pylibraft.common.handle import Handle
    cdef extern from "cuml/linear_model/glm.hpp" namespace "ML::GLM":
        # Single-GPU OLS fit, float32 overload. Coefficients and intercept
        # are written through the output pointers; `algo` selects the
        # decomposition (SVD/Eig/QR variants).
        cdef void olsFit(handle_t& handle,
                         float *input,
                         size_t n_rows,
                         size_t n_cols,
                         float *labels,
                         float *coef,
                         float *intercept,
                         bool fit_intercept,
                         bool normalize,
                         int algo,
                         float *sample_weight) except +

        # float64 overload of the same entry point.
        cdef void olsFit(handle_t& handle,
                         double *input,
                         size_t n_rows,
                         size_t n_cols,
                         double *labels,
                         double *coef,
                         double *intercept,
                         bool fit_intercept,
                         bool normalize,
                         int algo,
                         double *sample_weight) except +
def divide_non_zero(x1, x2):
    """Elementwise ``x1 / x2`` with near-zero divisors treated as 1.

    Entries of ``x2`` whose magnitude is below ``eps`` are replaced by 1
    before dividing, so the corresponding outputs equal ``x1`` instead of
    blowing up.

    Fix: the previous implementation clobbered the caller's ``x2`` array
    in place; the replacement is now done on a copy, leaving both inputs
    unmodified.
    """
    # Value chosen to be consistent with the RAFT implementation in
    # linalg/detail/lstsq.cuh
    eps = 1e-10
    # Work on a copy so the caller's array is not mutated.
    safe_x2 = x2.copy()
    safe_x2[abs(safe_x2) < eps] = 1.
    return x1 / safe_x2
def fit_multi_target(X, y, fit_intercept=True, sample_weight=None):
    """Least-squares fit for multi-target linear regression via SVD.

    Solves ``min ||X @ coef + intercept - y||`` for 2-D ``y`` (multiple
    target columns) using a thin SVD pseudo-inverse, optionally weighted
    by ``sample_weight``.

    Parameters
    ----------
    X : array-like, shape (n_rows, n_cols)
    y : array-like, shape (n_rows, n_targets)
    fit_intercept : bool, default True
        When True, an all-ones column is appended to X and the intercept
        is recovered from the last solution row.
    sample_weight : array-like of shape (n_rows,), optional
        Per-sample weights; applied as sqrt(w) row scaling on both X and y.

    Returns
    -------
    (coef, intercept) : tuple of CumlArray
        ``intercept`` is None when ``fit_intercept`` is False.
    """
    X = CumlArray.from_input(X)
    y = CumlArray.from_input(y)
    assert X.ndim == 2
    assert y.ndim == 2
    if sample_weight is not None:
        sample_weight = CumlArray.from_input(sample_weight)

    x_rows, x_cols = X.shape
    if x_cols == 0:
        raise ValueError(
            "Number of columns cannot be less than one"
        )
    if x_rows < 2:
        raise ValueError(
            "Number of rows cannot be less than two"
        )

    # `mem_type.xpy` is the numpy-compatible module (numpy or cupy) for
    # wherever the data lives; all array math below goes through it.
    X_arr = X.to_output('array')
    y_arr = y.to_output('array')

    if fit_intercept:
        # Add column containing ones to fit intercept.
        nrow, ncol = X.shape
        X_wide = X.mem_type.xpy.empty_like(
            X_arr, shape=(nrow, ncol + 1)
        )
        X_wide[:, :ncol] = X_arr
        X_wide[:, ncol] = 1.
        X_arr = X_wide

    if sample_weight is not None:
        # Weighted least squares: scale rows by sqrt(w) so the normal
        # equations carry weight w per sample.
        sample_weight = X.mem_type.xpy.sqrt(sample_weight)
        X_arr = sample_weight[:, None] * X_arr
        y_arr = sample_weight[:, None] * y_arr

    # Pseudo-inverse solution: params = V @ diag(1/s) @ U^T @ y, with
    # near-zero singular values neutralized by divide_non_zero.
    u, s, vh = X.mem_type.xpy.linalg.svd(X_arr, full_matrices=False)
    params = vh.T @ divide_non_zero(u.T @ y_arr, s[:, None])

    # With an intercept, the last solution row is the intercept itself.
    coef = params[:-1] if fit_intercept else params
    intercept = params[-1] if fit_intercept else None

    return (
        CumlArray.from_input(coef),
        None if intercept is None else CumlArray.from_input(intercept)
    )
class LinearRegression(LinearPredictMixin,
UniversalBase,
RegressorMixin,
FMajorInputTagMixin):
"""
LinearRegression is a simple machine learning model where the response y is
modelled by a linear combination of the predictors in X.
cuML's LinearRegression expects either a cuDF DataFrame or a NumPy matrix
and provides 2 algorithms SVD and Eig to fit a linear model. SVD is more
stable, but Eig (default) is much faster.
Examples
--------
.. code-block:: python
>>> import cupy as cp
>>> import cudf
>>> # Both import methods supported
>>> from cuml import LinearRegression
>>> from cuml.linear_model import LinearRegression
>>> lr = LinearRegression(fit_intercept = True, normalize = False,
... algorithm = "eig")
>>> X = cudf.DataFrame()
>>> X['col1'] = cp.array([1,1,2,2], dtype=cp.float32)
>>> X['col2'] = cp.array([1,2,2,3], dtype=cp.float32)
>>> y = cudf.Series(cp.array([6.0, 8.0, 9.0, 11.0], dtype=cp.float32))
>>> reg = lr.fit(X,y)
>>> print(reg.coef_)
0 1.0
1 2.0
dtype: float32
>>> print(reg.intercept_)
3.0...
>>> X_new = cudf.DataFrame()
>>> X_new['col1'] = cp.array([3,2], dtype=cp.float32)
>>> X_new['col2'] = cp.array([5,5], dtype=cp.float32)
>>> preds = lr.predict(X_new)
>>> print(preds) # doctest: +SKIP
0 15.999...
1 14.999...
dtype: float32
Parameters
----------
algorithm : {'svd', 'eig', 'qr', 'svd-qr', 'svd-jacobi'}, (default = 'eig')
Choose an algorithm:
* 'svd' - alias for svd-jacobi;
* 'eig' - use an eigendecomposition of the covariance matrix;
* 'qr' - use QR decomposition algorithm and solve `Rx = Q^T y`
* 'svd-qr' - compute SVD decomposition using QR algorithm
* 'svd-jacobi' - compute SVD decomposition using Jacobi iterations.
Among these algorithms, only 'svd-jacobi' supports the case when the
number of features is larger than the sample size; this algorithm
is force-selected automatically in such a case.
For the broad range of inputs, 'eig' and 'qr' are usually the fastest,
followed by 'svd-jacobi' and then 'svd-qr'. In theory, SVD-based
algorithms are more stable.
fit_intercept : boolean (default = True)
If True, LinearRegression tries to correct for the global mean of y.
If False, the model expects that you have centered the data.
copy_X : bool, default=True
If True, it is guaranteed that a copy of X is created, leaving the
original X unchanged. However, if set to False, X may be modified
directly, which would reduce the memory usage of the estimator.
normalize : boolean (default = False)
This parameter is ignored when `fit_intercept` is set to False.
If True, the predictors in X will be normalized by dividing by the
column-wise standard deviation.
If False, no scaling will be done.
Note: this is in contrast to sklearn's deprecated `normalize` flag,
which divides by the column-wise L2 norm; but this is the same as if
using sklearn's StandardScaler.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
Attributes
----------
coef_ : array, shape (n_features)
The estimated coefficients for the linear regression model.
intercept_ : array
The independent term. If `fit_intercept` is False, will be 0.
Notes
-----
LinearRegression suffers from multicollinearity (when columns are
correlated with each other), and variance explosions from outliers.
Consider using Ridge Regression to fix the multicollinearity problem, and
consider maybe first DBSCAN to remove the outliers, or statistical analysis
to filter possible outliers.
**Applications of LinearRegression**
LinearRegression is used in regression tasks where one wants to predict
say sales or house prices. It is also used in extrapolation or time
series tasks, dynamic systems modelling and many other machine learning
tasks. This model should be first tried if the machine learning problem
is a regression task (predicting a continuous variable).
For additional information, see `scikit-learn's OLS documentation
<https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html>`__.
For an additional example see `the OLS notebook
<https://github.com/rapidsai/cuml/blob/main/notebooks/linear_regression_demo.ipynb>`__.
.. note:: Starting from version 23.08, the new 'copy_X' parameter defaults
to 'True', ensuring a copy of X is created after passing it to
fit(), preventing any changes to the input, but with increased
memory usage. This represents a change in behavior from previous
versions. With `copy_X=False` a copy might still be created if
necessary.
"""
# Dotted path of the equivalent CPU estimator, used by the CPU/GPU
# interop machinery to build a scikit-learn model on demand.
_cpu_estimator_import_path = 'sklearn.linear_model.LinearRegression'
# Fitted parameters held as F-ordered device arrays; the descriptor
# converts them lazily to the configured output type on attribute access.
coef_ = CumlArrayDescriptor(order='F')
intercept_ = CumlArrayDescriptor(order='F')
@device_interop_preparation
def __init__(self, *, algorithm='eig', fit_intercept=True,
             copy_X=None, normalize=False,
             handle=None, verbose=False, output_type=None):
    """
    Initialize the OLS linear regression estimator.

    Parameters are documented in the class docstring.

    Raises
    ------
    TypeError
        If ``algorithm`` is not one of the supported solver names.
    """
    # Cython compile-time switch: this branch is only compiled into
    # GPU-enabled builds.
    IF GPUBUILD == 1:
        if handle is None and algorithm == 'eig':
            # if possible, create two streams, so that eigenvalue decomposition
            # can benefit from running independent operations concurrently.
            handle = Handle(n_streams=2)
    super().__init__(handle=handle,
                     verbose=verbose,
                     output_type=output_type)

    # internal array attributes (populated by fit())
    self.coef_ = None
    self.intercept_ = None

    self.fit_intercept = fit_intercept
    self.normalize = normalize

    if algorithm in ['svd', 'eig', 'qr', 'svd-qr', 'svd-jacobi']:
        self.algorithm = algorithm
        # Integer solver code handed down to the C++ layer.
        self.algo = self._get_algorithm_int(algorithm)
    else:
        msg = "algorithm {!r} is not supported"
        raise TypeError(msg.format(algorithm))

    self.intercept_value = 0.0

    if copy_X is None:
        # copy_X was introduced in 23.08 with a default of True; warn users
        # who did not set it explicitly about the behavior change.
        warnings.warn(
            "Starting from version 23.08, the new 'copy_X' parameter defaults "
            "to 'True', ensuring a copy of X is created after passing it to "
            "fit(), preventing any changes to the input, but with increased "
            "memory usage. This represents a change in behavior from previous "
            "versions. With `copy_X=False` a copy might still be created if "
            "necessary. Explicitly set 'copy_X' to either True or False to "
            "suppress this warning.", UserWarning)
        copy_X = True
    self.copy_X = copy_X
def _get_algorithm_int(self, algorithm):
return {
'svd': 0,
'eig': 1,
'qr': 2,
'svd-qr': 3,
'svd-jacobi': 0
}[algorithm]
@generate_docstring()
@enable_device_interop
def fit(self, X, y, convert_dtype=True,
        sample_weight=None) -> "LinearRegression":
    """
    Fit the model with X and y.
    """
    cdef uintptr_t _X_ptr, _y_ptr, sample_weight_ptr

    # Deep-copy X up front only when the caller requested copying AND the
    # input is a device array with a single column.
    # NOTE(review): the copy is skipped for multi-column device inputs --
    # presumably those solver paths never write into X; confirm.
    need_explicit_copy = self.copy_X and hasattr(X, "__cuda_array_interface__") \
        and (len(X.shape) < 2 or X.shape[1] == 1)

    X_m, n_rows, self.n_features_in_, self.dtype = \
        input_to_cuml_array(X,
                            check_dtype=[np.float32, np.float64],
                            deepcopy=need_explicit_copy)
    _X_ptr = X_m.ptr
    self.feature_names_in_ = X_m.index

    # y must match X's dtype; convert only when the caller allows it.
    y_m, _, y_cols, _ = \
        input_to_cuml_array(y, check_dtype=self.dtype,
                            convert_to_dtype=(self.dtype if convert_dtype
                                              else None),
                            check_rows=n_rows)
    _y_ptr = y_m.ptr

    if sample_weight is not None:
        sample_weight_m, _, _, _ = \
            input_to_cuml_array(sample_weight, check_dtype=self.dtype,
                                convert_to_dtype=(
                                    self.dtype if convert_dtype else None),
                                check_rows=n_rows, check_cols=1)
        sample_weight_ptr = sample_weight_m.ptr
    else:
        # Null pointer signals "no sample weights" to the C++ layer.
        sample_weight_ptr = 0

    if self.n_features_in_ < 1:
        msg = "X matrix must have at least a column"
        raise TypeError(msg)

    if n_rows < 2:
        msg = "X matrix must have at least two rows"
        raise TypeError(msg)

    if self.n_features_in_ == 1 and self.algo != 0:
        # The eig solver cannot handle a single feature; fall back to svd.
        warnings.warn("Changing solver from 'eig' to 'svd' as eig " +
                      "solver does not support training data with 1 " +
                      "column currently.", UserWarning)
        self.algo = 0

    if 1 < y_cols:
        # Multi-target regression is not implemented in the C++ layer;
        # delegate to the Python implementation.
        if sample_weight is None:
            sample_weight_m = None
        return self._fit_multi_target(
            X_m, y_m, convert_dtype, sample_weight_m
        )

    self.coef_ = CumlArray.zeros(self.n_features_in_, dtype=self.dtype)
    cdef uintptr_t _coef_ptr = self.coef_.ptr

    # Intercept is written by the C++ kernel into one of these scalars,
    # depending on the dtype.
    cdef float _c_intercept_f32
    cdef double _c_intercept_f64

    IF GPUBUILD == 1:
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        if self.dtype == np.float32:
            olsFit(handle_[0],
                   <float*>_X_ptr,
                   <size_t>n_rows,
                   <size_t>self.n_features_in_,
                   <float*>_y_ptr,
                   <float*>_coef_ptr,
                   <float*>&_c_intercept_f32,
                   <bool>self.fit_intercept,
                   <bool>self.normalize,
                   <int>self.algo,
                   <float*>sample_weight_ptr)

            self.intercept_ = _c_intercept_f32
        else:
            olsFit(handle_[0],
                   <double*>_X_ptr,
                   <size_t>n_rows,
                   <size_t>self.n_features_in_,
                   <double*>_y_ptr,
                   <double*>_coef_ptr,
                   <double*>&_c_intercept_f64,
                   <bool>self.fit_intercept,
                   <bool>self.normalize,
                   <int>self.algo,
                   <double*>sample_weight_ptr)

            self.intercept_ = _c_intercept_f64

        # Block until the asynchronous GPU work is finished before
        # releasing the input buffers.
        self.handle.sync()

    del X_m
    del y_m
    if sample_weight is not None:
        del sample_weight_m

    return self
def _fit_multi_target(self, X, y, convert_dtype=True, sample_weight=None):
    """Fit a multi-target (multi-column ``y``) linear regression.

    The C++ layer has no multi-target OLS yet, so this path is solved in
    Python. Returns ``self``.
    """
    target_dtype = self.dtype if convert_dtype else None

    X = CumlArray.from_input(X, convert_to_dtype=target_dtype)
    y = CumlArray.from_input(y, convert_to_dtype=target_dtype)

    # A 1-D y counts as a single target column.
    y_cols = y.shape[1] if len(y.shape) > 1 else 1

    if self.algo != 0:
        warnings.warn("Changing solver to 'svd' as this is the only "
                      "solver that support multiple targets currently.",
                      UserWarning)
        self.algo = 0
    if self.normalize:
        raise ValueError(
            "The normalize option is not supported when `y` has "
            "multiple columns."
        )

    if sample_weight is not None:
        sample_weight = CumlArray.from_input(
            sample_weight,
            convert_to_dtype=target_dtype,
        )

    coef, intercept = fit_multi_target(
        X,
        y,
        fit_intercept=self.fit_intercept,
        sample_weight=sample_weight,
    )

    # Validate shapes/dtypes of the solver output before storing.
    self.coef_ = CumlArray.from_input(
        coef,
        check_dtype=self.dtype,
        check_rows=self.n_features_in_,
        check_cols=y_cols,
    )

    if self.fit_intercept:
        self.intercept_ = CumlArray.from_input(
            intercept,
            check_dtype=self.dtype,
            check_rows=y_cols,
            check_cols=1,
        )
    else:
        self.intercept_ = CumlArray.zeros(y_cols, dtype=self.dtype)

    return self
def _predict(self, X, convert_dtype=True) -> CumlArray:
    """Predict y for X using the fitted coefficients.

    Re-derives dtype and feature count from ``coef_`` so prediction works
    even when attributes were restored out-of-band (e.g. interop/pickle).
    """
    self.dtype = self.coef_.dtype
    # NOTE(review): 'features_in_' looks like a typo for 'n_features_in_'
    # (the attribute written by fit() and listed in get_attr_names) --
    # confirm before relying on it.
    self.features_in_ = self.coef_.shape[0]
    # Adding UniversalBase here skips it in the Method Resolution Order
    # (MRO) Since UniversalBase and LinearPredictMixin now both have a
    # `predict` method
    return super()._predict(X, convert_dtype=convert_dtype)
def get_param_names(self):
    """Return the hyper-parameter names handled by get_params/set_params."""
    extra_params = ['algorithm', 'fit_intercept', 'copy_X', 'normalize']
    return super().get_param_names() + extra_params
def get_attr_names(self):
    """Return fitted-attribute names transferred during CPU/GPU interop."""
    return [
        'coef_',
        'intercept_',
        'n_features_in_',
        'feature_names_in_',
    ]
@staticmethod
def _more_static_tags():
return {"multioutput": True}
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/elastic_net.pyx | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from inspect import signature
from cuml.solvers import CD, QN
from cuml.internals.base import UniversalBase
from cuml.internals.mixins import RegressorMixin, FMajorInputTagMixin
from cuml.common.doc_utils import generate_docstring
from cuml.internals.array import CumlArray
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.logger import warn
from cuml.linear_model.base import LinearPredictMixin
from cuml.internals.api_decorators import device_interop_preparation
from cuml.internals.api_decorators import enable_device_interop
class ElasticNet(UniversalBase,
                 LinearPredictMixin,
                 RegressorMixin,
                 FMajorInputTagMixin):
    """
    ElasticNet extends LinearRegression with combined L1 and L2 regularizations
    on the coefficients when predicting response y with a linear combination of
    the predictors in X. It can reduce the variance of the predictors, force
    some coefficients to be small, and improves the conditioning of the
    problem.

    cuML's ElasticNet accepts an array-like object or cuDF DataFrame and uses
    coordinate descent to fit a linear model.

    Examples
    --------

    .. code-block:: python

        >>> import cupy as cp
        >>> import cudf
        >>> from cuml.linear_model import ElasticNet
        >>> enet = ElasticNet(alpha = 0.1, l1_ratio=0.5, solver='qn')
        >>> X = cudf.DataFrame()
        >>> X['col1'] = cp.array([0, 1, 2], dtype = cp.float32)
        >>> X['col2'] = cp.array([0, 1, 2], dtype = cp.float32)
        >>> y = cudf.Series(cp.array([0.0, 1.0, 2.0], dtype = cp.float32) )
        >>> result_enet = enet.fit(X, y)
        >>> print(result_enet.coef_)
        0    0.445...
        1    0.445...
        dtype: float32
        >>> print(result_enet.intercept_)
        0.108433...
        >>> X_new = cudf.DataFrame()
        >>> X_new['col1'] = cp.array([3,2], dtype = cp.float32)
        >>> X_new['col2'] = cp.array([5,5], dtype = cp.float32)
        >>> preds = result_enet.predict(X_new)
        >>> print(preds)
        0    3.674...
        1    3.228...
        dtype: float32

    Parameters
    ----------
    alpha : float (default = 1.0)
        Constant that multiplies the L1 term.
        alpha = 0 is equivalent to an ordinary least square, solved by the
        LinearRegression object.
        For numerical reasons, using alpha = 0 with the Lasso object is not
        advised.
        Given this, you should use the LinearRegression object.
    l1_ratio : float (default = 0.5)
        The ElasticNet mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L2 penalty. For l1_ratio = 1 it is
        an L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
    fit_intercept : boolean (default = True)
        If True, Lasso tries to correct for the global mean of y.
        If False, the model expects that you have centered the data.
    normalize : boolean (default = False)
        If True, the predictors in X will be normalized by dividing by the
        column-wise standard deviation.
        If False, no scaling will be done.
        Note: this is in contrast to sklearn's deprecated `normalize` flag,
        which divides by the column-wise L2 norm; but this is the same as if
        using sklearn's StandardScaler.
    max_iter : int (default = 1000)
        The maximum number of iterations
    tol : float (default = 1e-3)
        The tolerance for the optimization: if the updates are smaller than
        tol, the optimization code checks the dual gap for optimality and
        continues until it is smaller than tol.
    solver : {'cd', 'qn'} (default='cd')
        Choose an algorithm:

          * 'cd' - coordinate descent
          * 'qn' - quasi-newton

        You may find the alternative 'qn' algorithm is faster when the number
        of features is sufficiently large, but the sample size is small.
    selection : {'cyclic', 'random'} (default='cyclic')
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default.
        This (setting to 'random') often leads to significantly faster
        convergence especially when tol is higher than 1e-4.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.

    Attributes
    ----------
    coef_ : array, shape (n_features)
        The estimated coefficients for the linear regression model.
    intercept_ : array
        The independent term. If `fit_intercept` is False, will be 0.

    Notes
    -----
    For additional docs, see `scikit-learn's ElasticNet
    <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html>`_.
    """

    # Dotted path of the equivalent CPU estimator, used by CPU/GPU interop.
    _cpu_estimator_import_path = 'sklearn.linear_model.ElasticNet'

    # Fitted coefficients stored as an F-ordered device array; converted
    # lazily to the configured output type on attribute access.
    coef_ = CumlArrayDescriptor(order='F')

    @device_interop_preparation
    def __init__(self, *, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-3,
                 solver='cd', selection='cyclic',
                 handle=None, output_type=None, verbose=False):
        """
        Initializes the elastic-net regression class.

        Parameters
        ----------
        alpha : float or double.
        l1_ratio : float or double.
        fit_intercept: boolean.
        normalize: boolean.
        max_iter: int
        tol: float or double.
        solver: str, 'cd' or 'qn'
        selection : str, 'cyclic', or 'random'

        For additional docs, see `scikit-learn's ElasticNet
        <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html>`_.
        """
        # Hard-code verbosity as CoordinateDescent does not have verbosity
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)

        # Validate hyper-parameters before storing them; both raise
        # ValueError on invalid values.
        self._check_alpha(alpha)
        self._check_l1_ratio(l1_ratio)

        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.solver = solver
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.solver_model = None
        if selection in ['cyclic', 'random']:
            self.selection = selection
        else:
            msg = "selection {!r} is not supported"
            raise TypeError(msg.format(selection))

        self.intercept_value = 0.0

        # 'random' selection translates to shuffling in the CD solver.
        shuffle = False
        if self.selection == 'random':
            shuffle = True

        if solver == 'qn':
            # Warn when CD-only parameters were explicitly changed from
            # their defaults but will be ignored by the QN solver.
            pams = signature(self.__init__).parameters
            if (pams['selection'].default != selection):
                warn("Parameter 'selection' has no effect "
                     "when 'qn' solver is used.")
            if (pams['normalize'].default != normalize):
                warn("Parameter 'normalize' has no effect "
                     "when 'qn' solver is used.")
            # Split the elastic-net penalty into its L1 and L2 components
            # as expected by the quasi-Newton solver.
            self.solver_model = QN(
                fit_intercept=self.fit_intercept,
                l1_strength=self.alpha * self.l1_ratio,
                l2_strength=self.alpha * (1.0 - self.l1_ratio),
                max_iter=self.max_iter, handle=self.handle,
                loss='l2', tol=self.tol, penalty_normalized=False,
                verbose=self.verbose)
        elif solver == 'cd':
            self.solver_model = CD(
                fit_intercept=self.fit_intercept,
                normalize=self.normalize, alpha=self.alpha,
                l1_ratio=self.l1_ratio, shuffle=shuffle,
                max_iter=self.max_iter, handle=self.handle,
                tol=self.tol)
        else:
            raise TypeError(f"solver {solver} is not supported")

    def _check_alpha(self, alpha):
        """Raise ValueError if ``alpha`` is not strictly positive."""
        if alpha <= 0.0:
            # Bug fix: the message previously used str.format without a
            # '{}' placeholder, so the offending value never appeared in
            # the error text.
            raise ValueError(
                "alpha value has to be positive; got {}".format(alpha))

    def _check_l1_ratio(self, l1_ratio):
        """Raise ValueError if ``l1_ratio`` lies outside [0.0, 1.0]."""
        if l1_ratio < 0.0 or l1_ratio > 1.0:
            # Bug fix: include the offending value (the old message had no
            # format placeholder).
            raise ValueError(
                "l1_ratio value has to be between 0.0 and 1.0; "
                "got {}".format(l1_ratio))

    @generate_docstring()
    @enable_device_interop
    def fit(self, X, y, convert_dtype=True,
            sample_weight=None) -> "ElasticNet":
        """
        Fit the model with X and y.

        """
        self.n_features_in_ = X.shape[1] if X.ndim == 2 else 1
        if hasattr(X, 'index'):
            # NOTE(review): this stores the row index of indexed inputs
            # (e.g. cuDF), mirroring the single-GPU LinearRegression --
            # confirm this is the intended content of feature_names_in_.
            self.feature_names_in_ = X.index

        # Delegate the actual optimization to the selected solver.
        self.solver_model.fit(X, y, convert_dtype=convert_dtype,
                              sample_weight=sample_weight)

        if isinstance(self.solver_model, QN):
            # QN exposes coefficients with shape (1, n_features); flatten
            # them to the (n_features,) shape this estimator exposes.
            coefs = self.solver_model.coef_
            self.coef_ = CumlArray(
                data=coefs,
                index=coefs._index,
                dtype=coefs.dtype,
                order=coefs.order,
                shape=(coefs.shape[1],)
            )
            self.intercept_ = self.solver_model.intercept_.item()

        return self

    def set_params(self, **params):
        """Set parameters on this estimator and mirror them onto the
        underlying solver, translating ``selection`` to the solver's
        ``shuffle`` flag."""
        super().set_params(**params)
        if 'selection' in params:
            params.pop('selection')
            params['shuffle'] = self.selection == 'random'
        self.solver_model.set_params(**params)
        return self

    def get_param_names(self):
        """Return the hyper-parameter names handled by get/set_params."""
        return super().get_param_names() + [
            "alpha",
            "l1_ratio",
            "fit_intercept",
            "normalize",
            "max_iter",
            "tol",
            "solver",
            "selection",
        ]

    def get_attr_names(self):
        """Return fitted-attribute names used for CPU/GPU interop."""
        return ['intercept_', 'coef_', 'n_features_in_', 'feature_names_in_']
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/mbsgd_classifier.pyx | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.internals.base import Base
from cuml.internals.mixins import ClassifierMixin
from cuml.common.doc_utils import generate_docstring
from cuml.internals.mixins import FMajorInputTagMixin
from cuml.solvers import SGD
class MBSGDClassifier(Base,
                      ClassifierMixin,
                      FMajorInputTagMixin):
    """
    Linear models (linear SVM, logistic regression, or linear regression)
    fitted by minimizing a regularized empirical loss with mini-batch SGD.
    The MBSGD Classifier implementation is experimental and it uses a
    different algorithm than sklearn's SGDClassifier. In order to improve
    the results obtained from cuML's MBSGDClassifier:

    * Reduce the batch size
    * Increase the eta0
    * Increase the number of iterations

    Since cuML is analyzing the data in batches using a small eta0 might
    not let the model learn as much as scikit learn does. Furthermore,
    decreasing the batch size might see an increase in the time required
    to fit the model.

    Examples
    --------

    .. code-block:: python

        >>> import cupy as cp
        >>> import cudf
        >>> from cuml.linear_model import MBSGDClassifier
        >>> X = cudf.DataFrame()
        >>> X['col1'] = cp.array([1,1,2,2], dtype = cp.float32)
        >>> X['col2'] = cp.array([1,2,2,3], dtype = cp.float32)
        >>> y = cudf.Series(cp.array([1, 1, 2, 2], dtype=cp.float32))
        >>> pred_data = cudf.DataFrame()
        >>> pred_data['col1'] = cp.asarray([3, 2], dtype=cp.float32)
        >>> pred_data['col2'] = cp.asarray([5, 5], dtype=cp.float32)
        >>> cu_mbsgd_classifier = MBSGDClassifier(learning_rate='constant',
        ...                                       eta0=0.05, epochs=2000,
        ...                                       fit_intercept=True,
        ...                                       batch_size=1, tol=0.0,
        ...                                       penalty='l2',
        ...                                       loss='squared_loss',
        ...                                       alpha=0.5)
        >>> cu_mbsgd_classifier.fit(X, y)
        MBSGDClassifier()
        >>> print("cuML intercept : ", cu_mbsgd_classifier.intercept_)
        cuML intercept :  0.725...
        >>> print("cuML coef : ", cu_mbsgd_classifier.coef_)
        cuML coef :  0   0.273...
        1    0.182...
        dtype: float32
        >>> cu_pred = cu_mbsgd_classifier.predict(pred_data)
        >>> print("cuML predictions : ", cu_pred)
        cuML predictions :  0   1.0
        1    1.0
        dtype: float32

    Parameters
    ----------
    loss : {'hinge', 'log', 'squared_loss'} (default = 'hinge')
        'hinge' uses linear SVM
        'log' uses logistic regression
        'squared_loss' uses linear regression
    penalty : {'none', 'l1', 'l2', 'elasticnet'} (default = 'l2')
        'none' does not perform any regularization
        'l1' performs L1 norm (Lasso) which minimizes the sum of the abs value
        of coefficients
        'l2' performs L2 norm (Ridge) which minimizes the sum of the square of
        the coefficients
        'elasticnet' performs Elastic Net regularization which is a weighted
        average of L1 and L2 norms
    alpha : float (default = 0.0001)
        The constant value which decides the degree of regularization
    l1_ratio : float (default=0.15)
        The l1_ratio is used only when `penalty = elasticnet`. The value for
        l1_ratio should be `0 <= l1_ratio <= 1`. When `l1_ratio = 0` then the
        `penalty = 'l2'` and if `l1_ratio = 1` then `penalty = 'l1'`
    batch_size : int (default = 32)
        It sets the number of samples that will be included in each batch.
    fit_intercept : boolean (default = True)
        If True, the model tries to correct for the global mean of y.
        If False, the model expects that you have centered the data.
    epochs : int (default = 1000)
        The number of times the model should iterate through the entire dataset
        during training (default = 1000)
    tol : float (default = 1e-3)
        The training process will stop if current_loss > previous_loss - tol
    shuffle : boolean (default = True)
        True, shuffles the training data after each epoch
        False, does not shuffle the training data after each epoch
    eta0 : float (default = 0.001)
        Initial learning rate
    power_t : float (default = 0.5)
        The exponent used for calculating the invscaling learning rate
    learning_rate : {'optimal', 'constant', 'invscaling', 'adaptive'} \
        (default = 'constant')
        `optimal` option will be supported in a future version

        `constant` keeps the learning rate constant

        `adaptive` changes the learning rate if the training loss or the
        validation accuracy does not improve for `n_iter_no_change` epochs.
        The old learning rate is generally divided by 5
    n_iter_no_change : int (default = 5)
        the number of epochs to train without any improvement in the model
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Notes
    -----
    For additional docs, see `scikit-learn's SGDClassifier
    <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html>`_.
    """

    def __init__(self, *, loss='hinge', penalty='l2', alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, epochs=1000, tol=1e-3,
                 shuffle=True, learning_rate='constant', eta0=0.001,
                 power_t=0.5, batch_size=32, n_iter_no_change=5, handle=None,
                 verbose=False, output_type=None):
        """Initialize the mini-batch SGD classifier.

        All work is delegated to an internal `cuml.solvers.SGD` instance
        built from this estimator's parameters.
        """
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)
        self.loss = loss
        self.penalty = penalty
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.epochs = epochs
        self.tol = tol
        self.shuffle = shuffle
        self.learning_rate = learning_rate
        self.eta0 = eta0
        self.power_t = power_t
        self.batch_size = batch_size
        self.n_iter_no_change = n_iter_no_change
        # The SGD solver accepts the same parameter names, so the full
        # parameter dict (including handle/verbose/output_type from Base)
        # can be forwarded as-is.
        self.solver_model = SGD(**self.get_params())

    @generate_docstring()
    def fit(self, X, y, convert_dtype=True) -> "MBSGDClassifier":
        """
        Fit the model with X and y.

        """
        # Propagate the estimator type so the solver behaves as a classifier.
        self.solver_model._estimator_type = self._estimator_type
        self.solver_model.fit(X, y, convert_dtype=convert_dtype)
        return self

    @generate_docstring(return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Predicted values',
                                       'shape': '(n_samples, 1)'})
    @cuml.internals.api_base_return_array_skipall
    def predict(self, X, convert_dtype=False) -> CumlArray:
        """
        Predicts the y for X.

        """
        # NOTE(review): convert_dtype defaults to False here but True in
        # fit() -- confirm the asymmetry is intentional.
        preds = \
            self.solver_model.predictClass(X,
                                           convert_dtype=convert_dtype)
        return preds

    def set_params(self, **params):
        # Keep the wrapped solver in sync with this estimator's parameters.
        super().set_params(**params)
        self.solver_model.set_params(**params)
        return self

    def get_param_names(self):
        # Hyper-parameters exposed through get_params/set_params.
        return super().get_param_names() + [
            "loss",
            "penalty",
            "alpha",
            "l1_ratio",
            "fit_intercept",
            "epochs",
            "tol",
            "shuffle",
            "learning_rate",
            "eta0",
            "power_t",
            "batch_size",
            "n_iter_no_change",
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/base_mg.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import cuml.common.opg_data_utils_mg as opg
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
from libc.stdint cimport uintptr_t
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.common.opg_data_utils_mg cimport *
from cuml.internals.input_utils import input_to_cuml_array
from cuml.decomposition.utils cimport *
class MGFitMixin(object):
    """Mixin providing the shared multi-node multi-GPU (MNMG) ``fit``
    entry point for the MG linear-model classes. Subclasses must implement
    ``_fit`` taking the OPG pointers built here."""

    @cuml.internals.api_base_return_any_skipall
    def fit(self, input_data, n_rows, n_cols, partsToSizes, rank, order='F'):
        """
        Fit function for MNMG linear regression classes.
        This is not meant to be used as
        part of the public API.
        :param input_data: list of (X, y) tuples of local array partitions
        :param n_rows: total number of rows
        :param n_cols: total number of cols
        :param partsToSizes: array of tuples in the format: [(rank,size)]
        :param rank: rank of the current worker
        :param order: memory layout expected for the X partitions
        :return: self
        """

        self._set_output_type(input_data[0][0])
        self._set_n_features_in(n_cols)

        X_arys = []
        y_arys = []

        for i in range(len(input_data)):
            # The first partition establishes the dtype; subsequent
            # partitions are checked against it.
            if i == 0:
                check_dtype = [np.float32, np.float64]
            else:
                check_dtype = self.dtype

            X_m, _, self.n_cols, _ = \
                input_to_cuml_array(input_data[i][0], check_dtype=check_dtype, order=order)
            X_arys.append(X_m)

            if i == 0:
                self.dtype = X_m.dtype

            y_m, *_ = input_to_cuml_array(input_data[i][1],
                                          check_dtype=self.dtype)
            y_arys.append(y_m)

        self.coef_ = CumlArray.zeros(self.n_cols,
                                     dtype=self.dtype)
        cdef uintptr_t coef_ptr = self.coef_.ptr
        coef_ptr_arg = <size_t>coef_ptr

        # Build the OPG (one-process-per-GPU) descriptors the C++ layer
        # needs: partition sizes per rank and the overall layout.
        cdef uintptr_t rank_to_sizes = opg.build_rank_size_pair(partsToSizes,
                                                                rank)

        cdef uintptr_t part_desc = opg.build_part_descriptor(n_rows,
                                                             n_cols,
                                                             rank_to_sizes,
                                                             rank)

        cdef uintptr_t X_arg = opg.build_data_t(X_arys)
        cdef uintptr_t y_arg = opg.build_data_t(y_arys)

        # call inheriting class _fit that does all cython pointers and calls
        self._fit(X=X_arg,
                  y=y_arg,
                  coef_ptr=coef_ptr_arg,
                  input_desc=part_desc)

        # NOTE(review): if _fit raises, these OPG buffers are not freed --
        # confirm whether that leak is acceptable here.
        opg.free_rank_size_pair(rank_to_sizes)
        opg.free_part_descriptor(part_desc)
        opg.free_data_t(X_arg, self.dtype)
        opg.free_data_t(y_arg, self.dtype)

        return self
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/logistic_regression_mg.pyx | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from libcpp cimport bool
from libc.stdint cimport uintptr_t
from cuml.common import input_to_cuml_array
import numpy as np
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.linear_model.base_mg import MGFitMixin
from cuml.linear_model import LogisticRegression
from cuml.solvers.qn import QNParams
from cython.operator cimport dereference as deref
from pylibraft.common.handle cimport handle_t
from cuml.common.opg_data_utils_mg cimport *
# the cdef was copied from cuml.linear_model.qn
cdef extern from "cuml/linear_model/glm.hpp" namespace "ML::GLM" nogil:
# TODO: Use single-GPU version qn_loss_type and qn_params https://github.com/rapidsai/cuml/issues/5502
cdef enum qn_loss_type "ML::GLM::qn_loss_type":
QN_LOSS_LOGISTIC "ML::GLM::QN_LOSS_LOGISTIC"
QN_LOSS_SQUARED "ML::GLM::QN_LOSS_SQUARED"
QN_LOSS_SOFTMAX "ML::GLM::QN_LOSS_SOFTMAX"
QN_LOSS_SVC_L1 "ML::GLM::QN_LOSS_SVC_L1"
QN_LOSS_SVC_L2 "ML::GLM::QN_LOSS_SVC_L2"
QN_LOSS_SVR_L1 "ML::GLM::QN_LOSS_SVR_L1"
QN_LOSS_SVR_L2 "ML::GLM::QN_LOSS_SVR_L2"
QN_LOSS_ABS "ML::GLM::QN_LOSS_ABS"
QN_LOSS_UNKNOWN "ML::GLM::QN_LOSS_UNKNOWN"
cdef struct qn_params:
qn_loss_type loss
double penalty_l1
double penalty_l2
double grad_tol
double change_tol
int max_iter
int linesearch_max_iter
int lbfgs_memory
int verbose
bool fit_intercept
bool penalty_normalized
cdef extern from "cuml/linear_model/qn_mg.hpp" namespace "ML::GLM::opg" nogil:
cdef void qnFit(
handle_t& handle,
vector[floatData_t *] input_data,
PartDescriptor &input_desc,
vector[floatData_t *] labels,
float *coef,
const qn_params& pams,
bool X_col_major,
int n_classes,
float *f,
int *num_iters) except +
cdef vector[float] getUniquelabelsMG(
const handle_t& handle,
PartDescriptor &input_desc,
vector[floatData_t*] labels) except+
class LogisticRegressionMG(MGFitMixin, LogisticRegression):
    """
    Multi-node multi-GPU (one process per GPU) Logistic Regression.

    Thin wrapper over the single-GPU LogisticRegression that delegates
    training to the OPG quasi-Newton solver (``qnFit``).
    """

    def __init__(self, **kwargs):
        super(LogisticRegressionMG, self).__init__(**kwargs)

    @property
    @cuml.internals.api_base_return_array_skipall
    def coef_(self):
        # Coefficients are owned by the underlying QN solver model.
        return self.solver_model.coef_

    @coef_.setter
    def coef_(self, value):
        # convert 1-D value to 2-D (to inherit MGFitMixin which sets self.coef_ to a 1-D array of length self.n_cols)
        if len(value.shape) == 1:
            new_shape=(1, value.shape[0])
            cp_array = value.to_output('array').reshape(new_shape)
            value, _, _, _ = input_to_cuml_array(cp_array, order='K')
        if (self.fit_intercept) and (self.solver_model.intercept_ is None):
            # Lazily allocate a zero intercept so _calc_intercept has a target.
            self.solver_model.intercept_ = CumlArray.zeros(shape=(1, 1), dtype = value.dtype)
        self.solver_model.coef_ = value

    def create_qnparams(self):
        """Build a QNParams bundle from the estimator's hyperparameters."""
        return QNParams(
            loss=self.loss,
            penalty_l1=self.l1_strength,
            penalty_l2=self.l2_strength,
            grad_tol=self.tol,
            # Default the change tolerance to 1% of grad_tol when unset.
            change_tol=self.delta
            if self.delta is not None else (self.tol * 0.01),
            max_iter=self.max_iter,
            linesearch_max_iter=self.linesearch_max_iter,
            lbfgs_memory=self.lbfgs_memory,
            verbose=self.verbose,
            fit_intercept=self.fit_intercept,
            penalty_normalized=self.penalty_normalized
        )

    def prepare_for_fit(self, n_classes):
        """Validate the loss/class-count combination and size the coef array."""
        self.solver_model.qnparams = self.create_qnparams()

        # modified
        qnpams = self.solver_model.qnparams.params

        # modified qnp
        solves_classification = qnpams['loss'] in {
            qn_loss_type.QN_LOSS_LOGISTIC,
            qn_loss_type.QN_LOSS_SOFTMAX,
            qn_loss_type.QN_LOSS_SVC_L1,
            qn_loss_type.QN_LOSS_SVC_L2
        }
        solves_multiclass = qnpams['loss'] in {
            qn_loss_type.QN_LOSS_SOFTMAX
        }

        if solves_classification:
            self._num_classes = n_classes
        else:
            self._num_classes = 1

        if not solves_multiclass and self._num_classes > 2:
            raise ValueError(
                f"The selected solver ({self.loss}) does not support"
                f" more than 2 classes ({self._num_classes} discovered).")

        if qnpams['loss'] == qn_loss_type.QN_LOSS_SOFTMAX \
                and self._num_classes <= 2:
            # NOTE(review): adjacent string literals concatenate with no
            # space between "trained" and "with".
            raise ValueError("Two classes or less cannot be trained"
                             "with softmax (multinomial).")

        if solves_classification and not solves_multiclass:
            # Binary classification stores a single coefficient column.
            self._num_classes_dim = self._num_classes - 1
        else:
            self._num_classes_dim = self._num_classes

        if self.fit_intercept:
            coef_size = (self.n_cols + 1, self._num_classes_dim)
        else:
            coef_size = (self.n_cols, self._num_classes_dim)

        if self.coef_ is None or not self.warm_start:
            self.solver_model._coef_ = CumlArray.zeros(
                coef_size, dtype=self.dtype, order='C')

    def fit(self, input_data, n_rows, n_cols, parts_rank_size, rank, convert_dtype=False):
        """Fit the distributed model on this rank's local partitions."""
        assert len(input_data) == 1, f"Currently support only one (X, y) pair in the list. Received {len(input_data)} pairs."
        self.is_col_major = False
        order = 'F' if self.is_col_major else 'C'
        super().fit(input_data, n_rows, n_cols, parts_rank_size, rank, order=order)

    @cuml.internals.api_base_return_any_skipall
    def _fit(self, X, y, coef_ptr, input_desc):
        # X and y arrive as integer-encoded pointers to C++ vectors of
        # per-partition data (see MGFitMixin).
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef float objective32
        cdef int num_iters
        cdef vector[float] c_classes_
        # Discover the global set of labels across all ranks.
        c_classes_ = getUniquelabelsMG(
            handle_[0],
            deref(<PartDescriptor*><uintptr_t>input_desc),
            deref(<vector[floatData_t*]*><uintptr_t>y))
        self.classes_ = np.sort(list(c_classes_)).astype('float32')
        self._num_classes = len(self.classes_)

        # Binary -> sigmoid loss, multiclass -> softmax.
        self.loss = "sigmoid" if self._num_classes <= 2 else "softmax"
        self.prepare_for_fit(self._num_classes)
        cdef uintptr_t mat_coef_ptr = self.coef_.ptr

        cdef qn_params qnpams = self.solver_model.qnparams.params

        if self.dtype == np.float32:
            qnFit(
                handle_[0],
                deref(<vector[floatData_t*]*><uintptr_t>X),
                deref(<PartDescriptor*><uintptr_t>input_desc),
                deref(<vector[floatData_t*]*><uintptr_t>y),
                <float*>mat_coef_ptr,
                qnpams,
                self.is_col_major,
                self._num_classes,
                <float*> &objective32,
                <int*> &num_iters)

            self.solver_model.objective = objective32

        else:
            assert False, "dtypes other than float32 are currently not supported yet. See issue: https://github.com/rapidsai/cuml/issues/5589"

        self.solver_model.num_iters = num_iters

        self.solver_model._calc_intercept()

        self.handle.sync()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/__init__.py | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.device_support import GPU_ENABLED
from cuml.linear_model.elastic_net import ElasticNet
from cuml.linear_model.lasso import Lasso
from cuml.linear_model.linear_regression import LinearRegression
from cuml.linear_model.logistic_regression import LogisticRegression
from cuml.linear_model.ridge import Ridge
if GPU_ENABLED:
from cuml.linear_model.mbsgd_classifier import MBSGDClassifier
from cuml.linear_model.mbsgd_regressor import MBSGDRegressor
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/linear_model/ridge.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
import warnings
from libc.stdint cimport uintptr_t
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.base import UniversalBase
from cuml.internals.mixins import RegressorMixin, FMajorInputTagMixin
from cuml.internals.array import CumlArray
from cuml.common.doc_utils import generate_docstring
from cuml.linear_model.base import LinearPredictMixin
from cuml.common import input_to_cuml_array
from cuml.internals.api_decorators import device_interop_preparation
from cuml.internals.api_decorators import enable_device_interop
IF GPUBUILD == 1:
    from libcpp cimport bool
    from pylibraft.common.handle cimport handle_t

    cdef extern from "cuml/linear_model/glm.hpp" namespace "ML::GLM":
        # float32 overload: fits ridge coefficients for `n_alpha` penalty
        # values; writes coefficients through `coef` and the intercept
        # through `intercept`.
        cdef void ridgeFit(handle_t& handle,
                           float *input,
                           size_t n_rows,
                           size_t n_cols,
                           float *labels,
                           float *alpha,
                           int n_alpha,
                           float *coef,
                           float *intercept,
                           bool fit_intercept,
                           bool normalize,
                           int algo,
                           float *sample_weight) except +

        # float64 overload of the same routine.
        cdef void ridgeFit(handle_t& handle,
                           double *input,
                           size_t n_rows,
                           size_t n_cols,
                           double *labels,
                           double *alpha,
                           int n_alpha,
                           double *coef,
                           double *intercept,
                           bool fit_intercept,
                           bool normalize,
                           int algo,
                           double *sample_weight) except +
class Ridge(UniversalBase,
            RegressorMixin,
            LinearPredictMixin,
            FMajorInputTagMixin):

    """
    Ridge extends LinearRegression by providing L2 regularization on the
    coefficients when predicting response y with a linear combination of the
    predictors in X. It can reduce the variance of the predictors, and improves
    the conditioning of the problem.

    cuML's Ridge can take array-like objects, either in host as
    NumPy arrays or in device (as Numba or `__cuda_array_interface__`
    compliant), in addition to cuDF objects. It provides 3
    algorithms: SVD, Eig and CD to fit a linear model. In general SVD uses
    significantly more memory and is slower than Eig. If using CUDA 10.1,
    the memory difference is even bigger than in the other supported CUDA
    versions. However, SVD is more stable than Eig (default). CD uses
    Coordinate Descent and can be faster when data is large.

    Examples
    --------

    .. code-block:: python

        >>> import cupy as cp
        >>> import cudf

        >>> # Both import methods supported
        >>> from cuml import Ridge
        >>> from cuml.linear_model import Ridge

        >>> alpha = cp.array([1e-5])
        >>> ridge = Ridge(alpha=alpha, fit_intercept=True, normalize=False,
        ...               solver="eig")

        >>> X = cudf.DataFrame()
        >>> X['col1'] = cp.array([1,1,2,2], dtype = cp.float32)
        >>> X['col2'] = cp.array([1,2,2,3], dtype = cp.float32)

        >>> y = cudf.Series(cp.array([6.0, 8.0, 9.0, 11.0], dtype=cp.float32))

        >>> result_ridge = ridge.fit(X, y)
        >>> print(result_ridge.coef_) # doctest: +SKIP
        0 1.000...
        1 1.999...
        >>> print(result_ridge.intercept_)
        3.0...

        >>> X_new = cudf.DataFrame()
        >>> X_new['col1'] = cp.array([3,2], dtype=cp.float32)
        >>> X_new['col2'] = cp.array([5,5], dtype=cp.float32)

        >>> preds = result_ridge.predict(X_new)
        >>> print(preds) # doctest: +SKIP
        0 15.999...
        1 14.999...

    Parameters
    ----------
    alpha : float (default = 1.0)
        Regularization strength - must be a positive float. Larger values
        specify stronger regularization. Array input will be supported later.
    solver : {'eig', 'svd', 'cd'} (default = 'eig')
        Eig uses a eigendecomposition of the covariance matrix, and is much
        faster.
        SVD is slower, but guaranteed to be stable.
        CD or Coordinate Descent is very fast and is suitable for large
        problems.
    fit_intercept : boolean (default = True)
        If True, Ridge tries to correct for the global mean of y.
        If False, the model expects that you have centered the data.
    normalize : boolean (default = False)
        If True, the predictors in X will be normalized by dividing by the
        column-wise standard deviation.
        If False, no scaling will be done.
        Note: this is in contrast to sklearn's deprecated `normalize` flag,
        which divides by the column-wise L2 norm; but this is the same as if
        using sklearn's StandardScaler.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.

    Attributes
    ----------
    coef_ : array, shape (n_features)
        The estimated coefficients for the linear regression model.
    intercept_ : array
        The independent term. If `fit_intercept` is False, will be 0.

    Notes
    -----
    Ridge provides L2 regularization. This means that the coefficients can
    shrink to become very small, but not zero. This can cause issues of
    interpretability on the coefficients.
    Consider using Lasso, or thresholding small coefficients to zero.

    **Applications of Ridge**

        Ridge Regression is used in the same way as LinearRegression, but does
        not suffer from multicollinearity issues.  Ridge is used in insurance
        premium prediction, stock market analysis and much more.

    For additional docs, see `Scikit-learn's Ridge Regression
    <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html>`_.
    """

    _cpu_estimator_import_path = 'sklearn.linear_model.Ridge'

    # Device/host-synchronized array attributes (F-major like the C++ layer).
    coef_ = CumlArrayDescriptor(order='F')
    intercept_ = CumlArrayDescriptor(order='F')

    @device_interop_preparation
    def __init__(self, *, alpha=1.0, solver='eig', fit_intercept=True,
                 normalize=False, handle=None, output_type=None,
                 verbose=False):
        """
        Initializes the linear ridge regression class.

        Parameters
        ----------
        solver : Type: string. 'eig' (default) and 'svd' are supported
        algorithms.
        fit_intercept: boolean. For more information, see `scikitlearn's OLS
        <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html>`_.
        normalize: boolean. For more information, see `scikitlearn's OLS
        <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html>`_.
        """
        self._check_alpha(alpha)
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)

        # internal array attributes
        self.coef_ = None
        self.intercept_ = None

        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize

        if solver in ['svd', 'eig', 'cd']:
            self.solver = solver
            self.algo = self._get_algorithm_int(solver)
        else:
            msg = "solver {!r} is not supported"
            raise TypeError(msg.format(solver))
        self.intercept_value = 0.0

    def _check_alpha(self, alpha):
        # Reject non-positive regularization strengths.
        # NOTE(review): the message has no placeholder, so .format(alpha)
        # is a no-op here.
        if alpha <= 0.0:
            msg = "alpha value has to be positive"
            raise TypeError(msg.format(alpha))

    def _get_algorithm_int(self, algorithm):
        # Map the solver name to the enum value expected by the C++ layer.
        return {
            'svd': 0,
            'eig': 1,
            'cd': 2
        }[algorithm]

    @generate_docstring()
    @enable_device_interop
    def fit(self, X, y, convert_dtype=True, sample_weight=None) -> "Ridge":
        """
        Fit the model with X and y.

        """
        cdef uintptr_t _X_ptr, _y_ptr, _sample_weight_ptr
        # deepcopy: the C++ fit may modify the input buffer in place.
        X_m, n_rows, self.n_features_in_, self.dtype = \
            input_to_cuml_array(X, deepcopy=True,
                                check_dtype=[np.float32, np.float64])
        _X_ptr = X_m.ptr
        self.feature_names_in_ = X_m.index

        y_m, _, _, _ = \
            input_to_cuml_array(y, check_dtype=self.dtype,
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_rows=n_rows, check_cols=1)
        _y_ptr = y_m.ptr

        if sample_weight is not None:
            sample_weight_m, _, _, _ = \
                input_to_cuml_array(sample_weight, check_dtype=self.dtype,
                                    convert_to_dtype=(
                                        self.dtype if convert_dtype else None),
                                    check_rows=n_rows, check_cols=1)
            _sample_weight_ptr = sample_weight_m.ptr
        else:
            # Null pointer signals "no sample weights" to the C++ layer.
            _sample_weight_ptr = 0

        if self.n_features_in_ < 1:
            msg = "X matrix must have at least a column"
            raise TypeError(msg)

        if n_rows < 2:
            msg = "X matrix must have at least two rows"
            raise TypeError(msg)

        if self.n_features_in_ == 1 and self.algo != 0:
            warnings.warn("Changing solver to 'svd' as 'eig' or 'cd' " +
                          "solvers do not support training data with 1 " +
                          "column currently.", UserWarning)
            self.algo = 0

        self.n_alpha = 1

        self.coef_ = CumlArray.zeros(self.n_features_in_, dtype=self.dtype)
        cdef uintptr_t _coef_ptr = self.coef_.ptr

        cdef float _c_intercept_f32
        cdef double _c_intercept_f64
        cdef float _c_alpha_f32
        cdef double _c_alpha_f64

        IF GPUBUILD == 1:
            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            if self.dtype == np.float32:
                _c_alpha_f32 = self.alpha
                ridgeFit(handle_[0],
                         <float*>_X_ptr,
                         <size_t>n_rows,
                         <size_t>self.n_features_in_,
                         <float*>_y_ptr,
                         <float*>&_c_alpha_f32,
                         <int>self.n_alpha,
                         <float*>_coef_ptr,
                         <float*>&_c_intercept_f32,
                         <bool>self.fit_intercept,
                         <bool>self.normalize,
                         <int>self.algo,
                         <float*>_sample_weight_ptr)

                self.intercept_ = _c_intercept_f32
            else:
                _c_alpha_f64 = self.alpha
                ridgeFit(handle_[0],
                         <double*>_X_ptr,
                         <size_t>n_rows,
                         <size_t>self.n_features_in_,
                         <double*>_y_ptr,
                         <double*>&_c_alpha_f64,
                         <int>self.n_alpha,
                         <double*>_coef_ptr,
                         <double*>&_c_intercept_f64,
                         <bool>self.fit_intercept,
                         <bool>self.normalize,
                         <int>self.algo,
                         <double*>_sample_weight_ptr)

                self.intercept_ = _c_intercept_f64

            # Wait for the async C++ fit before releasing the input copies.
            self.handle.sync()

        del X_m
        del y_m
        if sample_weight is not None:
            del sample_weight_m

        return self

    def set_params(self, **params):
        """Set estimator parameters, keeping the internal algo enum in sync."""
        super().set_params(**params)
        if 'solver' in params:
            if params['solver'] in ['svd', 'eig', 'cd']:
                self.algo = self._get_algorithm_int(params['solver'])
            else:
                msg = "solver {!r} is not supported"
                raise TypeError(msg.format(params['solver']))
        return self

    def get_param_names(self):
        return super().get_param_names() + \
            ['solver', 'fit_intercept', 'normalize', 'alpha']

    def get_attr_names(self):
        return ['intercept_', 'coef_', 'n_features_in_', 'feature_names_in_']
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/pipeline/__init__.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.import_utils import has_sklearn

# Re-export sklearn's Pipeline utilities under the cuml namespace, with a
# provenance note prepended to their docstrings.
if has_sklearn():
    from sklearn.pipeline import Pipeline, make_pipeline

    disclaimer = """
This code is developed and maintained by scikit-learn and imported
by cuML to maintain the familiar sklearn namespace structure.
cuML includes tests to ensure full compatibility of these wrappers
with CUDA-based data and cuML estimators, but all of the underlying code
is due to the scikit-learn developers.\n\n"""

    Pipeline.__doc__ = disclaimer + Pipeline.__doc__
    make_pipeline.__doc__ = disclaimer + make_pipeline.__doc__

    __all__ = ["Pipeline", "make_pipeline"]
else:
    raise ImportError(
        "Scikit-learn is needed to use " "Pipeline and make_pipeline"
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/benchmark/nvtx_benchmark.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import sys
import tempfile
from subprocess import run
import json
class Profiler:
    """Profile a shell command with Nsight Systems (`nsys`) and print a
    summary of the NVTX ranges recorded in the cuML/cuDF domains."""

    def __init__(self, tmp_path=None):
        self.tmp_dir = tempfile.TemporaryDirectory(dir=tmp_path)
        self.nsys_file = os.path.join(self.tmp_dir.name, "report.nsys-rep")
        self.json_file = os.path.join(self.tmp_dir.name, "report.json")
        # Recreate the directory empty so nsys can write its report files.
        shutil.rmtree(self.tmp_dir.name)
        os.makedirs(self.tmp_dir.name, exist_ok=True)

    def __del__(self):
        # Remove the temporary report directory.
        self.tmp_dir.cleanup()
        self.tmp_dir = None

    @staticmethod
    def _execute(command):
        """Run `command` (a list of args) and return stdout, raising on failure."""
        # NVTX_BENCHMARK=TRUE turns on benchmark-oriented NVTX annotations.
        res = run(
            command,
            shell=False,
            capture_output=True,
            env=dict(os.environ, NVTX_BENCHMARK="TRUE"),
        )
        if res.returncode != 0:
            raise Exception(res.stderr)
        else:
            return res.stdout

    def _nsys_profile(self, command):
        """Run `command` under `nsys profile`, tracing only NVTX ranges."""
        profile_command = [
            "nsys",
            "profile",
            "--trace=nvtx",
            "--force-overwrite=true",
            "--output={nsys_file}".format(nsys_file=self.nsys_file),
        ]
        profile_command.extend(command.split(" "))
        self._execute(profile_command)

    def _nsys_export2json(self):
        """Export the captured .nsys-rep report to JSON."""
        export_command = [
            "nsys",
            "export",
            "--type=json",
            "--separate-strings=true",
            "--force-overwrite=true",
            "--output={json_file}".format(json_file=self.json_file),
            self.nsys_file,
        ]
        self._execute(export_command)

    def _parse_json(self):
        """Parse the exported JSON and return a list of NVTX event dicts."""
        with open(self.json_file, "r") as json_file:
            # The export is newline-delimited JSON; wrap it into one array.
            json_content = json_file.read().replace("\n", ",")[:-1]
            json_content = '{"dict": [\n' + json_content + "\n]}"
            profile = json.loads(json_content)["dict"]

            nvtx_events = [p["NvtxEvent"] for p in profile if "NvtxEvent" in p]
            nvtx_events = [
                p for p in nvtx_events if "Text" in p and "DomainId" in p
            ]

            def get_id(attribute, lookfor, nvtx_events):
                # First event whose Text matches `lookfor`, or None.
                idxs = [
                    p[attribute] for p in nvtx_events if p["Text"] == lookfor
                ]
                return idxs[0] if len(idxs) > 0 else None

            # Keep only events from the cuML/cuDF NVTX domains.
            authorized_domains = {}
            for domain_name in [
                "cuml_python",
                "cuml_cpp",
                "cudf_python",
                "cudf_cpp",
            ]:
                domain_id = get_id("DomainId", domain_name, nvtx_events)
                authorized_domains[domain_id] = domain_name
            nvtx_events = [
                p
                for p in nvtx_events
                if p["DomainId"] in authorized_domains.keys()
            ]

            utils_category_id = get_id("Category", "utils", nvtx_events)

            def _process_nvtx_event(record):
                # Normalize a raw NvtxEvent into the fields used downstream.
                new_record = {
                    "measurement": record["Text"],
                    "start": int(record["Timestamp"]),
                }
                if "EndTimestamp" in record:
                    runtime = int(record["EndTimestamp"]) - int(
                        record["Timestamp"]
                    )
                    new_record["runtime"] = runtime
                    new_record["end"] = int(record["EndTimestamp"])
                if "DomainId" in record:
                    domain_id = record["DomainId"]
                    new_record["domain"] = authorized_domains[domain_id]

                # cuDF work and utils from cuML are categorized as utilities
                if (
                    "Category" in record
                    and record["Category"] == utils_category_id
                ) or new_record["domain"].startswith("cudf"):
                    new_record["category"] = "utils"
                else:
                    new_record["category"] = "none"
                return new_record

            return list(map(_process_nvtx_event, nvtx_events))

    @staticmethod
    def _display_results(results):
        """Pretty-print the ranges in chronological order with nesting."""
        nvtx_events = [r for r in results if "runtime" in r]
        nvtx_events.sort(key=lambda r: r["start"])
        max_length = max([len(r["measurement"]) for r in nvtx_events]) + 16

        def aggregate(records):
            # Sum runtimes of records sharing the same measurement name.
            agg = {}
            for r in records:
                measurement = r["measurement"]
                runtime = int(r["runtime"])
                if measurement in agg:
                    agg[measurement]["runtime"] += runtime
                else:
                    agg[measurement] = {
                        "measurement": measurement,
                        "runtime": runtime,
                        "start": r["start"],
                    }
            agg = list(agg.values())
            agg.sort(key=lambda r: r["start"])
            return agg

        def nesting_hierarchy(records):
            # Depth of each record = number of still-open enclosing ranges.
            ends = []
            for r in records:
                ends = [e for e in ends if r["start"] < e]
                r["nesting_hierarchy"] = len(ends)
                ends.append(r["end"])
            return records

        def display(measurement, runtime):
            measurement = measurement.ljust(max_length + 4)
            # Timestamps are in nanoseconds; display seconds.
            runtime = round(int(runtime) / 10**9, 4)
            msg = "{measurement} : {runtime:8.4f} s"
            msg = msg.format(measurement=measurement, runtime=runtime)
            print(msg)

        while len(nvtx_events):
            record = nvtx_events[0]
            display(record["measurement"], record["runtime"])

            # Filter events belonging to this event
            end = record["end"]
            events_to_print = [r for r in nvtx_events[1:] if r["start"] < end]

            # Filter events and compute nesting hierarchy
            reg_events_to_print = [
                r for r in events_to_print if r["category"] != "utils"
            ]
            reg_events_to_print = nesting_hierarchy(reg_events_to_print)
            for r in reg_events_to_print:
                measurement = (
                    " |"
                    + ("==" * r["nesting_hierarchy"])
                    + "> "
                    + r["measurement"]
                )
                display(measurement, r["runtime"])

            # Filter utils events and aggregate them by adding up runtimes
            utils_events_to_print = [
                r for r in events_to_print if r["category"] == "utils"
            ]
            utils_events_to_print = aggregate(utils_events_to_print)
            if len(reg_events_to_print) and len(utils_events_to_print):
                print()
            if len(utils_events_to_print):
                print(" Utils summary:")
                for r in utils_events_to_print:
                    display(" " + r["measurement"], r["runtime"])

            # Remove events just displayed from the list
            nvtx_events = [r for r in nvtx_events if r["start"] >= end]
            if len(nvtx_events):
                print("\n")

    def profile(self, command):
        """Profile `command`, export the report, parse it and print a summary."""
        self._nsys_profile(command)
        self._nsys_export2json()
        results = self._parse_json()
        self._display_results(results)
if __name__ == "__main__":

    def check_version():
        """Ensure the installed nsys is at least version 2021.4."""
        stdout = Profiler._execute(["nsys", "--version"])
        full_version = stdout.decode("utf-8").split(" ")[-1]
        year, month = full_version.split(".")[:2]
        # Compare (year, month) as integers: the previous float-based
        # comparison mis-ordered versions with a two-digit minor, e.g.
        # "2021.10" became 2021.1 and was wrongly rejected as < 2021.4.
        if (int(year), int(month)) < (2021, 4):
            raise Exception(
                "This script requires nsys 2021.4 "
                "or later version of the tool."
            )

    check_version()
    profiler = Profiler()
    # Profile the command passed as the first CLI argument.
    profiler.profile(sys.argv[1])
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/benchmark/runners.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Wrappers to run ML benchmarks"""
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.benchmark import datagen
from cuml.common.device_selection import using_device_type
import warnings
import time
import itertools
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
Series = gpu_only_import_from("cudf", "Series")
class BenchmarkTimer:
    """Times a code block executed `reps` times, recording each run's
    wall-clock duration in the instance attribute `timings`. Use like:

    .. code-block:: python

        timer = BenchmarkTimer(rep=5)
        for _ in timer.benchmark_runs():
            ... do something ...
        print(np.min(timer.timings))
    """

    def __init__(self, reps=1):
        # Number of repetitions and the durations collected so far.
        self.reps = reps
        self.timings = []

    def benchmark_runs(self):
        """Yield run indices 0..reps-1, timing the caller's loop body."""
        for run_index in range(self.reps):
            started = time.time()
            yield run_index
            # The body executed between yield and resumption is what we time.
            self.timings.append(time.time() - started)
class SpeedupComparisonRunner:
    """Wrapper to run an algorithm with multiple dataset sizes
    and compute speedup of cuml relative to sklearn baseline."""

    def __init__(
        self,
        bench_rows,
        bench_dims,
        dataset_name="blobs",
        input_type="numpy",
        n_reps=1,
    ):
        self.bench_rows = bench_rows
        self.bench_dims = bench_dims
        self.dataset_name = dataset_name
        self.input_type = input_type
        self.n_reps = n_reps

    def _run_one_size(
        self,
        algo_pair,
        n_samples,
        n_features,
        param_overrides={},
        cuml_param_overrides={},
        cpu_param_overrides={},
        dataset_param_overrides={},
        dtype=np.float32,
        run_cpu=True,
        device="gpu",
        verbose=False,
    ):
        # Benchmark one (n_samples, n_features) configuration and return a
        # dict of timings, speedup and the parameter overrides used.
        # NOTE(review): the {} defaults are shared mutable defaults; they are
        # only read here, but callers should not mutate them.
        data = datagen.gen_data(
            self.dataset_name,
            self.input_type,
            n_samples,
            n_features,
            dtype=dtype,
            **dataset_param_overrides,
        )

        with using_device_type(device):
            setup_overrides = algo_pair.setup_cuml(
                data, **param_overrides, **cuml_param_overrides
            )

            # Time the cuML side; keep the fastest of n_reps runs.
            cuml_timer = BenchmarkTimer(self.n_reps)
            for rep in cuml_timer.benchmark_runs():
                algo_pair.run_cuml(
                    data,
                    **param_overrides,
                    **cuml_param_overrides,
                    **setup_overrides,
                )
            cu_elapsed = np.min(cuml_timer.timings)

        if run_cpu and algo_pair.cpu_class is not None:
            setup_overrides = algo_pair.setup_cpu(
                data, **param_overrides, **cpu_param_overrides
            )

            cpu_timer = BenchmarkTimer(self.n_reps)
            for rep in cpu_timer.benchmark_runs():
                algo_pair.run_cpu(
                    data,
                    **param_overrides,
                    **cpu_param_overrides,
                    **setup_overrides,
                )
            cpu_elapsed = np.min(cpu_timer.timings)
        else:
            if run_cpu:
                warnings.warn(
                    "run_cpu argument is set to True but no CPU "
                    "implementation was provided. It's possible "
                    "an additional library is needed but one could "
                    "not be found. Benchmark will be executed with "
                    "run_cpu=False"
                )
            # cpu_elapsed == 0.0 yields speedup 0.0 (no CPU baseline).
            cpu_elapsed = 0.0

        speedup = cpu_elapsed / float(cu_elapsed)
        if verbose:
            print(
                "%s (n_samples=%s, n_features=%s) [cpu=%s, gpu=%s, speedup=%s]"
                % (
                    algo_pair.name,
                    n_samples,
                    n_features,
                    cpu_elapsed,
                    cu_elapsed,
                    speedup,
                )
            )

        if n_samples == 0:
            # Update n_samples = training samples + testing samples
            n_samples = data[0].shape[0] + data[2].shape[0]
        if n_features == 0:
            # Update n_features
            n_features = data[0].shape[1]

        return dict(
            cuml_time=cu_elapsed,
            cpu_time=cpu_elapsed,
            speedup=speedup,
            n_samples=n_samples,
            n_features=n_features,
            **param_overrides,
            **cuml_param_overrides,
            **cpu_param_overrides,
            **dataset_param_overrides,
        )

    def run(
        self,
        algo_pair,
        param_overrides={},
        cuml_param_overrides={},
        cpu_param_overrides={},
        dataset_param_overrides={},
        dtype=np.float32,
        *,
        run_cpu=True,
        device="gpu",
        raise_on_error=False,
        verbose=False,
    ):
        # Sweep all (rows, dims) combinations; on failure, record a partial
        # result row unless raise_on_error is set.
        all_results = []
        for ns in self.bench_rows:
            for nf in self.bench_dims:
                try:
                    all_results.append(
                        self._run_one_size(
                            algo_pair,
                            ns,
                            nf,
                            param_overrides,
                            cuml_param_overrides=cuml_param_overrides,
                            cpu_param_overrides=cpu_param_overrides,
                            dataset_param_overrides=dataset_param_overrides,
                            dtype=dtype,
                            run_cpu=run_cpu,
                            device=device,
                            verbose=verbose,
                        )
                    )
                except Exception as e:
                    print(
                        "Failed to run with %d samples, %d features: %s"
                        % (ns, nf, str(e))
                    )
                    if raise_on_error:
                        raise
                    all_results.append(dict(n_samples=ns, n_features=nf))
        return all_results
class AccuracyComparisonRunner(SpeedupComparisonRunner):
    """Wrapper to run an algorithm with multiple dataset sizes
    and compute accuracy and speedup of cuml relative to sklearn
    baseline."""

    def __init__(
        self,
        bench_rows,
        bench_dims,
        dataset_name="blobs",
        input_type="numpy",
        test_fraction=0.10,
        n_reps=1,
    ):
        super().__init__(
            bench_rows, bench_dims, dataset_name, input_type, n_reps
        )
        # Fraction of rows held out for the accuracy evaluation.
        self.test_fraction = test_fraction

    def _run_one_size(
        self,
        algo_pair,
        n_samples,
        n_features,
        param_overrides={},
        cuml_param_overrides={},
        cpu_param_overrides={},
        dataset_param_overrides={},
        dtype=np.float32,
        run_cpu=True,
        device="gpu",
        verbose=False,
    ):
        # Same as the parent, but also evaluates accuracy on the held-out
        # split (data[2:] is expected to be (X_test, y_test)).
        data = datagen.gen_data(
            self.dataset_name,
            self.input_type,
            n_samples,
            n_features,
            dtype=dtype,
            test_fraction=self.test_fraction,
            **dataset_param_overrides,
        )

        setup_override = algo_pair.setup_cuml(
            data, **{**param_overrides, **cuml_param_overrides}
        )

        with using_device_type(device):
            cuml_timer = BenchmarkTimer(self.n_reps)
            for _ in cuml_timer.benchmark_runs():
                cuml_model = algo_pair.run_cuml(
                    data,
                    **{
                        **param_overrides,
                        **cuml_param_overrides,
                        **setup_override,
                    },
                )
            cu_elapsed = np.min(cuml_timer.timings)

        if algo_pair.accuracy_function:
            if algo_pair.cuml_data_prep_hook is not None:
                X_test, y_test = algo_pair.cuml_data_prep_hook(data[2:])
            else:
                X_test, y_test = data[2:]
            # Estimators without predict (e.g. transformers) use transform.
            if hasattr(cuml_model, "predict"):
                y_pred_cuml = cuml_model.predict(X_test)
            else:
                y_pred_cuml = cuml_model.transform(X_test)
            if isinstance(y_pred_cuml, Series):
                y_pred_cuml = y_pred_cuml.to_numpy()
            cuml_accuracy = algo_pair.accuracy_function(y_test, y_pred_cuml)
        else:
            cuml_accuracy = 0.0

        cpu_accuracy = 0.0
        if run_cpu and algo_pair.cpu_class is not None:
            setup_override = algo_pair.setup_cpu(
                data, **param_overrides, **cpu_param_overrides
            )

            cpu_timer = BenchmarkTimer(self.n_reps)
            for rep in cpu_timer.benchmark_runs():
                cpu_model = algo_pair.run_cpu(
                    data,
                    **setup_override,
                )
            cpu_elapsed = np.min(cpu_timer.timings)

            if algo_pair.accuracy_function:
                if algo_pair.cpu_data_prep_hook is not None:
                    X_test, y_test = algo_pair.cpu_data_prep_hook(data[2:])
                else:
                    X_test, y_test = data[2:]
                if hasattr(cpu_model, "predict"):
                    y_pred_cpu = cpu_model.predict(X_test)
                else:
                    y_pred_cpu = cpu_model.transform(X_test)
                cpu_accuracy = algo_pair.accuracy_function(
                    y_test, np.asarray(y_pred_cpu)
                )
        else:
            cpu_elapsed = 0.0

        if n_samples == 0:
            # Update n_samples = training samples + testing samples
            n_samples = data[0].shape[0] + data[2].shape[0]
        if n_features == 0:
            # Update n_features
            n_features = data[0].shape[1]

        return dict(
            cuml_time=cu_elapsed,
            cpu_time=cpu_elapsed,
            cuml_acc=cuml_accuracy,
            cpu_acc=cpu_accuracy,
            speedup=cpu_elapsed / float(cu_elapsed),
            n_samples=n_samples,
            n_features=n_features,
            **param_overrides,
            **cuml_param_overrides,
            **cpu_param_overrides,
            **dataset_param_overrides,
        )
def run_variations(
    algos,
    dataset_name,
    bench_rows,
    bench_dims,
    param_override_list=[{}],
    cuml_param_override_list=[{}],
    cpu_param_override_list=[{}],
    dataset_param_override_list=[{}],
    dtype=np.float32,
    input_type="numpy",
    test_fraction=0.1,
    run_cpu=True,
    device_list=("gpu",),
    raise_on_error=False,
    n_reps=1,
):
    """
    Runs each algo in `algos` once per
    `bench_rows X bench_dims X params_override_list X cuml_param_override_list`
    combination and returns a dataframe containing timing and accuracy data.

    Parameters
    ----------
    algos : str or list
      Name of algorithms to run and evaluate
    dataset_name : str
      Name of dataset to use
    bench_rows : list of int
      Dataset row counts to test
    bench_dims : list of int
      Dataset column counts to test
    param_override_list : list of dict
      Dicts containing parameters to pass to __init__.
      Each dict specifies parameters to override in one run of the algorithm.
    cuml_param_override_list : list of dict
      Dicts containing parameters to pass to __init__ of the cuml algo only.
    cpu_param_override_list : list of dict
      Dicts containing parameters to pass to __init__ of the cpu algo only.
    dataset_param_override_list : dict
      Dicts containing parameters to pass to dataset generator function
    dtype: [np.float32|np.float64]
      Specifies the dataset precision to be used for benchmarking.
    test_fraction : float
      The fraction of data to use for testing.
    run_cpu : boolean
      If True, run the cpu-based algorithm for comparison
    """
    print("Running: \n", "\n ".join([str(a.name) for a in algos]))
    runner = AccuracyComparisonRunner(
        bench_rows,
        bench_dims,
        dataset_name,
        input_type,
        test_fraction=test_fraction,
        n_reps=n_reps,
    )
    all_results = []
    for algo in algos:
        print("Running %s..." % (algo.name))
        # Cartesian product of every override list and target device.
        for (
            overrides,
            cuml_overrides,
            cpu_overrides,
            dataset_overrides,
            device,
        ) in itertools.product(
            param_override_list,
            cuml_param_override_list,
            cpu_param_override_list,
            dataset_param_override_list,
            device_list,
        ):
            results = runner.run(
                algo,
                overrides,
                cuml_param_overrides=cuml_overrides,
                cpu_param_overrides=cpu_overrides,
                dataset_param_overrides=dataset_overrides,
                dtype=dtype,
                run_cpu=run_cpu,
                device=device,
                raise_on_error=raise_on_error,
            )
            for r in results:
                all_results.append(
                    {
                        "algo": algo.name,
                        "input": input_type,
                        "device": device,
                        **r,
                    }
                )

    print("Finished all benchmark runs")
    results_df = pd.DataFrame.from_records(all_results)
    print(results_df)
    return results_df
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/benchmark/run_benchmarks.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Command-line ML benchmark runner"""
import json
from cuml.benchmark import algorithms, datagen, runners
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")

# Maps the --dtype command-line flag ("fp32"/"fp64") to the numpy dtype
# actually passed to the dataset generators and benchmark runners.
PrecisionMap = {
    "fp32": np.float32,
    "fp64": np.float64,
}
def extract_param_overrides(params_to_sweep):
    """Expand "key=[values]" sweep specifications into override dicts.

    Parameters
    ----------
    params_to_sweep : list[str]
        Strings of the form ``key=json_value`` where the value part is a
        JSON expression, e.g. ``'n_estimators=[10,100,1000]'``. A scalar
        value is treated as a one-element sweep.

    Returns
    -------
    list[dict]
        One dict per combination in the cartesian product over all keys.
        Always contains at least one dict (``[{}]`` when nothing is swept).
    """
    import itertools

    if not params_to_sweep:
        return [{}]

    # Each spec becomes a list of (key, value) candidate pairs.
    per_key_choices = []
    for spec in params_to_sweep:
        key, val_string = spec.split("=")
        values = json.loads(val_string)
        if not isinstance(values, list):
            values = [values]  # single-value sweeps are still sweeps
        per_key_choices.append([(key, value) for value in values])

    # Cartesian product across keys, one dict per combination.
    return [dict(combo) for combo in itertools.product(*per_key_choices)]
if __name__ == "__main__":
    import argparse
    import sys

    # NOTE: this description is shown verbatim by --help (RawTextHelpFormatter).
    parser = argparse.ArgumentParser(
        prog="run_benchmarks",
        description=r"""
Command-line benchmark runner, logging results to
stdout and/or CSV.
Examples:
# Simple logistic regression
python run_benchmarks.py --dataset classification LogisticRegression
# Compare impact of RF parameters and data sets for multiclass
python run_benchmarks.py --dataset classification \
--max-rows 100000 --min-rows 10000 \
--dataset-param-sweep n_classes=[2,8] \
--cuml-param-sweep n_bins=[4,16] n_estimators=[10,100] \
--csv results.csv \
RandomForestClassifier
# Run a bunch of clustering and dimensionality reduction algorithms
# (Because `--input-dimensions` takes a varying number of args, you
# need the extra `--` to separate it from the algorithm names
python run_benchmarks.py --dataset blobs \
--max-rows 20000 --min-rows 20000 --num-sizes 1 \
--input-dimensions 16 256 \
-- DBSCAN KMeans TSNE PCA UMAP
# Use a real dataset at its default size
python run_benchmarks.py --dataset higgs --default-size \
RandomForestClassifier LogisticRegression
""",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # Row-count sweep: num_sizes points log-spaced between min and max rows.
    parser.add_argument(
        "--max-rows",
        type=int,
        default=100000,
        help="Evaluate at most max_row samples",
    )
    parser.add_argument(
        "--min-rows",
        type=int,
        default=10000,
        help="Evaluate at least min_rows samples",
    )
    parser.add_argument(
        "--num-sizes",
        type=int,
        default=2,
        help="Number of different sizes to test",
    )
    parser.add_argument(
        "--num-rows",
        type=int,
        default=None,
        metavar="N",
        help="Shortcut for --min-rows N --max-rows N --num-sizes 1",
    )
    # -1 means "use --input-dimensions instead of a single feature count".
    parser.add_argument("--num-features", type=int, default=-1)
    parser.add_argument(
        "--quiet", "-q", action="store_false", dest="verbose", default=True
    )
    parser.add_argument("--csv", nargs="?")
    parser.add_argument("--dataset", default="blobs")
    parser.add_argument("--skip-cpu", action="store_true")
    parser.add_argument("--input-type", default="numpy")
    parser.add_argument(
        "--test-split",
        default=0.1,
        type=float,
        help="Fraction of input data used for testing (between 0.0 and 1.0)",
    )
    parser.add_argument(
        "--input-dimensions",
        default=[64, 256, 512],
        nargs="+",
        type=int,
        help="Data dimension sizes (may provide multiple sizes)",
    )
    # The four sweep flags below are parsed by extract_param_overrides.
    parser.add_argument(
        "--param-sweep",
        nargs="*",
        type=str,
        help="""Parameter values to vary, in the form:
key=val_list, where val_list may be a comma-separated list""",
    )
    parser.add_argument(
        "--cuml-param-sweep",
        nargs="*",
        type=str,
        help="""Parameter values to vary for cuML only, in the form:
key=val_list, where val_list may be a comma-separated list""",
    )
    parser.add_argument(
        "--cpu-param-sweep",
        nargs="*",
        type=str,
        help="""Parameter values to vary for CPU only, in the form:
key=val_list, where val_list may be a comma-separated list""",
    )
    parser.add_argument(
        "--dataset-param-sweep",
        nargs="*",
        type=str,
        help="""Parameter values to vary for dataset generator, in the form
key=val_list, where val_list may be a comma-separated list""",
    )
    parser.add_argument(
        "--default-size",
        action="store_true",
        help="Only run datasets at default size",
    )
    parser.add_argument(
        "--raise-on-error",
        action="store_true",
        help="Throw exception on a failed benchmark",
    )
    parser.add_argument(
        "--print-algorithms",
        action="store_true",
        help="Print the list of all available algorithms and exit",
    )
    parser.add_argument(
        "--print-datasets",
        action="store_true",
        help="Print the list of all available datasets and exit",
    )
    parser.add_argument(
        "algorithms",
        nargs="*",
        help="List of algorithms to run, or omit to run all",
    )
    parser.add_argument("--n-reps", type=int, default=1)
    parser.add_argument(
        "--dtype",
        choices=["fp32", "fp64"],
        default="fp32",
        help="Precision of the dataset to benchmark with",
    )
    parser.add_argument(
        "--device",
        choices=["gpu", "cpu"],
        default=["gpu"],
        nargs="+",
        help="The device to use for cuML execution",
    )
    args = parser.parse_args()
    # Replace the flag string with an actual numpy dtype for downstream code.
    args.dtype = PrecisionMap[args.dtype]

    # Informational modes: print and exit without benchmarking.
    if args.print_algorithms:
        for algo in algorithms.all_algorithms():
            print(algo.name)
        sys.exit()

    if args.print_datasets:
        for dataset in datagen.all_datasets().keys():
            print(dataset)
        sys.exit()

    if not 0.0 <= args.test_split <= 1.0:
        raise ValueError(
            "test_split: got %f, want a value between 0.0 and 1.0"
            % args.test_split
        )

    # Row counts are log-spaced between min and max.
    bench_rows = np.logspace(
        np.log10(args.min_rows),
        np.log10(args.max_rows),
        num=args.num_sizes,
        dtype=np.int32,
    )
    bench_dims = args.input_dimensions

    # Shortcut overrides for single-size runs.
    if args.num_rows is not None:
        bench_rows = [args.num_rows]
    if args.num_features > 0:
        bench_dims = [args.num_features]
    if args.default_size:
        # Size 0 tells the data generators to use their built-in defaults.
        bench_rows = [0]
        bench_dims = [0]

    param_override_list = extract_param_overrides(args.param_sweep)
    cuml_param_override_list = extract_param_overrides(args.cuml_param_sweep)
    cpu_param_override_list = extract_param_overrides(args.cpu_param_sweep)
    dataset_param_override_list = extract_param_overrides(
        args.dataset_param_sweep
    )

    # Resolve algorithm names to algorithm objects (all algos if none given).
    if args.algorithms:
        algos_to_run = []
        for name in args.algorithms:
            algo = algorithms.algorithm_by_name(name)
            if not algo:
                raise ValueError("No %s 'algorithm' found" % name)
            algos_to_run.append(algo)
    else:
        # Run all by default
        algos_to_run = algorithms.all_algorithms()

    results_df = runners.run_variations(
        algos_to_run,
        dataset_name=args.dataset,
        bench_rows=bench_rows,
        bench_dims=bench_dims,
        input_type=args.input_type,
        test_fraction=args.test_split,
        param_override_list=param_override_list,
        cuml_param_override_list=cuml_param_override_list,
        cpu_param_override_list=cpu_param_override_list,
        dataset_param_override_list=dataset_param_override_list,
        dtype=args.dtype,
        run_cpu=(not args.skip_cpu),
        device_list=args.device,
        raise_on_error=args.raise_on_error,
        n_reps=args.n_reps,
    )

    if args.csv:
        results_df.to_csv(args.csv)
        print("Saved results to %s" % args.csv)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/benchmark/datagen.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for cuML benchmarks
The main entry point for consumers is gen_data, which
wraps the underlying data generators.
Notes when writing new generators:
Each generator is a function that accepts:
* n_samples (set to 0 for 'default')
* n_features (set to 0 for 'default')
* random_state
* (and optional generator-specific parameters)
The function should return a 2-tuple (X, y), where X is a Pandas
dataframe and y is a Pandas series. If the generator does not produce
labels, it can return (X, None)
A set of helper functions (convert_*) can convert these to alternative
formats. Future revisions may support generating cudf dataframes or
GPU arrays directly instead.
"""
from cuml.internals.import_utils import has_scipy
from cuml.internals.safe_imports import gpu_only_import_from
from cuml.internals import input_utils
from urllib.request import urlretrieve
import sklearn.model_selection
from sklearn.datasets import load_svmlight_file, fetch_covtype
import cuml.datasets
from cuml.internals.safe_imports import cpu_only_import
import os
import functools
import gzip
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
pd = cpu_only_import("pandas")
cuda = gpu_only_import_from("numba", "cuda")
def _gen_data_regression(
    n_samples, n_features, random_state=42, dtype=np.float32
):
    """Generate a synthetic regression problem via cuml.datasets.

    A size of 0 selects the defaults (1e6 rows, 100 features). Returns a
    (cudf.DataFrame, cudf.Series) pair.
    """
    n_rows = n_samples if n_samples != 0 else int(1e6)
    n_cols = n_features if n_features != 0 else 100
    features, targets = cuml.datasets.make_regression(
        n_samples=n_rows,
        n_features=n_cols,
        random_state=random_state,
        dtype=dtype,
    )
    return cudf.DataFrame(features), cudf.Series(targets)
def _gen_data_blobs(
    n_samples, n_features, random_state=42, centers=None, dtype=np.float32
):
    """Wrapper for cuml.datasets.make_blobs.

    Parameters
    ----------
    n_samples : int
        Number of rows; 0 selects the default (1e6).
    n_features : int
        Number of columns; 0 selects the default (100).
    random_state, centers, dtype
        Forwarded to make_blobs.
    """
    if n_samples == 0:
        n_samples = int(1e6)
    if n_features == 0:
        # BUG FIX: this branch previously assigned n_samples = 100 instead of
        # n_features, so requesting the default feature count clobbered the
        # row count and left n_features at 0.
        n_features = 100
    X_arr, y_arr = cuml.datasets.make_blobs(
        n_samples=n_samples,
        n_features=n_features,
        centers=centers,
        random_state=random_state,
        dtype=dtype,
    )
    return X_arr, y_arr
def _gen_data_zeros(n_samples, n_features, dtype=np.float32):
    """Dummy generator for use in testing - returns all 0s"""
    features = cp.zeros((n_samples, n_features), dtype=dtype)
    labels = cp.zeros(n_samples, dtype=dtype)
    return features, labels
def _gen_data_classification(
    n_samples, n_features, random_state=42, n_classes=2, dtype=np.float32
):
    """Wrapper for cuml.datasets.make_classification.

    Parameters
    ----------
    n_samples : int
        Number of rows; 0 selects the default (1e6).
    n_features : int
        Number of columns; 0 selects the default (100).
    random_state, n_classes, dtype
        Forwarded to make_classification.

    Returns (cudf.DataFrame, cudf.Series).
    """
    if n_samples == 0:
        n_samples = int(1e6)
    if n_features == 0:
        # BUG FIX: previously assigned n_samples = 100 here instead of
        # n_features, leaving n_features at 0 and overwriting the row count.
        n_features = 100
    X_arr, y_arr = cuml.datasets.make_classification(
        n_samples=n_samples,
        n_features=n_features,
        n_classes=n_classes,
        random_state=random_state,
        dtype=dtype,
    )
    X_df = cudf.DataFrame(X_arr)
    y_df = cudf.Series(y_arr)
    return X_df, y_df
# Default location to cache datasets (downloads and pickled copies land here)
DATASETS_DIRECTORY = "."
def _gen_data_airline_regression(datasets_root_dir):
    """Download (if needed) and load the 14-column airline on-time dataset.

    Returns (X, y) where y is the 'ArrDelay' column and X is every other
    column. String columns are label-encoded as integer category codes.
    """
    url = "http://kt.ijs.si/elena_ikonomovska/datasets/airline/airline_14col.data.bz2"
    local_url = os.path.join(datasets_root_dir, os.path.basename(url))
    cols = [
        "Year",
        "Month",
        "DayofMonth",
        "DayofWeek",
        "CRSDepTime",
        "CRSArrTime",
        "UniqueCarrier",
        "FlightNum",
        "ActualElapsedTime",
        "Origin",
        "Dest",
        "Distance",
        "Diverted",
        "ArrDelay",
    ]
    # Columns not listed here (UniqueCarrier, Origin, Dest) are read as
    # strings and label-encoded below.
    dtype = np.float64
    dtype_columns = {
        "Year": dtype,
        "Month": dtype,
        "DayofMonth": dtype,
        "DayofWeek": dtype,
        "CRSDepTime": dtype,
        "CRSArrTime": dtype,
        "FlightNum": dtype,
        "ActualElapsedTime": dtype,
        "Distance": dtype,
        "Diverted": dtype,
        "ArrDelay": dtype,
    }
    if not os.path.isfile(local_url):
        urlretrieve(url, local_url)
    df = pd.read_csv(local_url, names=cols, dtype=dtype_columns)

    # Encode categoricals as numeric
    for col in df.select_dtypes(["object"]).columns:
        df[col] = df[col].astype("category").cat.codes

    X = df[df.columns.difference(["ArrDelay"])]
    y = df["ArrDelay"]
    return X, y
def _gen_data_airline_classification(datasets_root_dir):
    """Binary-classification variant of the airline data: label = (ArrDelay > 0)."""
    features, delay = _gen_data_airline_regression(datasets_root_dir)
    labels = (delay > 0) * 1
    return features, labels
def _gen_data_bosch(datasets_root_dir):
    """Load the Bosch production-line dataset (must be downloaded manually).

    Expects ``train_numeric.csv.zip`` in datasets_root_dir; raises ValueError
    when it is missing.
    """
    local_url = os.path.join(datasets_root_dir, "train_numeric.csv.zip")
    if not os.path.isfile(local_url):
        raise ValueError(
            "Bosch dataset not found (search path: %s)" % local_url
        )
    frame = pd.read_csv(
        local_url, index_col=0, compression="zip", dtype=np.float32
    )
    # Last column is the response; everything before it is a feature.
    features = frame.iloc[:, :-1]
    labels = frame.iloc[:, -1]
    return features, labels
def _gen_data_covtype(datasets_root_dir):
    """Load the UCI covertype dataset via sklearn's fetch_covtype."""
    features, labels = fetch_covtype(return_X_y=True)
    # Label range in covtype starts from 1; shift it to start from 0.
    labels = labels - 1
    return pd.DataFrame(features), pd.Series(labels)
def _gen_data_epsilon(datasets_root_dir):
    """Download (if needed) and load the LIBSVM 'epsilon' binary dataset.

    Train and test splits are concatenated into a single (X, y); labels are
    remapped from {-1, 1} to {0, 1}.
    """
    url_train = (
        "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary"
        "/epsilon_normalized.bz2"
    )
    url_test = (
        "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary"
        "/epsilon_normalized.t.bz2"
    )
    local_url_train = os.path.join(
        datasets_root_dir, os.path.basename(url_train)
    )
    local_url_test = os.path.join(
        datasets_root_dir, os.path.basename(url_test)
    )
    if not os.path.isfile(local_url_train):
        urlretrieve(url_train, local_url_train)
    if not os.path.isfile(local_url_test):
        urlretrieve(url_test, local_url_test)

    X_train, y_train = load_svmlight_file(local_url_train, dtype=np.float32)
    X_test, y_test = load_svmlight_file(local_url_test, dtype=np.float32)

    # Densify the svmlight sparse matrices before building DataFrames.
    X_train = pd.DataFrame(X_train.toarray())
    X_test = pd.DataFrame(X_test.toarray())
    # Map the {-1, 1} labels onto {0, 1}.
    y_train[y_train <= 0] = 0
    y_test[y_test <= 0] = 0
    y_train = pd.Series(y_train)
    y_test = pd.Series(y_test)

    X = pd.concat([X_train, X_test], ignore_index=True)
    y = pd.concat([y_train, y_test], ignore_index=True)
    return X, y
def _gen_data_fraud(datasets_root_dir):
    """Load the credit-card fraud dataset (must be downloaded manually).

    Expects ``creditcard.csv.zip`` in datasets_root_dir; raises ValueError
    when it is missing. Features are the anonymized V* columns; the target
    is the 'Class' column.
    """
    local_url = os.path.join(datasets_root_dir, "creditcard.csv.zip")
    if not os.path.isfile(local_url):
        raise ValueError(
            "Fraud dataset not found (search path: %s)" % local_url
        )
    frame = pd.read_csv(local_url, dtype=np.float32)
    feature_cols = [col for col in frame.columns if col.startswith("V")]
    return frame[feature_cols], frame["Class"]
def _gen_data_higgs(datasets_root_dir):
    """Download (if needed) and load the UCI HIGGS binary-classification set.

    Returns (X, y) with 28 float feature columns and an int 'label' target.
    """
    higgs_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz"  # noqa
    local_url = os.path.join(datasets_root_dir, os.path.basename(higgs_url))
    if not os.path.isfile(local_url):
        urlretrieve(higgs_url, local_url)
    col_names = ["label"] + [
        "col-{}".format(i) for i in range(2, 30)
    ]  # Assign column names
    dtypes_ls = [np.int32] + [
        np.float32 for _ in range(2, 30)
    ]  # Assign dtypes to each column
    df = pd.read_csv(
        local_url,
        names=col_names,
        dtype={k: v for k, v in zip(col_names, dtypes_ls)},
    )
    X = df[df.columns.difference(["label"])]
    y = df["label"]
    return X, y
def _gen_data_year(datasets_root_dir):
    """Download (if needed) and load the YearPredictionMSD regression set."""
    year_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt.zip"
    local_url = os.path.join(datasets_root_dir, "YearPredictionMSD.txt.zip")
    if not os.path.isfile(local_url):
        urlretrieve(year_url, local_url)
    frame = pd.read_csv(local_url, header=None)
    # Column 0 is the target (release year); the rest are audio features.
    features = frame.iloc[:, 1:]
    target = frame.iloc[:, 0]
    return features, target
def _convert_to_numpy(data):
"""Returns tuple data with all elements converted to numpy ndarrays"""
if data is None:
return None
elif isinstance(data, tuple):
return tuple([_convert_to_numpy(d) for d in data])
elif isinstance(data, np.ndarray):
return data
elif isinstance(data, cp.ndarray):
return cp.asnumpy(data)
elif isinstance(data, cudf.DataFrame):
return data.to_numpy()
elif isinstance(data, cudf.Series):
return data.to_numpy()
elif isinstance(data, (pd.DataFrame, pd.Series)):
return data.to_numpy()
else:
raise Exception("Unsupported type %s" % str(type(data)))
def _convert_to_cupy(data):
"""Returns tuple data with all elements converted to cupy ndarrays"""
if data is None:
return None
elif isinstance(data, tuple):
return tuple([_convert_to_cupy(d) for d in data])
elif isinstance(data, np.ndarray):
return cp.asarray(data)
elif isinstance(data, cp.ndarray):
return data
elif isinstance(data, cudf.DataFrame):
return data.values
elif isinstance(data, cudf.Series):
return data.values
elif isinstance(data, (pd.DataFrame, pd.Series)):
return cp.asarray(data.to_numpy())
else:
raise Exception("Unsupported type %s" % str(type(data)))
def _convert_to_cudf(data):
if data is None:
return None
elif isinstance(data, tuple):
return tuple([_convert_to_cudf(d) for d in data])
elif isinstance(data, (cudf.DataFrame, cudf.Series)):
return data
elif isinstance(data, pd.DataFrame):
return cudf.DataFrame.from_pandas(data)
elif isinstance(data, pd.Series):
return cudf.Series.from_pandas(data)
elif isinstance(data, np.ndarray):
data = np.squeeze(data)
if data.ndim == 1:
return cudf.Series(data)
else:
return cudf.DataFrame(data)
elif isinstance(data, cp.ndarray):
data = np.squeeze(cp.asnumpy(data))
if data.ndim == 1:
return cudf.Series(data)
else:
return cudf.DataFrame(data)
else:
raise Exception("Unsupported type %s" % str(type(data)))
def _convert_to_pandas(data):
if data is None:
return None
elif isinstance(data, tuple):
return tuple([_convert_to_pandas(d) for d in data])
elif isinstance(data, (pd.DataFrame, pd.Series)):
return data
elif isinstance(data, (cudf.DataFrame, cudf.Series)):
return data.to_pandas()
elif isinstance(data, np.ndarray):
data = np.squeeze(data)
if data.ndim == 1:
return pd.Series(data)
else:
return pd.DataFrame(data)
elif isinstance(data, cp.ndarray):
data = np.squeeze(cp.asnumpy(data))
if data.ndim == 1:
return pd.Series(data)
else:
return pd.DataFrame(data)
else:
raise Exception("Unsupported type %s" % str(type(data)))
def _convert_to_gpuarray(data, order="F"):
    """Convert (possibly nested tuples of) inputs into numba device arrays.

    pandas inputs are first moved to cudf; everything else goes through
    input_utils.input_to_cuml_array with the requested memory order.
    """
    if data is None:
        return None
    if isinstance(data, tuple):
        return tuple(
            _convert_to_gpuarray(element, order=order) for element in data
        )
    if isinstance(data, pd.DataFrame):
        return _convert_to_gpuarray(
            cudf.DataFrame.from_pandas(data), order=order
        )
    if isinstance(data, pd.Series):
        return cuda.as_cuda_array(cudf.Series.from_pandas(data))
    cuml_array = input_utils.input_to_cuml_array(data, order=order)[0]
    return cuml_array.to_output("numba")
def _convert_to_gpuarray_c(data):
    """C-order variant of _convert_to_gpuarray."""
    return _convert_to_gpuarray(data, order="C")
def _sparsify_and_convert(data, input_type, sparsity_ratio=0.3):
    """Randomly zero ~sparsity_ratio of `data` and return a scipy sparse matrix.

    Parameters
    ----------
    data : array-like with .size and .ravel() (mutated in place)
    input_type : str
        Either "csr" or "csc".
    sparsity_ratio : float
        Fraction of entries to zero out before conversion.

    Raises
    ------
    RuntimeError if scipy is unavailable; TypeError for an unknown
    input_type.
    """
    if not has_scipy():
        raise RuntimeError("Scipy is required")
    # Import the sparse submodule explicitly; plain `import scipy` is not
    # guaranteed to make scipy.sparse available.
    import scipy.sparse

    random_loc = np.random.choice(
        data.size, int(data.size * sparsity_ratio), replace=False
    )
    data.ravel()[random_loc] = 0
    if input_type == "csr":
        return scipy.sparse.csr_matrix(data)
    elif input_type == "csc":
        return scipy.sparse.csc_matrix(data)
    else:
        # BUG FIX: the TypeError was previously constructed but never raised,
        # so unknown input types silently returned None.
        raise TypeError("Wrong sparse input type {}".format(input_type))
def _convert_to_scipy_sparse(data, input_type):
    """Returns a tuple of arrays. Each of the arrays
    have some of its values being set randomly to 0,
    it is then converted to a scipy sparse array"""
    if data is None:
        return None
    if isinstance(data, tuple):
        return tuple(
            _convert_to_scipy_sparse(element, input_type) for element in data
        )
    if isinstance(data, np.ndarray):
        return _sparsify_and_convert(data, input_type)
    if isinstance(data, (cudf.DataFrame, cudf.Series)):
        return _sparsify_and_convert(data.to_numpy(), input_type)
    if isinstance(data, (pd.DataFrame, pd.Series)):
        return _sparsify_and_convert(data.to_numpy(), input_type)
    raise Exception("Unsupported type %s" % str(type(data)))
def _convert_to_scipy_sparse_csr(data):
    """CSR-format variant of _convert_to_scipy_sparse."""
    return _convert_to_scipy_sparse(data, "csr")


def _convert_to_scipy_sparse_csc(data):
    """CSC-format variant of _convert_to_scipy_sparse."""
    return _convert_to_scipy_sparse(data, "csc")
# Registry of dataset generators, keyed by the dataset_name accepted by
# gen_data. Synthetic generators take (n_samples, n_features, ...); real
# datasets take (datasets_root_dir, ...).
_data_generators = {
    "blobs": _gen_data_blobs,
    "zeros": _gen_data_zeros,
    "classification": _gen_data_classification,
    "regression": _gen_data_regression,
    "airline_regression": _gen_data_airline_regression,
    "airline_classification": _gen_data_airline_classification,
    "bosch": _gen_data_bosch,
    "covtype": _gen_data_covtype,
    "epsilon": _gen_data_epsilon,
    "fraud": _gen_data_fraud,
    "higgs": _gen_data_higgs,
    "year": _gen_data_year,
}

# Registry of output-format converters, keyed by the dataset_format
# accepted by gen_data.
_data_converters = {
    "numpy": _convert_to_numpy,
    "cupy": _convert_to_cupy,
    "cudf": _convert_to_cudf,
    "pandas": _convert_to_pandas,
    "gpuarray": _convert_to_gpuarray,
    "gpuarray-c": _convert_to_gpuarray_c,
    "scipy-sparse-csr": _convert_to_scipy_sparse_csr,
    "scipy-sparse-csc": _convert_to_scipy_sparse_csc,
}
def all_datasets():
    """Return the mapping of dataset name -> generator function."""
    return _data_generators
@functools.lru_cache(maxsize=8)
def gen_data(
    dataset_name,
    dataset_format,
    n_samples=0,
    n_features=0,
    test_fraction=0.0,
    datasets_root_dir=DATASETS_DIRECTORY,
    dtype=np.float32,
    **kwargs,
):
    """Returns a tuple of data from the specified generator.

    Parameters
    ----------
    dataset_name : str
        Dataset to use. Can be a synthetic generator (blobs, zeros,
        classification or regression) or a specified dataset (higgs
        currently, others coming soon)
    dataset_format : str
        Type of data to return. (One of cudf, numpy, pandas, gpuarray)
    n_samples : int
        Total number of samples to load, including training and testing
        samples (0 selects the dataset/generator default)
    n_features : int
        Number of feature columns (0 selects the dataset/generator default)
    test_fraction : float
        Fraction of the dataset to partition randomly into the test set.
        If this is 0.0, no test set will be created.
    datasets_root_dir : str
        Directory where real datasets are cached as pickles after first load.
    dtype : numpy dtype
        Precision of the returned data.

    Returns
    -------
    (train_features, train_labels, test_features, test_labels) tuple
    containing matrices or dataframes of the requested format.
    test_features and test_labels may be None if no splitting was done.
    """
    pickle_x_file_url = os.path.join(
        datasets_root_dir, "%s_x.pkl" % dataset_name
    )
    pickle_y_file_url = os.path.join(
        datasets_root_dir, "%s_y.pkl" % dataset_name
    )

    # Synthetic generators take (n_samples, n_features, dtype, ...) directly.
    # BUG FIX: the zeros generator is registered as "zeros", but this list
    # previously said "zero", which sent dataset_name="zeros" down the
    # real-dataset branch with the wrong call signature.
    mock_datasets = ["regression", "classification", "blobs", "zeros"]
    if dataset_name in mock_datasets:
        X_df, y_df = _data_generators[dataset_name](
            n_samples=n_samples, n_features=n_features, dtype=dtype, **kwargs
        )
    else:
        if os.path.isfile(pickle_x_file_url):
            # loading data from cache
            X = pd.read_pickle(pickle_x_file_url)
            y = pd.read_pickle(pickle_y_file_url)
        else:
            X, y = _data_generators[dataset_name](datasets_root_dir, **kwargs)
            # cache the dataset for future use
            X.to_pickle(pickle_x_file_url)
            y.to_pickle(pickle_y_file_url)

        if n_samples > X.shape[0]:
            raise ValueError(
                "%s dataset has only %d rows, cannot support %d"
                % (dataset_name, X.shape[0], n_samples)
            )
        if n_features > X.shape[1]:
            raise ValueError(
                "%s dataset has only %d features, cannot support %d"
                % (dataset_name, X.shape[1], n_features)
            )
        if n_samples == 0:
            n_samples = X.shape[0]
        if n_features == 0:
            n_features = X.shape[1]

        X_df = cudf.DataFrame.from_pandas(
            X.iloc[0:n_samples, 0:n_features].astype(dtype)
        )
        y_df = cudf.Series.from_pandas(y.iloc[0:n_samples].astype(dtype))

    data = (X_df, y_df)
    if test_fraction != 0.0:
        # Forward random_state only when the caller supplied one, keeping
        # sklearn's default behavior otherwise.
        random_state_dict = (
            {"random_state": kwargs["random_state"]}
            if "random_state" in kwargs
            else {}
        )
        X_train, X_test, y_train, y_test = tuple(
            sklearn.model_selection.train_test_split(
                *data,
                test_size=int(n_samples * test_fraction),
                **random_state_dict,
            )
        )
        data = (X_train, y_train, X_test, y_test)
    else:
        data = (*data, None, None)  # No test set

    data = _data_converters[dataset_format](data)
    return data
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/benchmark/__init__.py | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/benchmark/ci_benchmark.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Script to benchmark cuML modules in CI
NOTE: This is currently experimental as the ops team builds out the CI
platform to support benchmark reporting.
"""
from cuml.benchmark.runners import run_variations
from cuml.benchmark import algorithms
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
def log_range(start, end, n):
    """Return n integers spaced evenly on a log scale from start to end."""
    lo, hi = np.log10(start), np.log10(end)
    return np.logspace(lo, hi, num=n, dtype=np.int32)
def expand_params(key, vals):
    """Expand a list of values into one single-entry override dict per value."""
    return [{key: value} for value in vals]
def report_asv(
    results_df, output_dir, cudaVer="", pythonVer="", osType="", machineName=""
):
    """Logs the dataframe `results_df` to airspeed velocity format.

    This writes (or appends to) JSON files in `output_dir`

    Parameters
    ----------
    results_df : pd.DataFrame
        DataFrame with one row per benchmark run
    output_dir : str
        Directory for ASV output database
    cudaVer, pythonVer, osType, machineName : str, optional
        Environment metadata recorded with each result; when empty, values
        are auto-detected from the current machine where possible.
    """
    import asvdb
    import platform
    import psutil

    uname = platform.uname()
    (commitHash, commitTime) = asvdb.utils.getCommitInfo()

    b_info = asvdb.BenchmarkInfo(
        machineName=machineName or uname.machine,
        cudaVer=cudaVer or "unknown",
        osType=osType or "%s %s" % (uname.system, uname.release),
        pythonVer=pythonVer or platform.python_version(),
        commitHash=commitHash,
        commitTime=commitTime,
        gpuType="unknown",
        cpuType=uname.processor,
        arch=uname.machine,
        ram="%d" % psutil.virtual_memory().total,
    )
    (
        repo,
        branch,
    ) = asvdb.utils.getRepoInfo()  # gets repo info from CWD by default

    db = asvdb.ASVDb(dbDir=output_dir, repo=repo, branches=[branch])
    for index, row in results_df.iterrows():
        # Columns not in val_keys are treated as benchmark parameters;
        # cu_time is the measured value recorded for ASV.
        val_keys = ["cu_time", "cpu_time", "speedup", "cuml_acc", "cpu_acc"]
        params = [(k, v) for k, v in row.items() if k not in val_keys]
        result = asvdb.BenchmarkResult(
            row["algo"], params, result=row["cu_time"]
        )
        db.addResult(b_info, result)
# Benchmark definitions for the preprocessing algorithms. Each tuple is
# (algo_name, dataset_name, bench_rows, bench_dims, param_override_list).
preprocessing_algo_defs = [
    (
        "StandardScaler",
        "classification",
        [1000000],
        [256, 1024],
        [{"copy": False}],
    ),
    (
        "MinMaxScaler",
        "classification",
        [1000000],
        [256, 1024],
        [{"copy": False}],
    ),
    (
        "MaxAbsScaler",
        "classification",
        [1000000],
        [256, 1024],
        [{"copy": False}],
    ),
    (
        "Normalizer",
        "classification",
        [1000000],
        [256, 1024],
        [{"copy": False}],
    ),
    (
        "RobustScaler",
        "classification",
        [1000000],
        [128, 256],
        [{"copy": False}],
    ),
    (
        "SimpleImputer",
        "classification",
        [1000000],
        [256, 1024],
        [{"copy": False}],
    ),
    ("PolynomialFeatures", "classification", [1000000], [128, 256], [{}]),
    # Sparse variants: the "SparseCSR"/"SparseCSC" name prefix selects the
    # matching scipy sparse input type in the __main__ driver below.
    (
        "SparseCSRStandardScaler",
        "classification",
        [1000000],
        [512],
        [{"copy": False, "with_mean": False}],
    ),
    (
        "SparseCSRMaxAbsScaler",
        "classification",
        [300000],
        [512],
        [{"copy": False}],
    ),
    (
        "SparseCSRNormalizer",
        "classification",
        [1000000],
        [512],
        [{"copy": False}],
    ),
    (
        "SparseCSCRobustScaler",
        "classification",
        [1000000],
        [512],
        [{"copy": False, "with_centering": False}],
    ),
    (
        "SparseCSCSimpleImputer",
        "classification",
        [1000000],
        [512],
        [{"copy": False}],
    ),
    ("SparseCSRPolynomialFeatures", "classification", [30000], [128], [{}]),
]

# Names only; used to expand the "preprocessing" meta-selection of --algo.
preprocessing_algo_names = set([a[0] for a in preprocessing_algo_defs])
def make_bench_configs(long_config):
    """Defines the configurations we want to benchmark

    If `long_config` is True, this may take over an hour.
    If False, the run should take only a few minutes.

    Returns a list of dicts, each a keyword set for runners.run_variations
    (with algo_name later swapped for an actual algorithm object by the
    __main__ driver).
    """
    configs = []
    if long_config:
        # Use large_rows for pretty fast algos,
        # use small_rows for slower ones
        small_rows = log_range(10000, 1000000, 2)
        large_rows = log_range(1e5, 1e7, 2)
    else:
        # Small config only runs a single size
        small_rows = log_range(20000, 20000, 1)
        large_rows = log_range(100000, 100000, 1)

    default_dims = [16, 256]

    # Add all the simple algorithms that don't need special treatment
    algo_defs = [
        ("KMeans", "blobs", small_rows, default_dims, [{}]),
        ("DBScan", "blobs", small_rows, default_dims, [{}]),
        ("TSNE", "blobs", small_rows, default_dims, [{}]),
        ("NearestNeighbors", "blobs", small_rows, default_dims, [{}]),
        ("MBSGDClassifier", "blobs", large_rows, default_dims, [{}]),
        (
            "LogisticRegression",
            "classification",
            large_rows,
            default_dims,
            [{}],
        ),
        ("LinearRegression", "regression", large_rows, default_dims, [{}]),
        ("Lasso", "regression", large_rows, default_dims, [{}]),
        ("ElasticNet", "regression", large_rows, default_dims, [{}]),
        (
            "PCA",
            "blobs",
            large_rows,
            [32, 256],
            expand_params("n_components", [2, 25]),
        ),
        (
            "tSVD",
            "blobs",
            large_rows,
            [32, 256],
            expand_params("n_components", [2, 25]),
        ),
        (
            "GaussianRandomProjection",
            "blobs",
            large_rows,
            [32, 256],
            expand_params("n_components", [2, 25]),
        ),
    ]
    algo_defs += preprocessing_algo_defs

    for algo_name, dataset_name, rows, dims, params in algo_defs:
        configs.append(
            dict(
                algo_name=algo_name,
                dataset_name=dataset_name,
                bench_rows=rows,
                bench_dims=dims,
                param_override_list=params,
            )
        )

    # Explore some more interesting params for RF
    if long_config:
        configs += [
            dict(
                algo_name="RandomForestClassifier",
                dataset_name="classification",
                bench_rows=small_rows,
                bench_dims=default_dims,
                cuml_param_override_list=[
                    {"n_bins": [8, 32]},
                    {"max_features": ["sqrt", 1.0]},
                ],
            )
        ]
    return configs
# Named benchmark suites: "short" is a quick smoke run, "long" is the
# thorough (possibly hour-plus) CI configuration.
bench_config = {
    "short": make_bench_configs(False),
    "long": make_bench_configs(True),
}
if __name__ == "__main__":
    import argparse

    # Every algorithm name referenced by any configured benchmark suite.
    allAlgoNames = set(
        [v["algo_name"] for tuples in bench_config.values() for v in tuples]
    )

    parser = argparse.ArgumentParser(
        prog="ci_benchmark",
        description="""
                        Tool for running benchmarks in CI
                        """,
    )
    parser.add_argument(
        "--benchmark", type=str, choices=bench_config.keys(), default="short"
    )
    parser.add_argument(
        "--algo",
        type=str,
        action="append",
        help='Algorithm to run, must be one of %s, or "ALL"'
        % ", ".join(['"%s"' % k for k in allAlgoNames]),
    )
    parser.add_argument(
        "--update_asv_dir",
        type=str,
        help="Add results to the specified ASV dir in ASV " "format",
    )
    parser.add_argument(
        "--report_cuda_ver",
        type=str,
        default="",
        help="The CUDA version to include in reports",
    )
    parser.add_argument(
        "--report_python_ver",
        type=str,
        default="",
        help="The Python version to include in reports",
    )
    parser.add_argument(
        "--report_os_type",
        type=str,
        default="",
        help="The OS type to include in reports",
    )
    parser.add_argument(
        "--report_machine_name",
        type=str,
        default="",
        help="The machine name to include in reports",
    )
    parser.add_argument("--n_reps", type=int, default=3)
    args = parser.parse_args()

    # BUG FIX: args.algo is None when --algo is omitted, and set(None)
    # raises TypeError. Keep algos=None in that case -- the filter below
    # already treats None as "run everything".
    algos = set(args.algo) if args.algo else None
    if algos and "preprocessing" in algos:
        # "preprocessing" is a meta-name that expands to all preprocessing
        # algorithms.
        algos = algos.union(preprocessing_algo_names)
        algos.remove("preprocessing")
    if algos:
        invalidAlgoNames = algos - allAlgoNames
        if invalidAlgoNames:
            raise ValueError("Invalid algo name(s): %s" % invalidAlgoNames)

    bench_to_run = bench_config[args.benchmark]

    default_args = dict(run_cpu=True, n_reps=args.n_reps)
    all_results = []
    for cfg_in in bench_to_run:
        if (
            (algos is None)
            or ("ALL" in algos)
            or (cfg_in["algo_name"] in algos)
        ):
            # Pass an actual algo object instead of an algo_name string
            cfg = cfg_in.copy()
            algo = algorithms.algorithm_by_name(cfg_in["algo_name"])
            cfg["algos"] = [algo]
            alg_name = cfg["algo_name"]
            # Sparse variants are fed scipy sparse inputs matching their
            # name prefix; everything else gets plain numpy arrays.
            # (BUG FIX: input_type could previously be left unbound for
            # non-sparse algorithm names.)
            if alg_name.startswith("SparseCSR"):
                input_type = "scipy-sparse-csr"
            elif alg_name.startswith("SparseCSC"):
                input_type = "scipy-sparse-csc"
            else:
                input_type = "numpy"
            del cfg["algo_name"]
            res = run_variations(
                **{**default_args, **cfg}, input_type=input_type
            )
            all_results.append(res)

    results_df = pd.concat(all_results)
    print(results_df)

    if args.update_asv_dir:
        report_asv(
            results_df,
            args.update_asv_dir,
            cudaVer=args.report_cuda_ver,
            pythonVer=args.report_python_ver,
            osType=args.report_os_type,
            machineName=args.report_machine_name,
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/benchmark/bench_helper_funcs.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.manifold import UMAP
from cuml.benchmark import datagen
from cuml.common.device_selection import using_device_type
from cuml.internals.device_type import DeviceType
from cuml.internals.global_settings import GlobalSettings
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import,
gpu_only_import_from,
safe_import,
)
import sklearn.ensemble as skl_ensemble
import pickle as pickle
import os
import cuml
from cuml.internals import input_utils
from time import perf_counter
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
cudf = gpu_only_import("cudf")
cuda = gpu_only_import_from("numba", "cuda")
cp = gpu_only_import("cupy")
xgb = safe_import("xgboost")
treelite = safe_import("treelite")
def call(m, func_name, X, y=None):
    """Invoke ``m.<func_name>`` on ``X``, forwarding ``y`` only when the
    target function declares a ``y`` argument and labels were provided.

    Raises
    ------
    ValueError
        If ``m`` has no attribute named ``func_name``.
    """
    if not hasattr(m, func_name):
        raise ValueError("Model does not have function " + func_name)
    target = getattr(m, func_name)

    # Peel off decorator layers so we inspect the real signature.
    inner = target
    while hasattr(inner, "__wrapped__"):
        inner = inner.__wrapped__
    argnames = inner.__code__.co_varnames

    if y is not None and "y" in argnames:
        target(X, y=y)
    else:
        target(X)
def pass_func(m, x, y=None):
    """No-op benchmark step; used when only setup time should be measured."""
def fit(m, x, y=None):
    """Benchmark step that calls ``m.fit`` (``y`` forwarded when accepted)."""
    call(m, "fit", x, y)
def predict(m, x, y=None):
    """Benchmark step that calls ``m.predict`` on ``x`` (``y`` is ignored)."""
    call(m, "predict", x)
def transform(m, x, y=None):
    """Benchmark step that calls ``m.transform`` on ``x`` (``y`` is ignored)."""
    call(m, "transform", x)
def kneighbors(m, x, y=None):
    """Benchmark step that calls ``m.kneighbors`` on ``x`` (``y`` is ignored)."""
    call(m, "kneighbors", x)
def fit_predict(m, x, y=None):
    """Fit ``m`` and then predict; models without a separate ``predict``
    fall back to a combined ``fit_predict`` call."""
    if not hasattr(m, "predict"):
        call(m, "fit_predict", x, y)
        return
    fit(m, x, y)
    predict(m, x)
def fit_transform(m, x, y=None):
    """Fit ``m`` and then transform; models without a separate ``transform``
    fall back to a combined ``fit_transform`` call."""
    if not hasattr(m, "transform"):
        call(m, "fit_transform", x, y)
        return
    fit(m, x, y)
    transform(m, x)
def fit_kneighbors(m, x, y=None):
    """Fit ``m`` and then query neighbors; models without a separate
    ``kneighbors`` fall back to a combined ``fit_kneighbors`` call."""
    if not hasattr(m, "kneighbors"):
        call(m, "fit_kneighbors", x, y)
        return
    fit(m, x, y)
    kneighbors(m, x)
def _training_data_to_numpy(X, y):
"""Convert input training data into numpy format"""
if isinstance(X, np.ndarray):
X_np = X
y_np = y
elif isinstance(X, cp.ndarray):
X_np = cp.asnumpy(X)
y_np = cp.asnumpy(y)
elif isinstance(X, cudf.DataFrame):
X_np = X.to_numpy()
y_np = y.to_numpy()
elif cuda.devicearray.is_cuda_ndarray(X):
X_np = X.copy_to_host()
y_np = y.copy_to_host()
elif isinstance(X, (pd.DataFrame, pd.Series)):
X_np = datagen._convert_to_numpy(X)
y_np = datagen._convert_to_numpy(y)
else:
raise TypeError("Received unsupported input type")
return X_np, y_np
def _build_fil_classifier(m, data, args, tmpdir):
    """Setup function for FIL classification benchmarking.

    Trains an XGBoost binary classifier on ``data``, saves the booster
    under ``tmpdir``, and returns it loaded through the FIL class ``m``.

    Parameters
    ----------
    m : class
        ForestInference class whose ``load`` is used on the saved model.
    data : tuple
        (X, y) pair in any format ``_training_data_to_numpy`` accepts.
    args : dict
        Must contain ``max_depth`` and ``num_rounds``; extra XGBoost
        training parameters and the optional FIL loading knobs
        (``fil_algo``, ``output_class``, ``threshold``, ``storage_type``,
        ``precision``) are also read from here.
    tmpdir : str
        Directory receiving the serialized model file.
    """
    # NOTE: dropped an unused `from cuml.internals.import_utils import
    # has_xgboost` import that was never called.
    train_data, train_label = _training_data_to_numpy(data[0], data[1])
    dtrain = xgb.DMatrix(train_data, label=train_label)
    params = {
        "silent": 1,
        "eval_metric": "error",
        "objective": "binary:logistic",
        "tree_method": "gpu_hist",
    }
    params.update(args)
    max_depth = args["max_depth"]
    num_rounds = args["num_rounds"]
    n_feature = data[0].shape[1]
    train_size = data[0].shape[0]
    # The filename encodes the training configuration so other setup
    # helpers (e.g. the GTIL one) can locate the very same model file.
    model_name = f"xgb_{max_depth}_{num_rounds}_{n_feature}_{train_size}.model"
    model_path = os.path.join(tmpdir, model_name)
    bst = xgb.train(params, dtrain, num_rounds)
    bst.save_model(model_path)
    # Forward only the FIL-specific knobs actually present in ``args``.
    fil_kwargs = {
        param: args[input_name]
        for param, input_name in (
            ("algo", "fil_algo"),
            ("output_class", "output_class"),
            ("threshold", "threshold"),
            ("storage_type", "storage_type"),
            ("precision", "precision"),
        )
        if input_name in args
    }
    return m.load(model_path, **fil_kwargs)
class OptimizedFilWrapper:
    """Uniform ``predict`` facade over classic and experimental FIL models.

    Experimental FIL accepts a ``chunk_size`` keyword at predict time; this
    wrapper remembers the tuned value so callers can simply call
    ``predict``. ``infer_type="per_tree"`` routes to ``predict_per_tree``.
    """

    def __init__(
        self, fil_model, optimal_chunk_size, experimental, infer_type="default"
    ):
        self.fil_model = fil_model
        self.infer_type = infer_type
        # Only experimental FIL understands chunk_size at predict time.
        self.predict_kwargs = (
            {"chunk_size": optimal_chunk_size} if experimental else {}
        )

    def predict(self, X):
        """Run inference on ``X`` with the tuned keyword arguments."""
        if self.infer_type == "per_tree":
            return self.fil_model.predict_per_tree(X, **self.predict_kwargs)
        return self.fil_model.predict(X, **self.predict_kwargs)
def _build_optimized_fil_classifier(m, data, args, tmpdir):
    """Setup function for FIL classification benchmarking with optimal
    parameters.

    Trains an XGBoost model, then grid-searches over storage type,
    algorithm, layout (experimental FIL only) and chunk size, timing a few
    predict calls per combination, and returns the fastest configuration
    wrapped in ``OptimizedFilWrapper``.
    """
    with using_device_type("gpu"):
        train_data, train_label = _training_data_to_numpy(data[0], data[1])
        dtrain = xgb.DMatrix(train_data, label=train_label)
        params = {
            "silent": 1,
            "eval_metric": "error",
            "objective": "binary:logistic",
            "tree_method": "gpu_hist",
        }
        params.update(args)
        max_depth = args["max_depth"]
        num_rounds = args["num_rounds"]
        n_feature = data[0].shape[1]
        train_size = data[0].shape[0]
        model_name = (
            f"xgb_{max_depth}_{num_rounds}_{n_feature}_{train_size}.model"
        )
        model_path = os.path.join(tmpdir, model_name)
        bst = xgb.train(params, dtrain, num_rounds)
        bst.save_model(model_path)

        # Larger chunk sizes only make sense for CPU execution.
        allowed_chunk_sizes = [1, 2, 4, 8, 16, 32]
        if GlobalSettings().device_type is DeviceType.host:
            allowed_chunk_sizes.extend((64, 128, 256))

        fil_kwargs = {
            param: args[input_name]
            for param, input_name in (
                ("algo", "fil_algo"),
                ("output_class", "output_class"),
                ("threshold", "threshold"),
                ("storage_type", "storage_type"),
                ("precision", "precision"),
            )
            if input_name in args
        }

        experimental = m is cuml.experimental.ForestInference
        if experimental:
            allowed_storage_types = ["sparse"]
        else:
            allowed_storage_types = ["sparse", "sparse8"]
        if args["storage_type"] == "dense":
            allowed_storage_types.append("dense")

        infer_type = args.get("infer_type", "default")

        optimal_storage_type = "sparse"
        optimal_algo = "NAIVE"
        optimal_layout = "breadth_first"
        optimal_chunk_size = 1
        best_time = None
        optimization_cycles = 5
        for storage_type in allowed_storage_types:
            fil_kwargs["storage_type"] = storage_type
            allowed_algo_types = ["NAIVE"]
            if not experimental and storage_type == "dense":
                allowed_algo_types.extend(("TREE_REORG", "BATCH_TREE_REORG"))
            allowed_layout_types = ["breadth_first"]
            if experimental:
                allowed_layout_types.append("depth_first")
            for algo in allowed_algo_types:
                fil_kwargs["algo"] = algo
                for layout in allowed_layout_types:
                    if experimental:
                        fil_kwargs["layout"] = layout
                    for chunk_size in allowed_chunk_sizes:
                        fil_kwargs["threads_per_tree"] = chunk_size
                        call_args = {}
                        if experimental:
                            call_args = {"chunk_size": chunk_size}
                        fil_model = m.load(model_path, **fil_kwargs)
                        # Warm-up call, excluded from timing.
                        if infer_type == "per_tree":
                            fil_model.predict_per_tree(
                                train_data, **call_args
                            )
                        else:
                            fil_model.predict(train_data, **call_args)
                        begin = perf_counter()
                        # FIX: time the same number of cycles for both
                        # inference types; previously the per-tree path
                        # was timed over a single (noisier) call.
                        for _ in range(optimization_cycles):
                            if infer_type == "per_tree":
                                fil_model.predict_per_tree(
                                    train_data, **call_args
                                )
                            else:
                                fil_model.predict(train_data, **call_args)
                        end = perf_counter()
                        elapsed = end - begin
                        if best_time is None or elapsed < best_time:
                            best_time = elapsed
                            optimal_storage_type = storage_type
                            optimal_algo = algo
                            optimal_chunk_size = chunk_size
                            optimal_layout = layout

        # Reload the model with the winning configuration.
        fil_kwargs["storage_type"] = optimal_storage_type
        fil_kwargs["algo"] = optimal_algo
        fil_kwargs["threads_per_tree"] = optimal_chunk_size
        if experimental:
            fil_kwargs["layout"] = optimal_layout
        return OptimizedFilWrapper(
            m.load(model_path, **fil_kwargs),
            optimal_chunk_size,
            experimental,
            infer_type=infer_type,
        )
def _build_fil_skl_classifier(m, data, args, tmpdir):
    """Trains an SKLearn classifier and returns a FIL version of it.

    The fitted ``RandomForestClassifier`` is also pickled into ``tmpdir``
    so ``_build_cpu_skl_classifier`` can later reload the identical model
    for the CPU side of the comparison.
    """
    train_data, train_label = _training_data_to_numpy(data[0], data[1])
    params = {
        "n_estimators": 100,
        "max_leaf_nodes": 2**10,
        "max_features": "sqrt",
        "n_jobs": -1,
        "random_state": 42,
    }
    params.update(args)
    # remove keyword arguments not understood by SKLearn
    for param_name in [
        "fil_algo",
        "output_class",
        "threshold",
        "storage_type",
        "precision",
    ]:
        params.pop(param_name, None)
    max_leaf_nodes = args["max_leaf_nodes"]
    n_estimators = args["n_estimators"]
    n_feature = data[0].shape[1]
    train_size = data[0].shape[0]
    model_name = (
        f"skl_{max_leaf_nodes}_{n_estimators}_{n_feature}_"
        + f"{train_size}.model.pkl"
    )
    model_path = os.path.join(tmpdir, model_name)
    skl_model = skl_ensemble.RandomForestClassifier(**params)
    skl_model.fit(train_data, train_label)
    # Use a context manager so the file handle is closed deterministically
    # (the previous `pickle.dump(..., open(...))` leaked the handle).
    with open(model_path, "wb") as model_file:
        pickle.dump(skl_model, model_file)
    fil_kwargs = {
        param: args[input_name]
        for param, input_name in (
            ("algo", "fil_algo"),
            ("output_class", "output_class"),
            ("threshold", "threshold"),
            ("storage_type", "storage_type"),
            ("precision", "precision"),
        )
        if input_name in args
    }
    return m.load_from_sklearn(skl_model, **fil_kwargs)
def _build_cpu_skl_classifier(m, data, args, tmpdir):
"""Loads the SKLearn classifier and returns it"""
max_leaf_nodes = args["max_leaf_nodes"]
n_estimators = args["n_estimators"]
n_feature = data[0].shape[1]
train_size = data[0].shape[0]
model_name = (
f"skl_{max_leaf_nodes}_{n_estimators}_{n_feature}_"
+ f"{train_size}.model.pkl"
)
model_path = os.path.join(tmpdir, model_name)
skl_model = pickle.load(open(model_path, "rb"))
return skl_model
class GtilWrapper:
    """Adapter exposing treelite GTIL inference through a ``predict``
    method compatible with the benchmarking functions."""

    def __init__(self, tl_model, infer_type="default"):
        self.tl_model = tl_model
        self.infer_type = infer_type

    def predict(self, X):
        """Run GTIL inference on ``X``; per-tree output when configured."""
        predictor = (
            treelite.gtil.predict_per_tree
            if self.infer_type == "per_tree"
            else treelite.gtil.predict
        )
        return predictor(self.tl_model, X)
def _build_gtil_classifier(m, data, args, tmpdir):
    """Setup function for treelite GTIL classification benchmarking.

    Loads the XGBoost model previously saved under ``tmpdir`` by the FIL
    setup helpers (the filename is derived from the same parameters) and
    wraps it for GTIL inference.
    """
    # NOTE: dropped an unused `from cuml.internals.import_utils import
    # has_xgboost` import that was never called.
    max_depth = args["max_depth"]
    num_rounds = args["num_rounds"]
    infer_type = args.get("infer_type", "default")
    n_feature = data[0].shape[1]
    train_size = data[0].shape[0]
    model_name = f"xgb_{max_depth}_{num_rounds}_{n_feature}_{train_size}.model"
    model_path = os.path.join(tmpdir, model_name)
    # Assumes the model file already exists — TODO confirm the setup order
    # guarantees the cuML-side builder ran first.
    bst = xgb.Booster()
    bst.load_model(model_path)
    tl_model = treelite.Model.from_xgboost(bst)
    return GtilWrapper(tl_model, infer_type=infer_type)
def _build_treelite_classifier(m, data, args, tmpdir):
    """Setup function for treelite classification benchmarking.

    Reloads the previously saved XGBoost model, compiles it to a shared
    library with treelite and returns a ``treelite_runtime.Predictor``.
    """
    # NOTE: dropped an unused `from cuml.internals.import_utils import
    # has_xgboost` import that was never called.
    import treelite_runtime

    max_depth = args["max_depth"]
    num_rounds = args["num_rounds"]
    n_feature = data[0].shape[1]
    train_size = data[0].shape[0]
    model_name = f"xgb_{max_depth}_{num_rounds}_{n_feature}_{train_size}.model"
    model_path = os.path.join(tmpdir, model_name)
    bst = xgb.Booster()
    bst.load_model(model_path)
    tl_model = treelite.Model.from_xgboost(bst)
    # Compile the model to a shared library for the runtime predictor.
    tl_model.export_lib(
        toolchain="gcc",
        libpath=os.path.join(tmpdir, "treelite.so"),
        params={"parallel_comp": 40},
        verbose=False,
    )
    return treelite_runtime.Predictor(
        os.path.join(tmpdir, "treelite.so"), verbose=False
    )
def _treelite_fil_accuracy_score(y_true, y_pred):
    """Accuracy for FIL outputs: thresholds probabilities at 0.5 before
    scoring, copying device arrays back to host when necessary."""

    def _to_host(arr):
        # numba device arrays need an explicit host copy.
        if cuda.devicearray.is_cuda_ndarray(arr):
            return arr.copy_to_host()
        return arr

    y_pred_host = _to_host(y_pred)
    y_true_host = _to_host(y_true)
    y_pred_binary = input_utils.convert_dtype(y_pred_host > 0.5, np.int32)
    return cuml.metrics.accuracy_score(y_true_host, y_pred_binary)
def _build_mnmg_umap(m, data, args, tmpdir):
    # Fit a single-GPU UMAP locally, then wrap it in the MNMG (dask) UMAP
    # class ``m`` for distributed benchmarking.
    client = args["client"]
    # NOTE: mutates the caller's ``args`` dict by removing "client" so the
    # remaining entries can be forwarded as UMAP constructor kwargs.
    del args["client"]
    local_model = UMAP(**args)
    if isinstance(data, (tuple, list)):
        # Materialize dask collections locally before fitting.
        local_data = [x.compute() for x in data if x is not None]
        if len(local_data) == 2:
            X, y = local_data
            local_model.fit(X, y)
        else:
            # NOTE(review): ``X`` is left as a (likely one-element) list
            # here — presumably ``local_data[0]`` was intended; confirm.
            X = local_data
            local_model.fit(X)
    # If ``data`` is not a tuple/list, ``local_model`` is returned unfitted
    # — presumably callers always pass a tuple/list; verify.
    return m(client=client, model=local_model, **args)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/benchmark/algorithms.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import treelite_runtime
import treelite
from cuml.benchmark.bench_helper_funcs import (
fit,
transform,
predict,
fit_transform,
fit_predict,
fit_kneighbors,
_build_cpu_skl_classifier,
_build_fil_skl_classifier,
_build_fil_classifier,
_build_gtil_classifier,
_build_optimized_fil_classifier,
_build_treelite_classifier,
_treelite_fil_accuracy_score,
_training_data_to_numpy,
_build_mnmg_umap,
)
from cuml.preprocessing import (
StandardScaler,
MinMaxScaler,
MaxAbsScaler,
Normalizer,
SimpleImputer,
RobustScaler,
PolynomialFeatures,
)
import tempfile
import cuml
import sklearn
import sklearn.cluster
import sklearn.neighbors
import sklearn.ensemble
import sklearn.random_projection
import sklearn.naive_bayes
from sklearn import metrics
from sklearn.impute import SimpleImputer as skSimpleImputer
import cuml.metrics
import cuml.decomposition
import cuml.experimental
import cuml.naive_bayes
from cuml.dask import (
neighbors,
cluster,
manifold,
decomposition,
linear_model,
) # noqa: F401
from cuml.internals.import_utils import has_hdbscan, has_umap
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
if has_umap():
import umap
if has_hdbscan():
import hdbscan
class AlgorithmPair:
    """
    Wraps a cuML algorithm and (optionally) a cpu-based algorithm
    (typically scikit-learn, but does not need to be as long as it offers
    `fit` and `predict` or `transform` methods).
    Provides mechanisms to run each version with default arguments.
    If no CPU-based version of the algorithm is available, pass None for the
    cpu_class when instantiating

    Parameters
    ----------
    cpu_class : class
        Class for CPU version of algorithm. Set to None if not available.
    cuml_class : class
        Class for cuML algorithm
    shared_args : dict
        Arguments passed to both implementations's initializer
    cuml_args : dict
        Arguments *only* passed to cuml's initializer
    cpu_args dict
        Arguments *only* passed to sklearn's initializer
    accepts_labels : boolean
        If True, the fit methods expects both X and y
        inputs. Otherwise, it expects only an X input.
    data_prep_hook : function (data -> data)
        Optional function to run on input data before passing to fit
    accuracy_function : function (y_test, y_pred)
        Function that returns a scalar representing accuracy
    bench_func : custom function to perform fit/predict/transform
        calls.
    """

    def __init__(
        self,
        cpu_class,
        cuml_class,
        shared_args,
        cuml_args=None,
        cpu_args=None,
        name=None,
        accepts_labels=True,
        cpu_data_prep_hook=None,
        cuml_data_prep_hook=None,
        accuracy_function=None,
        bench_func=fit,
        setup_cpu_func=None,
        setup_cuml_func=None,
    ):
        # Replace the former mutable `{}` defaults with None sentinels so a
        # single dict instance is never shared across AlgorithmPair objects.
        if cuml_args is None:
            cuml_args = {}
        if cpu_args is None:
            cpu_args = {}
        if name:
            self.name = name
        else:
            self.name = cuml_class.__name__
        self.accepts_labels = accepts_labels
        self.bench_func = bench_func
        self.setup_cpu_func = setup_cpu_func
        self.setup_cuml_func = setup_cuml_func
        self.cpu_class = cpu_class
        self.cuml_class = cuml_class
        self.shared_args = shared_args
        self.cuml_args = cuml_args
        self.cpu_args = cpu_args
        self.cpu_data_prep_hook = cpu_data_prep_hook
        self.cuml_data_prep_hook = cuml_data_prep_hook
        self.accuracy_function = accuracy_function
        # Scratch directory used by setup functions to store trained models.
        self.tmpdir = tempfile.mkdtemp()

    def __str__(self):
        return "AlgoPair:%s" % (self.name)

    def run_cpu(self, data, bench_args=None, **override_setup_args):
        """Runs the cpu-based algorithm's fit method on specified data"""
        if bench_args is None:
            bench_args = {}
        if self.cpu_class is None:
            raise ValueError("No CPU implementation for %s" % self.name)

        all_args = {**self.shared_args, **self.cpu_args}
        all_args = {**all_args, **override_setup_args}

        # A setup function may have already constructed the object.
        if "cpu_setup_result" not in all_args:
            cpu_obj = self.cpu_class(**all_args)
        else:
            cpu_obj = all_args["cpu_setup_result"]
        if self.cpu_data_prep_hook:
            data = self.cpu_data_prep_hook(data)
        if self.accepts_labels:
            self.bench_func(cpu_obj, data[0], data[1], **bench_args)
        else:
            self.bench_func(cpu_obj, data[0], **bench_args)

        return cpu_obj

    def run_cuml(self, data, bench_args=None, **override_setup_args):
        """Runs the cuml-based algorithm's fit method on specified data"""
        if bench_args is None:
            bench_args = {}
        all_args = {**self.shared_args, **self.cuml_args}
        all_args = {**all_args, **override_setup_args}

        # A setup function may have already constructed the object.
        if "cuml_setup_result" not in all_args:
            cuml_obj = self.cuml_class(**all_args)
        else:
            cuml_obj = all_args["cuml_setup_result"]
        if self.cuml_data_prep_hook:
            data = self.cuml_data_prep_hook(data)
        if self.accepts_labels:
            self.bench_func(cuml_obj, data[0], data[1], **bench_args)
        else:
            self.bench_func(cuml_obj, data[0], **bench_args)

        return cuml_obj

    def setup_cpu(self, data, **override_args):
        """Runs the optional CPU setup function; returns the kwargs that
        ``run_cpu`` should be called with."""
        all_args = {**self.shared_args, **self.cpu_args}
        all_args = {**all_args, **override_args}
        if self.setup_cpu_func is not None:
            return {
                "cpu_setup_result": self.setup_cpu_func(
                    self.cpu_class, data, all_args, self.tmpdir
                )
            }
        else:
            return all_args

    def setup_cuml(self, data, **override_args):
        """Runs the optional cuML setup function; returns the kwargs that
        ``run_cuml`` should be called with."""
        all_args = {**self.shared_args, **self.cuml_args}
        all_args = {**all_args, **override_args}
        if self.setup_cuml_func is not None:
            return {
                "cuml_setup_result": self.setup_cuml_func(
                    self.cuml_class, data, all_args, self.tmpdir
                )
            }
        else:
            return all_args
def _labels_to_int_hook(data):
"""Helper function converting labels to int32"""
return data[0], data[1].astype(np.int32)
def _treelite_format_hook(data):
    """Convert an (X, y) pair into a treelite DMatrix plus numpy labels."""
    X_np, y_np = _training_data_to_numpy(data[0], data[1])
    return treelite_runtime.DMatrix(X_np), y_np
def _numpy_format_hook(data):
    """Coerce the (X, y) pair into host numpy arrays."""
    features, labels = data[0], data[1]
    return _training_data_to_numpy(features, labels)
def all_algorithms():
    """Returns all defined AlgorithmPair objects.

    FIX: the ``LinearSVR`` pair previously scored with
    ``cuml.metrics.accuracy_score``; LinearSVR is a regressor, so it is now
    scored with ``r2_score`` like the other regression pairs.
    """
    algorithms = [
        AlgorithmPair(
            sklearn.cluster.KMeans,
            cuml.cluster.KMeans,
            shared_args=dict(
                init="k-means++", n_clusters=8, max_iter=300, n_init=1
            ),
            cuml_args=dict(oversampling_factor=0),
            name="KMeans",
            accepts_labels=False,
            accuracy_function=metrics.homogeneity_score,
        ),
        AlgorithmPair(
            sklearn.decomposition.PCA,
            cuml.PCA,
            shared_args=dict(n_components=10),
            name="PCA",
            accepts_labels=False,
        ),
        AlgorithmPair(
            sklearn.decomposition.TruncatedSVD,
            cuml.decomposition.tsvd.TruncatedSVD,
            shared_args=dict(n_components=10),
            name="tSVD",
            accepts_labels=False,
        ),
        AlgorithmPair(
            sklearn.random_projection.GaussianRandomProjection,
            cuml.random_projection.GaussianRandomProjection,
            shared_args=dict(n_components=10),
            name="GaussianRandomProjection",
            accepts_labels=False,
        ),
        AlgorithmPair(
            sklearn.random_projection.SparseRandomProjection,
            cuml.random_projection.SparseRandomProjection,
            shared_args=dict(n_components=10),
            name="SparseRandomProjection",
            accepts_labels=False,
        ),
        AlgorithmPair(
            sklearn.neighbors.NearestNeighbors,
            cuml.neighbors.NearestNeighbors,
            shared_args=dict(n_neighbors=64),
            cpu_args=dict(algorithm="brute", n_jobs=-1),
            cuml_args={},
            name="NearestNeighbors",
            accepts_labels=False,
            bench_func=fit_kneighbors,
        ),
        AlgorithmPair(
            sklearn.cluster.DBSCAN,
            cuml.DBSCAN,
            shared_args=dict(eps=3, min_samples=2),
            cpu_args=dict(algorithm="brute"),
            name="DBSCAN",
            accepts_labels=False,
        ),
        AlgorithmPair(
            hdbscan.HDBSCAN if has_hdbscan() else None,
            cuml.cluster.HDBSCAN,
            shared_args={},
            cpu_args={},
            name="HDBSCAN",
            accepts_labels=False,
        ),
        AlgorithmPair(
            sklearn.linear_model.LinearRegression,
            cuml.linear_model.LinearRegression,
            shared_args={},
            name="LinearRegression",
            accepts_labels=True,
            accuracy_function=metrics.r2_score,
        ),
        AlgorithmPair(
            sklearn.linear_model.ElasticNet,
            cuml.linear_model.ElasticNet,
            shared_args={"alpha": 0.1, "l1_ratio": 0.5},
            name="ElasticNet",
            accepts_labels=True,
            accuracy_function=metrics.r2_score,
        ),
        AlgorithmPair(
            sklearn.linear_model.Lasso,
            cuml.linear_model.Lasso,
            shared_args={},
            name="Lasso",
            accepts_labels=True,
            accuracy_function=metrics.r2_score,
        ),
        AlgorithmPair(
            sklearn.linear_model.Ridge,
            cuml.linear_model.Ridge,
            shared_args={},
            name="Ridge",
            accepts_labels=True,
            accuracy_function=metrics.r2_score,
        ),
        AlgorithmPair(
            sklearn.linear_model.LogisticRegression,
            cuml.linear_model.LogisticRegression,
            shared_args=dict(),  # Use default solvers
            name="LogisticRegression",
            accepts_labels=True,
            accuracy_function=metrics.accuracy_score,
        ),
        AlgorithmPair(
            sklearn.ensemble.RandomForestClassifier,
            cuml.ensemble.RandomForestClassifier,
            shared_args={},
            cpu_args={"n_jobs": -1},
            name="RandomForestClassifier",
            accepts_labels=True,
            cpu_data_prep_hook=_labels_to_int_hook,
            cuml_data_prep_hook=_labels_to_int_hook,
            accuracy_function=metrics.accuracy_score,
        ),
        AlgorithmPair(
            sklearn.ensemble.RandomForestRegressor,
            cuml.ensemble.RandomForestRegressor,
            shared_args={},
            cpu_args={"n_jobs": -1},
            name="RandomForestRegressor",
            accepts_labels=True,
            accuracy_function=metrics.r2_score,
        ),
        AlgorithmPair(
            sklearn.manifold.TSNE,
            cuml.manifold.TSNE,
            shared_args=dict(),
            name="TSNE",
            accepts_labels=False,
        ),
        AlgorithmPair(
            None,
            cuml.linear_model.MBSGDClassifier,
            shared_args={},
            cuml_args=dict(eta0=0.005, epochs=100),
            name="MBSGDClassifier",
            accepts_labels=True,
            accuracy_function=cuml.metrics.accuracy_score,
        ),
        AlgorithmPair(
            sklearn.svm.SVC,
            cuml.svm.SVC,
            shared_args={"kernel": "rbf"},
            cuml_args={},
            name="SVC-RBF",
            accepts_labels=True,
            accuracy_function=cuml.metrics.accuracy_score,
        ),
        AlgorithmPair(
            sklearn.svm.SVC,
            cuml.svm.SVC,
            shared_args={"kernel": "linear"},
            cuml_args={},
            name="SVC-Linear",
            accepts_labels=True,
            accuracy_function=cuml.metrics.accuracy_score,
        ),
        AlgorithmPair(
            sklearn.svm.SVR,
            cuml.svm.SVR,
            shared_args={"kernel": "rbf"},
            cuml_args={},
            name="SVR-RBF",
            accepts_labels=True,
            accuracy_function=cuml.metrics.r2_score,
        ),
        AlgorithmPair(
            sklearn.svm.SVR,
            cuml.svm.SVR,
            shared_args={"kernel": "linear"},
            cuml_args={},
            name="SVR-Linear",
            accepts_labels=True,
            accuracy_function=cuml.metrics.r2_score,
        ),
        AlgorithmPair(
            sklearn.svm.LinearSVC,
            cuml.svm.LinearSVC,
            shared_args={},
            cuml_args={},
            name="LinearSVC",
            accepts_labels=True,
            accuracy_function=cuml.metrics.accuracy_score,
        ),
        AlgorithmPair(
            sklearn.svm.LinearSVR,
            cuml.svm.LinearSVR,
            shared_args={},
            cuml_args={},
            name="LinearSVR",
            accepts_labels=True,
            # LinearSVR is a regressor: score with r2 (was accuracy_score).
            accuracy_function=cuml.metrics.r2_score,
        ),
        AlgorithmPair(
            sklearn.neighbors.KNeighborsClassifier,
            cuml.neighbors.KNeighborsClassifier,
            shared_args={},
            cuml_args={},
            name="KNeighborsClassifier",
            accepts_labels=True,
            accuracy_function=cuml.metrics.accuracy_score,
            bench_func=fit_predict,
        ),
        AlgorithmPair(
            sklearn.neighbors.KNeighborsRegressor,
            cuml.neighbors.KNeighborsRegressor,
            shared_args={},
            cuml_args={},
            name="KNeighborsRegressor",
            accepts_labels=True,
            accuracy_function=cuml.metrics.r2_score,
            bench_func=fit_predict,
        ),
        AlgorithmPair(
            sklearn.naive_bayes.MultinomialNB,
            cuml.naive_bayes.MultinomialNB,
            shared_args={},
            cuml_args={},
            name="MultinomialNB",
            accepts_labels=True,
            accuracy_function=cuml.metrics.accuracy_score,
        ),
        # --- Forest inference (FIL / GTIL / treelite) benchmark pairs ---
        AlgorithmPair(
            treelite,
            cuml.ForestInference,
            shared_args=dict(num_rounds=100, max_depth=10),
            cuml_args=dict(
                fil_algo="AUTO",
                output_class=False,
                threshold=0.5,
                storage_type="auto",
                precision="float32",
            ),
            name="FIL",
            accepts_labels=False,
            setup_cpu_func=_build_treelite_classifier,
            setup_cuml_func=_build_fil_classifier,
            cpu_data_prep_hook=_treelite_format_hook,
            accuracy_function=_treelite_fil_accuracy_score,
            bench_func=predict,
        ),
        AlgorithmPair(
            treelite,
            cuml.ForestInference,
            shared_args=dict(n_estimators=100, max_leaf_nodes=2**10),
            cuml_args=dict(
                fil_algo="AUTO",
                output_class=False,
                threshold=0.5,
                # boolean True selects sparse storage in classic FIL
                storage_type=True,
                precision="float32",
            ),
            name="Sparse-FIL-SKL",
            accepts_labels=False,
            setup_cpu_func=_build_cpu_skl_classifier,
            setup_cuml_func=_build_fil_skl_classifier,
            accuracy_function=_treelite_fil_accuracy_score,
            bench_func=predict,
        ),
        AlgorithmPair(
            treelite,
            cuml.experimental.ForestInference,
            shared_args=dict(num_rounds=100, max_depth=10),
            cuml_args=dict(output_class=False),
            name="FILEX",
            accepts_labels=False,
            setup_cpu_func=_build_treelite_classifier,
            setup_cuml_func=_build_fil_classifier,
            cpu_data_prep_hook=_treelite_format_hook,
            accuracy_function=_treelite_fil_accuracy_score,
            bench_func=predict,
        ),
        AlgorithmPair(
            treelite,
            cuml.experimental.ForestInference,
            shared_args=dict(num_rounds=100, max_depth=10),
            cuml_args=dict(
                fil_algo="NAIVE",
                storage_type="DENSE",
                output_class=False,
                precision="float32",
                infer_type="default",
            ),
            name="FILEX-Optimized",
            accepts_labels=False,
            setup_cpu_func=_build_treelite_classifier,
            setup_cuml_func=_build_optimized_fil_classifier,
            cpu_data_prep_hook=_treelite_format_hook,
            accuracy_function=_treelite_fil_accuracy_score,
            bench_func=predict,
        ),
        AlgorithmPair(
            treelite,
            cuml.ForestInference,
            shared_args=dict(num_rounds=100, max_depth=10),
            cuml_args=dict(
                fil_algo="NAIVE",
                storage_type="DENSE",
                output_class=False,
                threshold=0.5,
                precision="float32",
            ),
            name="FIL-Optimized",
            accepts_labels=False,
            setup_cpu_func=_build_treelite_classifier,
            setup_cuml_func=_build_optimized_fil_classifier,
            cpu_data_prep_hook=_treelite_format_hook,
            accuracy_function=_treelite_fil_accuracy_score,
            bench_func=predict,
        ),
        AlgorithmPair(
            treelite,
            cuml.experimental.ForestInference,
            shared_args=dict(n_estimators=100, max_leaf_nodes=2**10),
            cuml_args=dict(output_class=False),
            name="Sparse-FILEX-SKL",
            accepts_labels=False,
            setup_cpu_func=_build_cpu_skl_classifier,
            setup_cuml_func=_build_fil_skl_classifier,
            accuracy_function=_treelite_fil_accuracy_score,
            bench_func=predict,
        ),
        AlgorithmPair(
            treelite,
            cuml.experimental.ForestInference,
            shared_args=dict(
                num_rounds=100, max_depth=10, infer_type="per_tree"
            ),
            cuml_args=dict(
                fil_algo="NAIVE",
                storage_type="DENSE",
                output_class=False,
                precision="float32",
            ),
            name="FILEX-PerTree",
            accepts_labels=False,
            setup_cpu_func=_build_gtil_classifier,
            setup_cuml_func=_build_optimized_fil_classifier,
            cpu_data_prep_hook=_numpy_format_hook,
            accuracy_function=_treelite_fil_accuracy_score,
            bench_func=predict,
        ),
        AlgorithmPair(
            umap.UMAP if has_umap() else None,
            cuml.manifold.UMAP,
            shared_args=dict(n_neighbors=5, n_epochs=500),
            name="UMAP-Unsupervised",
            accepts_labels=False,
            accuracy_function=cuml.metrics.trustworthiness,
        ),
        AlgorithmPair(
            umap.UMAP if has_umap() else None,
            cuml.manifold.UMAP,
            shared_args=dict(n_neighbors=5, n_epochs=500),
            name="UMAP-Supervised",
            accepts_labels=True,
            accuracy_function=cuml.metrics.trustworthiness,
        ),
        # --- dense preprocessing pairs ---
        AlgorithmPair(
            sklearn.preprocessing.StandardScaler,
            StandardScaler,
            shared_args=dict(),
            name="StandardScaler",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            sklearn.preprocessing.MinMaxScaler,
            MinMaxScaler,
            shared_args=dict(),
            name="MinMaxScaler",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            sklearn.preprocessing.MaxAbsScaler,
            MaxAbsScaler,
            shared_args=dict(),
            name="MaxAbsScaler",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            sklearn.preprocessing.Normalizer,
            Normalizer,
            shared_args=dict(),
            name="Normalizer",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            skSimpleImputer,
            SimpleImputer,
            shared_args=dict(),
            name="SimpleImputer",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            sklearn.preprocessing.RobustScaler,
            RobustScaler,
            shared_args=dict(),
            name="RobustScaler",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            sklearn.preprocessing.PolynomialFeatures,
            PolynomialFeatures,
            shared_args=dict(),
            name="PolynomialFeatures",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        # --- sparse preprocessing pairs (input type selected by name
        #     prefix in the benchmark runner) ---
        AlgorithmPair(
            sklearn.preprocessing.StandardScaler,
            StandardScaler,
            shared_args=dict(),
            name="SparseCSRStandardScaler",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            sklearn.preprocessing.MinMaxScaler,
            MinMaxScaler,
            shared_args=dict(),
            name="SparseCSRMinMaxScaler",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            sklearn.preprocessing.MaxAbsScaler,
            MaxAbsScaler,
            shared_args=dict(),
            name="SparseCSRMaxAbsScaler",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            sklearn.preprocessing.Normalizer,
            Normalizer,
            shared_args=dict(),
            name="SparseCSRNormalizer",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            sklearn.preprocessing.RobustScaler,
            RobustScaler,
            shared_args=dict(),
            name="SparseCSCRobustScaler",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            skSimpleImputer,
            SimpleImputer,
            shared_args=dict(),
            name="SparseCSCSimpleImputer",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        AlgorithmPair(
            sklearn.preprocessing.PolynomialFeatures,
            PolynomialFeatures,
            shared_args=dict(),
            name="SparseCSRPolynomialFeatures",
            accepts_labels=False,
            bench_func=fit_transform,
        ),
        # --- multi-node multi-GPU (dask) pairs; no CPU counterpart ---
        AlgorithmPair(
            None,
            cuml.dask.neighbors.KNeighborsClassifier,
            shared_args={},
            cuml_args={},
            name="MNMG.KNeighborsClassifier",
            bench_func=fit_predict,
            accepts_labels=True,
            accuracy_function=cuml.metrics.accuracy_score,
        ),
        AlgorithmPair(
            None,
            cuml.dask.cluster.KMeans,
            shared_args=dict(n_clusters=8, max_iter=300, n_init=1),
            cpu_args=dict(init="k-means++"),
            cuml_args=dict(init="scalable-k-means++"),
            name="MNMG.KMeans",
            bench_func=fit_predict,
            accepts_labels=False,
            accuracy_function=metrics.homogeneity_score,
        ),
        AlgorithmPair(
            None,
            cuml.dask.cluster.DBSCAN,
            shared_args=dict(eps=3, min_samples=2),
            cpu_args=dict(algorithm="brute"),
            name="MNMG.DBSCAN",
            bench_func=fit_predict,
            accepts_labels=False,
        ),
        AlgorithmPair(
            None,
            cuml.dask.manifold.UMAP,
            shared_args=dict(n_neighbors=5, n_epochs=500),
            name="MNMG.UMAP-Unsupervised",
            bench_func=transform,
            setup_cuml_func=_build_mnmg_umap,
            accepts_labels=False,
            accuracy_function=cuml.metrics.trustworthiness,
        ),
        AlgorithmPair(
            None,
            cuml.dask.manifold.UMAP,
            shared_args=dict(n_neighbors=5, n_epochs=500),
            name="MNMG.UMAP-Supervised",
            bench_func=transform,
            setup_cuml_func=_build_mnmg_umap,
            accepts_labels=True,
            accuracy_function=cuml.metrics.trustworthiness,
        ),
        AlgorithmPair(
            None,
            cuml.dask.neighbors.NearestNeighbors,
            shared_args=dict(n_neighbors=64),
            cpu_args=dict(algorithm="brute", n_jobs=-1),
            cuml_args={},
            name="MNMG.NearestNeighbors",
            accepts_labels=False,
            bench_func=fit_kneighbors,
        ),
        AlgorithmPair(
            None,
            cuml.dask.decomposition.TruncatedSVD,
            shared_args=dict(n_components=10),
            name="MNMG.tSVD",
            accepts_labels=False,
        ),
        AlgorithmPair(
            None,
            cuml.dask.decomposition.PCA,
            shared_args=dict(n_components=10),
            name="MNMG.PCA",
            accepts_labels=False,
        ),
        AlgorithmPair(
            None,
            cuml.dask.linear_model.LinearRegression,
            shared_args={},
            name="MNMG.LinearRegression",
            bench_func=fit_predict,
            accepts_labels=True,
            accuracy_function=metrics.r2_score,
        ),
        AlgorithmPair(
            None,
            cuml.dask.linear_model.Lasso,
            shared_args={},
            name="MNMG.Lasso",
            bench_func=fit_predict,
            accepts_labels=True,
            accuracy_function=metrics.r2_score,
        ),
        AlgorithmPair(
            None,
            cuml.dask.linear_model.ElasticNet,
            shared_args={"alpha": 0.1, "l1_ratio": 0.5},
            name="MNMG.ElasticNet",
            bench_func=fit_predict,
            accepts_labels=True,
            accuracy_function=metrics.r2_score,
        ),
        AlgorithmPair(
            None,
            cuml.dask.linear_model.Ridge,
            shared_args={},
            name="MNMG.Ridge",
            bench_func=fit_predict,
            accepts_labels=True,
            accuracy_function=metrics.r2_score,
        ),
        AlgorithmPair(
            None,
            cuml.dask.neighbors.KNeighborsRegressor,
            shared_args={},
            cuml_args={},
            name="MNMG.KNeighborsRegressor",
            bench_func=fit_predict,
            accepts_labels=True,
            accuracy_function=cuml.metrics.r2_score,
        ),
    ]

    return algorithms
def algorithm_by_name(name):
    """Returns the algorithm pair with the name 'name' (case-insensitive)"""
    wanted = name.lower()
    for algo in all_algorithms():
        if algo.name.lower() == wanted:
            return algo
    return None
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/bench_regression.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from .utils.utils import _benchmark_algo, fixture_generation_helper
from .utils.utils import bench_step # noqa: F401
from .. import datagen
#
# Core tests
#
@pytest.fixture(
    **fixture_generation_helper(
        {"n_samples": [1000, 10000], "n_features": [5, 400]}
    )
)
def regression1(request):
    """Parametrized cupy regression dataset (up to 10k rows x 400 cols)."""
    cfg = request.param
    data = datagen.gen_data(
        "regression",
        "cupy",
        n_samples=cfg["n_samples"],
        n_features=cfg["n_features"],
    )
    return data, {"dataset_type": "regression", **cfg}
@pytest.fixture(
**fixture_generation_helper(
{"n_samples": [500, 4000], "n_features": [5, 400]}
)
)
def regression2(request):
data = datagen.gen_data(
"regression",
"cupy",
n_samples=request.param["n_samples"],
n_features=request.param["n_features"],
)
return data, {"dataset_type": "regression", **request.param}
# Each bench_* function simply delegates to _benchmark_algo, which runs the
# GPU benchmark and, in SG mode, the CPU and NVTX comparisons as well.
def bench_linear_regression(
    gpubenchmark, bench_step, regression1  # noqa: F811
):
    """Benchmark LinearRegression training/inference."""
    _benchmark_algo(gpubenchmark, "LinearRegression", bench_step, regression1)
def bench_lasso(gpubenchmark, bench_step, regression1):  # noqa: F811
    """Benchmark Lasso training/inference."""
    _benchmark_algo(gpubenchmark, "Lasso", bench_step, regression1)
def bench_elastic(gpubenchmark, bench_step, regression1):  # noqa: F811
    """Benchmark ElasticNet training/inference."""
    _benchmark_algo(gpubenchmark, "ElasticNet", bench_step, regression1)
def bench_ridge(gpubenchmark, bench_step, regression1):  # noqa: F811
    """Benchmark Ridge training/inference."""
    _benchmark_algo(gpubenchmark, "Ridge", bench_step, regression1)
def bench_knnregressor(gpubenchmark, bench_step, regression1):  # noqa: F811
    """Benchmark KNeighborsRegressor training/inference."""
    _benchmark_algo(
        gpubenchmark, "KNeighborsRegressor", bench_step, regression1
    )
def bench_svr_rbf(gpubenchmark, bench_step, regression1):  # noqa: F811
    """Benchmark SVR with RBF kernel."""
    _benchmark_algo(gpubenchmark, "SVR-RBF", bench_step, regression1)
def bench_svr_linear(gpubenchmark, bench_step, regression2):  # noqa: F811
    """Benchmark SVR with linear kernel (smaller regression2 dataset)."""
    _benchmark_algo(gpubenchmark, "SVR-Linear", bench_step, regression2)
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/bench_preprocessing.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from .utils.utils import _benchmark_algo
from .utils.utils import bench_step # noqa: F401
from .. import datagen
#
# Core tests
#
@pytest.fixture(scope="session")
def regression(request):
    """Fixed-size regression dataset shared by the preprocessing benchmarks."""
    dataset_kwargs = {
        "dataset_type": "regression",
        "n_samples": 10000,
        "n_features": 100,
    }
    dataset = datagen.gen_data(
        dataset_kwargs["dataset_type"],
        "cupy",
        n_samples=dataset_kwargs["n_samples"],
        n_features=dataset_kwargs["n_features"],
    )
    # dataset_kwargs describes how the data was built (used by NVTX profiling)
    return dataset, dataset_kwargs
def bench_standardscaler(gpubenchmark, bench_step, regression):  # noqa: F811
    """Benchmark StandardScaler fit/transform."""
    _benchmark_algo(gpubenchmark, "StandardScaler", bench_step, regression)
def bench_maxabsscaler(gpubenchmark, bench_step, regression):  # noqa: F811
    """Benchmark MaxAbsScaler fit/transform."""
    _benchmark_algo(gpubenchmark, "MaxAbsScaler", bench_step, regression)
def bench_normalizer(gpubenchmark, bench_step, regression):  # noqa: F811
    """Benchmark Normalizer fit/transform."""
    _benchmark_algo(gpubenchmark, "Normalizer", bench_step, regression)
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/bench_classification.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from .utils.utils import _benchmark_algo, fixture_generation_helper
from .utils.utils import bench_step # noqa: F401
from .. import datagen
#
# Core tests
#
@pytest.fixture(
    **fixture_generation_helper(
        {"n_samples": [1000, 10000], "n_features": [5, 500]}
    )
)
def classification(request):
    """Classification dataset parametrized over (n_samples, n_features)."""
    data = datagen.gen_data(
        "classification",
        "cupy",
        n_samples=request.param["n_samples"],
        n_features=request.param["n_features"],
    )
    # Second element describes the data; used later by the NVTX benchmark.
    return data, {"dataset_type": "classification", **request.param}
def bench_logistic_regression(
    gpubenchmark, bench_step, classification  # noqa: F811
):
    """Benchmark LogisticRegression training/inference."""
    _benchmark_algo(
        gpubenchmark, "LogisticRegression", bench_step, classification
    )
def bench_mbsgcclf(gpubenchmark, bench_step, classification):  # noqa: F811
    """Benchmark MBSGDClassifier training/inference."""
    _benchmark_algo(
        gpubenchmark, "MBSGDClassifier", bench_step, classification
    )
def bench_knnclassifier(
    gpubenchmark, bench_step, classification  # noqa: F811
):
    """Benchmark KNeighborsClassifier training/inference."""
    _benchmark_algo(
        gpubenchmark, "KNeighborsClassifier", bench_step, classification
    )
def bench_svc_linear(gpubenchmark, bench_step, classification):  # noqa: F811
    """Benchmark SVC with linear kernel."""
    _benchmark_algo(gpubenchmark, "SVC-Linear", bench_step, classification)
def bench_svc_rbf(gpubenchmark, bench_step, classification):  # noqa: F811
    """Benchmark SVC with RBF kernel."""
    _benchmark_algo(gpubenchmark, "SVC-RBF", bench_step, classification)
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/bench_random_forest.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from .utils.utils import _benchmark_algo, fixture_generation_helper
from .utils.utils import bench_step # noqa: F401
from .. import datagen
#
# Core tests
#
@pytest.fixture(
    **fixture_generation_helper(
        {"n_samples": [1000, 10000], "n_features": [5, 500]}
    )
)
def classification(request):
    """Classification dataset for the RandomForestClassifier benchmark."""
    data = datagen.gen_data(
        "classification",
        "cupy",
        n_samples=request.param["n_samples"],
        n_features=request.param["n_features"],
    )
    return data, {"dataset_type": "classification", **request.param}
@pytest.fixture(
    **fixture_generation_helper(
        {"n_samples": [1000, 10000], "n_features": [5, 500]}
    )
)
def regression(request):
    """Regression dataset for the RandomForestRegressor benchmark."""
    data = datagen.gen_data(
        "regression",
        "cupy",
        n_samples=request.param["n_samples"],
        n_features=request.param["n_features"],
    )
    return data, {"dataset_type": "regression", **request.param}
# NOTE(review): the FIL benchmark below is intentionally disabled (kept as a
# string literal); confirm whether it should be re-enabled or removed.
"""
def bench_fil(gpubenchmark, bench_step, classification):
    _benchmark_algo(gpubenchmark, 'FIL',
                    bench_step, classification)
"""
def bench_rfc(gpubenchmark, bench_step, classification):  # noqa: F811
    """Benchmark RandomForestClassifier training/inference."""
    _benchmark_algo(
        gpubenchmark, "RandomForestClassifier", bench_step, classification
    )
def bench_rfr(gpubenchmark, bench_step, regression):  # noqa: F811
    """Benchmark RandomForestRegressor training/inference."""
    _benchmark_algo(
        gpubenchmark, "RandomForestRegressor", bench_step, regression
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/pytest.ini | [pytest]
addopts =
--benchmark-warmup=on
--benchmark-warmup-iterations=1
--benchmark-min-rounds=3
--benchmark-columns="min, max, mean, stddev, outliers, gpu_mem, rounds"
markers =
managedmem_on: RMM managed memory enabled
managedmem_off: RMM managed memory disabled
poolallocator_on: RMM pool allocator enabled
poolallocator_off: RMM pool allocator disabled
ETL: benchmarks for ETL steps
small: small datasets
tiny: tiny datasets
ML: benchmarks for ML steps
python_classes =
Bench*
Test*
python_files =
bench_*
test_*
python_functions =
bench_*
test_*
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/bench_dimensionality_reduction.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from .utils.utils import _benchmark_algo, fixture_generation_helper
from .utils.utils import bench_step # noqa: F401
from .. import datagen
#
# Core tests
#
@pytest.fixture(
    **fixture_generation_helper(
        {"n_samples": [1000, 10000], "n_features": [5, 500]}
    )
)
def blobs1(request):
    """Blobs dataset parametrized over (n_samples, n_features)."""
    data = datagen.gen_data(
        "blobs",
        "cupy",
        n_samples=request.param["n_samples"],
        n_features=request.param["n_features"],
    )
    return data, {"dataset_type": "blobs", **request.param}
@pytest.fixture(scope="session")
def blobs2(request):
    """Fixed 10k x 100 blobs dataset (shared, session-scoped)."""
    dataset_kwargs = {
        "dataset_type": "blobs",
        "n_samples": 10000,
        "n_features": 100,
    }
    dataset = datagen.gen_data(
        dataset_kwargs["dataset_type"],
        "cupy",
        n_samples=dataset_kwargs["n_samples"],
        n_features=dataset_kwargs["n_features"],
    )
    return dataset, dataset_kwargs
@pytest.fixture(scope="session")
def blobs3(request):
    """Larger 50k x 100 blobs dataset for the tSVD/PCA benchmarks."""
    dataset_kwargs = {
        "dataset_type": "blobs",
        "n_samples": 50000,
        "n_features": 100,
    }
    dataset = datagen.gen_data(
        dataset_kwargs["dataset_type"],
        "cupy",
        n_samples=dataset_kwargs["n_samples"],
        n_features=dataset_kwargs["n_features"],
    )
    return dataset, dataset_kwargs
def bench_kmeans(gpubenchmark, bench_step, blobs1):  # noqa: F811
    """Benchmark KMeans over the parametrized blobs dataset."""
    _benchmark_algo(gpubenchmark, "KMeans", bench_step, blobs1)
@pytest.mark.parametrize(
    "algo_name",
    [
        "DBSCAN",
        "UMAP-Unsupervised",
        "UMAP-Supervised",
        "NearestNeighbors",
        "TSNE",
    ],
)
def bench_with_blobs(
    gpubenchmark, algo_name, bench_step, blobs2  # noqa: F811
):
    # Lump together a bunch of simple blobs-based tests
    _benchmark_algo(gpubenchmark, algo_name, bench_step, blobs2)
@pytest.mark.parametrize("n_components", [2, 10, 50])
@pytest.mark.parametrize("algo_name", ["tSVD", "PCA"])
def bench_dimensionality_reduction(
    gpubenchmark, algo_name, bench_step, blobs3, n_components  # noqa: F811
):
    """Benchmark tSVD/PCA across several output dimensionalities."""
    _benchmark_algo(
        gpubenchmark,
        algo_name,
        bench_step,
        blobs3,
        # Forwarded to the model constructor via setup_bench
        setup_kwargs={"n_components": n_components},
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark/automated | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/utils/auto_nvtx_bench.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
from cuml.benchmark import datagen, algorithms
from cuml.benchmark.automated.utils.utils import setup_bench
parser = argparse.ArgumentParser(
prog="launch-benchmark",
description=r"""
Command-line cuML benchmark runner.
Examples:
python run_benchmarks.py \
--algo_name LinearRegression \
--dataset_type regression
""",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--algo_name",
type=str,
default="",
help="Algorithm name",
)
parser.add_argument(
"--dataset_type",
type=str,
default="",
help="Dataset type",
)
parser.add_argument(
"--n_samples",
type=int,
default=10000,
help="Number of samples",
)
parser.add_argument(
"--n_features",
type=int,
default=100,
help="Number of features",
)
parser.add_argument(
"--dataset_format",
type=str,
default="cupy",
help="Dataset format",
)
parser.add_argument(
"--data_kwargs",
type=json.loads,
default={},
help="Data generation options",
)
parser.add_argument(
"--setup_kwargs",
type=json.loads,
default={},
help="Algorithm setup options",
)
parser.add_argument(
"--training_kwargs",
type=json.loads,
default={},
help="Algorithm training options",
)
parser.add_argument(
"--inference_kwargs",
type=json.loads,
default={},
help="Algorithm inference options",
)
parser.add_argument(
"--json",
type=str,
default="",
help="JSON file containing benchmark parameters",
)
args = parser.parse_args()
def parse_json(args):
    """Overwrite CLI arguments in place from the JSON file named by ``args.json``.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments; any key found in the JSON file
        replaces the attribute of the same name on this namespace.
    """
    with open(args.json) as json_file:
        params = json.load(json_file)
    # Keys that may be overridden from the JSON file. Anything else in the
    # file is silently ignored, matching the original behavior.
    overridable = (
        "algo_name",
        "dataset_type",
        "n_samples",
        "n_features",
        "dataset_format",
        "data_kwargs",
        "setup_kwargs",
        "training_kwargs",
        "inference_kwargs",
    )
    # Overwriting
    for key in overridable:
        if key in params:
            setattr(args, key, params[key])
# Parameters from a JSON file (if provided) override the CLI values.
if len(args.json):
    parse_json(args)
# Generate the dataset up front so data generation stays outside the
# profiled region.
dataset = datagen.gen_data(
    args.dataset_type,
    args.dataset_format,
    n_samples=args.n_samples,
    n_features=args.n_features,
    **args.data_kwargs,
)
algo = algorithms.algorithm_by_name(args.algo_name)
# setup_bench with 'inference' trains the model during setup, so the run
# below exercises both training (inside setup) and inference.
cuml_setup = setup_bench(
    "cuml", algo, "inference", dataset, args.setup_kwargs, args.training_kwargs
)
algo.run_cuml(dataset, bench_args=args.inference_kwargs, **cuml_setup)
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark/automated | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/utils/utils.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# rapids_pytest_benchmark extends pytest-benchmark with GPU columns (see
# pytest.ini's "gpu_mem"); fall back to plain pytest-benchmark when absent.
try:
    from rapids_pytest_benchmark import setFixtureParamNames
except ImportError:
    print(
        "\n\nWARNING: rapids_pytest_benchmark is not installed, "
        "falling back to pytest_benchmark fixtures.\n"
    )
    # if rapids_pytest_benchmark is not available, just perform time-only
    # benchmarking and replace the util functions with nops
    import pytest_benchmark
    gpubenchmark = pytest_benchmark.plugin.benchmark
    def setFixtureParamNames(*args, **kwargs):
        # No-op stand-in for the rapids_pytest_benchmark helper.
        pass
import os
import json
import time
import math
import itertools as it
import warnings
from cuml.internals.safe_imports import cpu_only_import, gpu_only_import
import pytest
from cuml.benchmark import datagen, algorithms
from cuml.benchmark.nvtx_benchmark import Profiler
from dask.distributed import wait
import dask.array as da
import dask.dataframe as df
from copy import copy
from cuml.benchmark.bench_helper_funcs import (
pass_func,
fit,
predict,
transform,
kneighbors,
fit_predict,
fit_transform,
fit_kneighbors,
)
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
cudf = gpu_only_import("cudf")
def distribute(client, data):
    """Distribute ``data`` across the workers of the Dask ``client``.

    numpy/cupy arrays become dask arrays; cudf DataFrame/Series become dask
    dataframes. Chunks are sized so there is one chunk per worker, then the
    collection is persisted, waited on and rebalanced. Returns ``None``
    unchanged when ``data`` is ``None``.
    """
    if data is not None:
        n_rows = data.shape[0]
        n_workers = len(client.scheduler_info()["workers"])
        # One chunk per worker (last chunk may be smaller)
        rows_per_chunk = math.ceil(n_rows / n_workers)
        if isinstance(data, (np.ndarray, cp.ndarray)):
            dask_array = da.from_array(
                x=data, chunks={0: rows_per_chunk, 1: -1}
            )
            dask_array = dask_array.persist()
            wait(dask_array)
            client.rebalance()
            return dask_array
        elif isinstance(data, (cudf.DataFrame, cudf.Series)):
            dask_df = df.from_pandas(data, chunksize=rows_per_chunk)
            dask_df = dask_df.persist()
            wait(dask_df)
            client.rebalance()
            return dask_df
        else:
            raise ValueError("Could not distribute data")
def nvtx_profiling(
    algo_name, data_kwargs, setup_kwargs, training_kwargs, inference_kwargs
):
    """Re-run the benchmark under the NVTX profiler in a subprocess.

    The dataset description and per-phase kwargs are serialized to JSON and
    passed on the command line of auto_nvtx_bench.py, which is then executed
    through nvtx_benchmark.Profiler.
    """
    dataset_type = data_kwargs["dataset_type"]
    n_samples = data_kwargs["n_samples"]
    n_features = data_kwargs["n_features"]
    dataset_format = (
        data_kwargs["dataset_format"]
        if "dataset_format" in data_kwargs
        else "cupy"
    )
    # Remaining data kwargs (shape/format removed) are forwarded as JSON.
    data_kwargs_edited = copy(data_kwargs)
    for param in ["dataset_type", "n_samples", "n_features", "dataset_format"]:
        data_kwargs_edited.pop(param, None)
    # auto_nvtx_bench.py lives next to this file
    path = os.path.dirname(os.path.realpath(__file__))
    command = """
    python {path}/auto_nvtx_bench.py
    --algo_name {algo_name}
    --dataset_type {dataset_type}
    --n_samples {n_samples}
    --n_features {n_features}
    --dataset_format {dataset_format}
    --data_kwargs {data_kwargs}
    --setup_kwargs {setup_kwargs}
    --training_kwargs {training_kwargs}
    --inference_kwargs {inference_kwargs}
    """.format(
        path=path,
        algo_name=algo_name,
        dataset_type=dataset_type,
        n_samples=n_samples,
        n_features=n_features,
        dataset_format=dataset_format,
        data_kwargs=json.dumps(data_kwargs_edited, separators=(",", ":")),
        setup_kwargs=json.dumps(setup_kwargs, separators=(",", ":")),
        training_kwargs=json.dumps(training_kwargs, separators=(",", ":")),
        inference_kwargs=json.dumps(inference_kwargs, separators=(",", ":")),
    )
    # Collapse the multi-line template into a single shell command
    command = command.replace("\n", "").replace("\t", " ")
    command = " ".join(command.split())
    print("\n\n" + "\033[96m" + "=x" * 48)
    print("=x" * 20 + " NVTX BENCHMARK " + "=x" * 20)
    profiler = Profiler()
    profiler.profile(command)
    print("=x" * 48)
    print("=x" * 48 + "\033[0m" + "\n")
def cpu_bench(algo, bench_step, dataset, inference_args, cpu_setup):
    """Time the CPU equivalent of the algorithm and print the result.

    Returns silently when the AlgorithmPair has no CPU implementation.
    Timing uses process time rather than wall-clock time.
    """
    if algo.cpu_class is None:
        return
    t = time.process_time()
    if bench_step == "training":
        algo.run_cpu(dataset, **cpu_setup)
    elif bench_step == "inference":
        algo.run_cpu(dataset, **inference_args, **cpu_setup)
    elapsed_time = time.process_time() - t
    print("\n" + "\033[33m" + "=x" * 20 + " CPU BENCHMARK " + "=x" * 20)
    print(algo.name + " : " + str(algo.cpu_class))
    print("\tbench_function: " + str(algo.bench_func))
    print("\truntime: " + str(elapsed_time))
    print("=x" * 48 + "\033[0m" + "\n")
def setup_bench(
    platform, algo, bench_step, dataset, setup_kwargs, training_kwargs
):
    """
    Will setup the AlgorithmPair and the model to be ready for benchmark
    Parameters
    ----------
    platform :
        Either 'cpu' or 'cuml'
    algo :
        AlgorithmPair instance (see algorithms.py)
    bench_step :
        Either 'training' or 'inference', describe the algorithm/model
        step to be benchmarked
    dataset :
        Dataset data
    setup_kwargs :
        Algorithm/model setup kwargs
    training_kwargs :
        Algorithm/model training kwargs
    Returns
    -------
    dict
        Setup result to be splatted into ``algo.run_cuml``/``algo.run_cpu``.
        For 'inference', also contains the trained model state under
        'cuml_setup_result' / 'cpu_setup_result'.
    """
    # Generate the model
    if platform == "cuml":
        setup = algo.setup_cuml(dataset, **setup_kwargs)
    elif platform == "cpu":
        setup = algo.setup_cpu(dataset, **setup_kwargs)
    # Set the bench_func to perform training
    if bench_step == "training":
        if hasattr(algo.cuml_class, "fit"):
            algo.bench_func = fit
        # Model cannot be trained (special construction)
        elif algo.setup_cuml_func:
            pytest.skip("Model cannot be trained (special construction)")
        else:
            raise ValueError("Training function not found")
    # Train the model and then set the bench_func to perform inference
    elif bench_step == "inference":
        if hasattr(algo.cuml_class, "fit"):
            algo.bench_func = fit
        # Model cannot be trained (special construction)
        elif algo.setup_cuml_func:
            algo.bench_func = pass_func
        else:
            raise ValueError("Training function not found")
        # Train now so that only inference is measured by the caller
        if platform == "cuml":
            setup["cuml_setup_result"] = algo.run_cuml(
                dataset, bench_args=training_kwargs, **setup
            )
        elif platform == "cpu":
            setup["cpu_setup_result"] = algo.run_cpu(
                dataset, bench_args=training_kwargs, **setup
            )
        # Pick the inference entry point exposed by the cuML class
        if hasattr(algo.cuml_class, "predict"):
            algo.bench_func = predict
        elif hasattr(algo.cuml_class, "transform"):
            algo.bench_func = transform
        elif hasattr(algo.cuml_class, "kneighbors"):
            algo.bench_func = kneighbors
        elif any(
            hasattr(algo.cuml_class, attr)
            for attr in ["fit_predict", "fit_transform", "fit_kneighbors"]
        ):
            # Only fused fit_* entry points exist: inference cannot be
            # isolated, so benchmark training+inference together.
            warnings.warn(
                "Inference cannot be done separately, "
                "doing both training and inference"
            )
            if hasattr(algo.cuml_class, "fit_predict"):
                algo.bench_func = fit_predict
            elif hasattr(algo.cuml_class, "fit_transform"):
                algo.bench_func = fit_transform
            elif hasattr(algo.cuml_class, "fit_kneighbors"):
                algo.bench_func = fit_kneighbors
        else:
            raise ValueError("Inference function not found")
    else:
        raise ValueError("bench_func should be either training or inference")
    return setup
def _benchmark_algo(
    benchmarker,
    algo_name,
    bench_step,
    dataset,
    setup_kwargs=None,
    training_kwargs=None,
    inference_kwargs=None,
    client=None,
):
    """
    Benchmark utility
    Parameters
    ----------
    benchmarker :
        Pytest benchmark function, allows to enclose the code
        that should be benchmarked
    algo_name :
        Algorithm/model name, can be found in the algorithms.py file
    bench_step :
        Either 'training' or 'inference', describe the algorithm/model
        step to be benchmarked
    dataset :
        Tuple with the data and a dictionary that describes how it was built.
        The dictionary can be later used during the NVTX benchmark.
    setup_kwargs :
        Algorithm/model setup kwargs (defaults to an empty dict)
    training_kwargs :
        Algorithm/model training kwargs (defaults to an empty dict)
    inference_kwargs :
        Algorithm/model inference kwargs (defaults to an empty dict)
    client :
        Dask client used in MNMG settings
    """
    # FIX: the previous `={}` mutable defaults were mutated below
    # (setup_kwargs["client"] = client), leaking state — e.g. a stale Dask
    # client — into every later call relying on the shared default dict.
    # A shallow copy also keeps the caller's dict untouched.
    setup_kwargs = {} if setup_kwargs is None else dict(setup_kwargs)
    training_kwargs = {} if training_kwargs is None else training_kwargs
    inference_kwargs = {} if inference_kwargs is None else inference_kwargs
    # Get data and dict describing how it was built
    dataset, data_kwargs = dataset
    # The presence of a Dask client signifies MNMG mode
    MNMG_mode = client is not None
    # Distribute data in MNMG settings
    if MNMG_mode:
        # Add the client to the setup kwargs used by model instantiation
        setup_kwargs["client"] = client
        # Exception : data is scattered by the MNMG DBSCAN model itself
        if algo_name != "MNMG.DBSCAN":
            # Distribute data
            dataset = [distribute(client, d) for d in dataset]
    # Search AlgorithmPair instance by name
    algo = algorithms.algorithm_by_name(algo_name)
    # Setup the AlgorithmPair and the model to be ready for benchmark on GPU
    cuml_setup = setup_bench(
        "cuml", algo, bench_step, dataset, setup_kwargs, training_kwargs
    )
    # Pytest benchmark
    if bench_step == "training":
        benchmarker(
            algo.run_cuml, dataset, bench_args=training_kwargs, **cuml_setup
        )
    elif bench_step == "inference":
        benchmarker(
            algo.run_cuml, dataset, bench_args=inference_kwargs, **cuml_setup
        )
    # CPU benchmark and NVTX benchmark (only in SG mode)
    if not MNMG_mode:
        # Check that the cuML model has a CPU equivalency
        if algo.cpu_class:
            # Convert dataset to a Numpy array
            cpu_dataset = datagen._convert_to_numpy(dataset)
            # Setup the AlgorithmPair and the model
            # to be ready for benchmark on CPU
            cpu_setup = setup_bench(
                "cpu",
                algo,
                bench_step,
                cpu_dataset,
                setup_kwargs,
                training_kwargs,
            )
            # CPU benchmark
            cpu_bench(
                algo, bench_step, cpu_dataset, inference_kwargs, cpu_setup
            )
        # NVTX benchmark performs both the training and inference at once
        # but only when bench_step == 'inference'
        if bench_step == "inference":
            # NVTX benchmark
            nvtx_profiling(
                algo_name,
                data_kwargs,
                setup_kwargs,
                training_kwargs,
                inference_kwargs,
            )
def fixture_generation_helper(params):
    """Build ``pytest.fixture`` kwargs covering the cartesian product of *params*.

    ``params`` maps parameter names to lists of candidate values. The result
    has session scope, one kwargs-dict per combination, and a readable
    dash-joined id per combination.
    """
    names = sorted(params)
    combos = []
    labels = []
    for values in it.product(*(params[name] for name in names)):
        labels.append("-".join(str(value) for value in values))
        combos.append(dict(zip(names, values)))
    return {"scope": "session", "params": combos, "ids": labels}
@pytest.fixture(
    scope="session",
    params=["training", "inference"],
    ids=["training", "inference"],
)
def bench_step(request):
    """Parametrizes every benchmark over the 'training' and 'inference' steps."""
    return request.param
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark/automated | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/dask/bench_mnmg_regression.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from ..utils.utils import _benchmark_algo, fixture_generation_helper
from ..utils.utils import bench_step # noqa: F401
from ... import datagen
#
# Core tests
#
@pytest.fixture(
    **fixture_generation_helper({"n_samples": [10000], "n_features": [5, 500]})
)
def regression(request):
    """Regression dataset for the MNMG benchmarks."""
    data = datagen.gen_data(
        "regression",
        "cupy",
        n_samples=request.param["n_samples"],
        n_features=request.param["n_features"],
    )
    # data_kwargs is None: the NVTX profiling step only runs in SG mode.
    return data, None
# Each bench function delegates to _benchmark_algo; passing the Dask client
# switches it to MNMG mode (data distribution, no CPU/NVTX comparison).
def bench_linear_regression(
    gpubenchmark, bench_step, regression, client  # noqa: F811
):
    """Benchmark multi-node multi-GPU LinearRegression."""
    _benchmark_algo(
        gpubenchmark,
        "MNMG.LinearRegression",
        bench_step,
        regression,
        client=client,
    )
def bench_mnmg_lasso(
    gpubenchmark, bench_step, regression, client  # noqa: F811
):
    """Benchmark multi-node multi-GPU Lasso."""
    _benchmark_algo(
        gpubenchmark, "MNMG.Lasso", bench_step, regression, client=client
    )
def bench_mnmg_elastic(
    gpubenchmark, bench_step, regression, client  # noqa: F811
):
    """Benchmark multi-node multi-GPU ElasticNet."""
    _benchmark_algo(
        gpubenchmark, "MNMG.ElasticNet", bench_step, regression, client=client
    )
def bench_mnmg_ridge(
    gpubenchmark, bench_step, regression, client  # noqa: F811
):
    """Benchmark multi-node multi-GPU Ridge."""
    _benchmark_algo(
        gpubenchmark, "MNMG.Ridge", bench_step, regression, client=client
    )
def bench_mnmg_knnregressor(
    gpubenchmark, bench_step, regression, client  # noqa: F811
):
    """Benchmark multi-node multi-GPU KNeighborsRegressor."""
    _benchmark_algo(
        gpubenchmark,
        "MNMG.KNeighborsRegressor",
        bench_step,
        regression,
        client=client,
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark/automated | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/dask/bench_mnmg_dimensionality_reduction.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from ..utils.utils import _benchmark_algo, fixture_generation_helper
from ..utils.utils import bench_step # noqa: F401
from ... import datagen
#
# Core tests
#
@pytest.fixture(
    **fixture_generation_helper(
        {"n_samples": [1000, 10000], "n_features": [5, 500]}
    )
)
def blobs1(request):
    # NOTE(review): this fixture is named blobs1 but generates a
    # 'classification' dataset; the single-GPU counterpart uses 'blobs'.
    # Confirm this is intentional.
    data = datagen.gen_data(
        "classification",
        "cupy",
        n_samples=request.param["n_samples"],
        n_features=request.param["n_features"],
    )
    # data_kwargs is None: NVTX profiling is skipped in MNMG mode.
    return data, None
@pytest.fixture(scope="session")
def blobs2(request):
    """Fixed 10k x 100 blobs dataset (shared, session-scoped)."""
    dataset_kwargs = {
        "dataset_type": "blobs",
        "n_samples": 10000,
        "n_features": 100,
    }
    dataset = datagen.gen_data(
        dataset_kwargs["dataset_type"],
        "cupy",
        n_samples=dataset_kwargs["n_samples"],
        n_features=dataset_kwargs["n_features"],
    )
    return dataset, dataset_kwargs
@pytest.fixture(scope="session")
def blobs3(request):
    """Larger 50k x 100 blobs dataset for the tSVD/PCA benchmarks."""
    dataset_kwargs = {
        "dataset_type": "blobs",
        "n_samples": 50000,
        "n_features": 100,
    }
    dataset = datagen.gen_data(
        dataset_kwargs["dataset_type"],
        "cupy",
        n_samples=dataset_kwargs["n_samples"],
        n_features=dataset_kwargs["n_features"],
    )
    return dataset, dataset_kwargs
def bench_mnmg_kmeans(gpubenchmark, bench_step, blobs1, client):  # noqa: F811
    """Benchmark multi-node multi-GPU KMeans."""
    _benchmark_algo(
        gpubenchmark, "MNMG.KMeans", bench_step, blobs1, client=client
    )
def bench_mnmg_dbscan(gpubenchmark, bench_step, blobs2, client):  # noqa: F811
    """Benchmark multi-node multi-GPU DBSCAN (scatters its own data)."""
    _benchmark_algo(
        gpubenchmark, "MNMG.DBSCAN", bench_step, blobs2, client=client
    )
def bench_mnmg_nearest_neighbors(
    gpubenchmark, bench_step, blobs2, client  # noqa: F811
):
    """Benchmark multi-node multi-GPU NearestNeighbors."""
    _benchmark_algo(
        gpubenchmark,
        "MNMG.NearestNeighbors",
        bench_step,
        blobs2,
        client=client,
    )
@pytest.mark.parametrize(
    "algo_name", ["MNMG.UMAP-Unsupervised", "MNMG.UMAP-Supervised"]
)
def bench_mnmg_umap(
    gpubenchmark, algo_name, bench_step, blobs2, client  # noqa: F811
):
    """Benchmark multi-node multi-GPU UMAP (supervised and unsupervised)."""
    _benchmark_algo(gpubenchmark, algo_name, bench_step, blobs2, client=client)
@pytest.mark.parametrize("algo_name", ["MNMG.tSVD", "MNMG.PCA"])
@pytest.mark.parametrize("n_components", [2, 10, 50])
def bench_mnmg_dimensionality_reduction(
    gpubenchmark,
    algo_name,
    bench_step,
    blobs3,  # noqa: F811
    client,
    n_components,
):
    """Benchmark MNMG tSVD/PCA across several output dimensionalities."""
    _benchmark_algo(
        gpubenchmark,
        algo_name,
        bench_step,
        blobs3,
        # Forwarded to the model constructor via setup_bench
        setup_kwargs={"n_components": n_components},
        client=client,
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark/automated | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/dask/conftest.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from dask_cuda import initialize
from dask_cuda import LocalCUDACluster
from dask_cuda.utils_test import IncreasedCloseTimeoutNanny
from dask.distributed import Client
# UCX transport flags shared by the ucx_cluster fixture below.
enable_tcp_over_ucx = True
enable_nvlink = False
enable_infiniband = False
@pytest.fixture(scope="module")
def cluster():
    """Module-scoped LocalCUDACluster using the TCP protocol."""
    cluster = LocalCUDACluster(
        protocol="tcp",
        scheduler_port=0,
        worker_class=IncreasedCloseTimeoutNanny,
    )
    yield cluster
    cluster.close()
@pytest.fixture(scope="function")
def client(cluster):
    """Fresh Dask client per test, connected to the module-scoped cluster."""
    client = Client(cluster)
    yield client
    client.close()
@pytest.fixture(scope="module")
def ucx_cluster():
    """Module-scoped LocalCUDACluster using the UCX protocol."""
    initialize.initialize(
        create_cuda_context=True,
        enable_tcp_over_ucx=enable_tcp_over_ucx,
        enable_nvlink=enable_nvlink,
        enable_infiniband=enable_infiniband,
    )
    cluster = LocalCUDACluster(
        protocol="ucx",
        enable_tcp_over_ucx=enable_tcp_over_ucx,
        enable_nvlink=enable_nvlink,
        enable_infiniband=enable_infiniband,
        worker_class=IncreasedCloseTimeoutNanny,
    )
    yield cluster
    cluster.close()
@pytest.fixture(scope="function")
def ucx_client(ucx_cluster):
    """Fresh Dask client per test, connected to the UCX cluster."""
    client = Client(ucx_cluster)
    yield client
    client.close()
| 0 |
rapidsai_public_repos/cuml/python/cuml/benchmark/automated | rapidsai_public_repos/cuml/python/cuml/benchmark/automated/dask/bench_mnmg_classification.py | #
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from ..utils.utils import _benchmark_algo, fixture_generation_helper
from ..utils.utils import bench_step # noqa: F401
from ... import datagen
#
# Core tests
#
@pytest.fixture(
    **fixture_generation_helper(
        {"n_samples": [1000, 10000], "n_features": [5, 500]}
    )
)
def classification(request):
    # NOTE(review): generates "cudf" formatted data, unlike the other MNMG
    # fixtures which use "cupy" — confirm this is intentional.
    data = datagen.gen_data(
        "classification",
        "cudf",
        n_samples=request.param["n_samples"],
        n_features=request.param["n_features"],
    )
    # data_kwargs is None: NVTX profiling is skipped in MNMG mode.
    return data, None
def bench_mnmg_knnclassifier(
    gpubenchmark, bench_step, classification, client  # noqa: F811
):
    """Benchmark the multi-node multi-GPU KNeighborsClassifier."""
    algo_name = "MNMG.KNeighborsClassifier"
    _benchmark_algo(
        gpubenchmark,
        algo_name,
        bench_step,
        classification,
        client=client,
    )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/svm/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# List of Cython sources to compile for this package. It starts empty;
# add_module_gpu_default() — presumably defined in a parent CMakeLists —
# appends each .pyx file when the associated algorithm flags are enabled
# in the build (TODO confirm against the parent scope definition).
set(cython_sources "")
add_module_gpu_default("linear.pyx" ${linear_svm_algo} ${svm_algo})
add_module_gpu_default("svc.pyx" ${svc_algo} ${svm_algo})
add_module_gpu_default("svm_base.pyx" ${linear_svm_algo} ${svc_algo} ${svr_algo} ${svm_algo})
add_module_gpu_default("svr.pyx" ${svr_algo} ${svm_algo})

# Build one C++ Python extension module per collected Cython source, linked
# against the single-GPU cuML libraries and prefixed with "svm_".
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_sg_libraries}"
  MODULE_PREFIX svm_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/svm/svc.pyx | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import('cudf')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
from cython.operator cimport dereference as deref
from libc.stdint cimport uintptr_t
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.internals.mixins import ClassifierMixin
from cuml.common.doc_utils import generate_docstring
from cuml.internals.logger import warn
from pylibraft.common.handle cimport handle_t
from pylibraft.common.interruptible import cuda_interruptible
from cuml.common import input_to_cuml_array, input_to_host_array, input_to_host_array_with_sparse_support
from cuml.internals.input_utils import input_to_cupy_array, determine_array_type_full
from cuml.preprocessing import LabelEncoder
from libcpp cimport nullptr
from cuml.svm.svm_base import SVMBase
from cuml.internals.import_utils import has_sklearn
from cuml.internals.array_sparse import SparseCumlArray
if has_sklearn():
from cuml.multiclass import MulticlassClassifier
from sklearn.calibration import CalibratedClassifierCV
# ---------------------------------------------------------------------------
# C/C++ declarations mirrored from the RAFT and cuML headers. These must be
# kept in sync with the structs and functions declared in the included
# headers; Cython does not verify them against the actual header contents.
# ---------------------------------------------------------------------------

# Kernel function selection and its hyperparameters (degree/gamma/coef0),
# as defined by RAFT's distance kernels.
cdef extern from "raft/distance/distance_types.hpp" \
    namespace "raft::distance::kernels":

    enum KernelType:
        LINEAR,
        POLYNOMIAL,
        RBF,
        TANH

    cdef struct KernelParams:
        KernelType kernel
        int degree
        double gamma
        double coef0

# Solver configuration shared by all SVM variants (C-SVC is the one used
# by this module; see self.svmType in SVC.__init__).
cdef extern from "cuml/svm/svm_parameter.h" namespace "ML::SVM":
    enum SvmType:
        C_SVC,
        NU_SVC,
        EPSILON_SVR,
        NU_SVR

    cdef struct SvmParameter:
        # parameters for training
        double C
        double cache_size
        int max_iter
        int nochange_steps
        double tol
        int verbosity
        double epsilon
        SvmType svmType

# Fitted-model state owned by the C++ side. SupportStorage holds the support
# vectors either densely (data only) or as CSR (indptr/indices/data).
cdef extern from "cuml/svm/svm_model.h" namespace "ML::SVM":
    cdef cppclass SupportStorage[math_t]:
        int nnz
        int* indptr
        int* indices
        math_t* data

    cdef cppclass SvmModel[math_t]:
        # parameters of a fitted model
        int n_support
        int n_cols
        math_t b
        math_t *dual_coefs
        SupportStorage[math_t] support_matrix
        int *support_idx
        int n_classes
        math_t *unique_labels

# Training entry points: dense and CSR-sparse variants. Both fill `model`
# in place; sample_weight may be NULL.
cdef extern from "cuml/svm/svc.hpp" namespace "ML::SVM" nogil:

    cdef void svcFit[math_t](const handle_t &handle, math_t* data,
                             int n_rows, int n_cols,
                             math_t *labels,
                             const SvmParameter &param,
                             KernelParams &kernel_params,
                             SvmModel[math_t] &model,
                             const math_t *sample_weight) except +

    cdef void svcFitSparse[math_t](const handle_t &handle, int* indptr, int* indices,
                                   math_t* data, int n_rows, int n_cols, int nnz,
                                   math_t *labels,
                                   const SvmParameter &param,
                                   KernelParams &kernel_params,
                                   SvmModel[math_t] &model,
                                   const math_t *sample_weight) except +
def apply_class_weight(handle, sample_weight, class_weight, y, verbose, output_type, dtype) -> CumlArray:
    """
    Scale the sample weights with the class weights.

    Returns the modified sample weights, or None if neither class weights
    nor sample weights are defined. The returned weights are defined as
    sample_weight[i] = class_weight[y[i]] * sample_weight[i].

    Parameters:
    -----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model.
    sample_weight: array-like (device or host), shape = (n_samples, 1)
        sample weights or None if not given
    class_weight : dict or string (default=None)
        Weights to modify the parameter C for class i to class_weight[i]*C. The
        string 'balanced' is also accepted, in which case ``class_weight[i] =
        n_samples / (n_classes * n_samples_of_class[i])``
    y: array of floats or doubles, shape = (n_samples, 1)
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {{'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    dtype : dtype for sample_weights

    Returns
    --------
    sample_weight: device array shape = (n_samples, 1) or None
    """
    # No class weights: nothing to scale, return sample_weight unchanged
    # (which may itself be None).
    if class_weight is None:
        return sample_weight

    # Exact-type check: a CumlArray is used as-is, anything else is converted
    # to a single-column CumlArray.
    if type(y) is CumlArray:
        y_m = y
    else:
        y_m, _, _, _ = input_to_cuml_array(y, check_cols=1)

    # Encode labels to contiguous integers 0..n_classes-1 so that class
    # weights can be looked up / matched against bincount indices below.
    le = LabelEncoder(handle=handle,
                      verbose=verbose,
                      output_type=output_type)
    labels = y_m.to_output(output_type='series')
    encoded_labels = cp.asarray(le.fit_transform(labels))
    n_samples = y_m.shape[0]

    # Define class weights for the encoded labels
    if class_weight == 'balanced':
        # weight[i] = n_samples / (n_classes * count(class i)), computed on
        # host from the per-class counts.
        counts = cp.asnumpy(cp.bincount(encoded_labels))
        n_classes = len(counts)
        weights = n_samples / (n_classes * counts)
        class_weight = {i: weights[i] for i in range(n_classes)}
    else:
        # Re-key the user-supplied dict from original label values to their
        # encoded integer representation.
        keys = class_weight.keys()
        encoded_keys = le.transform(cudf.Series(keys)).values_host
        class_weight = {enc_key: class_weight[key]
                        for enc_key, key in zip(encoded_keys, keys)}

    # Start from unit weights when the caller gave none; otherwise validate
    # shape/dtype and move to a cupy array for in-place scaling.
    if sample_weight is None:
        sample_weight = cp.ones(y_m.shape, dtype=dtype)
    else:
        sample_weight, _, _, _ = \
            input_to_cupy_array(sample_weight, convert_to_dtype=dtype,
                                check_rows=n_samples, check_cols=1)
    # Scale each sample's weight by its class weight (boolean-mask update).
    for label, weight in class_weight.items():
        sample_weight[encoded_labels==label] *= weight
    return sample_weight
class SVC(SVMBase,
          ClassifierMixin):
    """
    SVC (C-Support Vector Classification)

    Construct an SVC classifier for training and predictions.

    Examples
    --------
    .. code-block:: python

        >>> import cupy as cp
        >>> from cuml.svm import SVC
        >>> X = cp.array([[1,1], [2,1], [1,2], [2,2], [1,3], [2,3]],
        ...              dtype=cp.float32);
        >>> y = cp.array([-1, -1, 1, -1, 1, 1], dtype=cp.float32)
        >>> clf = SVC(kernel='poly', degree=2, gamma='auto', C=1)
        >>> clf.fit(X, y)
        SVC()
        >>> print("Predicted labels:", clf.predict(X))
        Predicted labels: [-1. -1. 1. -1. 1. 1.]

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    C : float (default = 1.0)
        Penalty parameter C
    kernel : string (default='rbf')
        Specifies the kernel function. Possible options: 'linear', 'poly',
        'rbf', 'sigmoid'. Currently precomputed kernels are not supported.
    degree : int (default=3)
        Degree of polynomial kernel function.
    gamma : float or string (default = 'scale')
        Coefficient for rbf, poly, and sigmoid kernels. You can specify the
        numeric value, or use one of the following options:

        - 'auto': gamma will be set to ``1 / n_features``
        - 'scale': gamma will be set to ``1 / (n_features * X.var())``

    coef0 : float (default = 0.0)
        Independent term in kernel function, only significant for poly and
        sigmoid
    tol : float (default = 1e-3)
        Tolerance for stopping criterion.
    cache_size : float (default = 1024.0)
        Size of the kernel cache during training in MiB. Increase it to improve
        the training time, at the cost of higher memory footprint. After
        training the kernel cache is deallocated.
        During prediction, we also need a temporary space to store kernel
        matrix elements (this can be significant if n_support is large).
        The cache_size variable sets an upper limit to the prediction
        buffer as well.
    class_weight : dict or string (default=None)
        Weights to modify the parameter C for class i to class_weight[i]*C. The
        string 'balanced' is also accepted, in which case ``class_weight[i] =
        n_samples / (n_classes * n_samples_of_class[i])``
    max_iter : int (default = -1)
        Limit the number of outer iterations in the solver.
        If -1 (default) then ``max_iter=100*n_samples``
    multiclass_strategy : str ('ovo' or 'ovr', default 'ovo')
        Multiclass classification strategy. ``'ovo'`` uses `OneVsOneClassifier
        <https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsOneClassifier.html>`_
        while ``'ovr'`` selects `OneVsRestClassifier
        <https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html>`_
    nochange_steps : int (default = 1000)
        We monitor how much our stopping criteria changes during outer
        iterations. If it does not change (changes less then 1e-3*tol)
        for nochange_steps consecutive steps, then we stop training.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    probability: bool (default = False)
        Enable or disable probability estimates.
    random_state: int (default = None)
        Seed for random number generator (used only when probability = True).
        Currently this argument is not used and a warning will be printed if the
        user provides it.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.

    Attributes
    ----------
    n_support_ : int
        The total number of support vectors. Note: this will change in the
        future to represent number support vectors for each class (like
        in Sklearn, see https://github.com/rapidsai/cuml/issues/956 )
    support_ : int, shape = (n_support)
        Device array of support vector indices
    support_vectors_ : float, shape (n_support, n_cols)
        Device array of support vectors
    dual_coef_ : float, shape = (1, n_support)
        Device array of coefficients for support vectors
    intercept_ : float
        The constant in the decision function
    fit_status_ : int
        0 if SVM is correctly fitted
    coef_ : float, shape (1, n_cols)
        Only available for linear kernels. It is the normal of the
        hyperplane.
    classes_ : shape (`n_classes_`,)
        Array of class labels
    n_classes_ : int
        Number of classes

    Notes
    -----
    The solver uses the SMO method to fit the classifier. We use the Optimized
    Hierarchical Decomposition [1]_ variant of the SMO algorithm, similar to
    [2]_.

    For additional docs, see `scikitlearn's SVC
    <https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html>`_.

    References
    ----------
    .. [1] J. Vanek et al. A GPU-Architecture Optimized Hierarchical
       Decomposition Algorithm for Support VectorMachine Training, IEEE
       Transactions on Parallel and Distributed Systems, vol 28, no 12, 3330,
       (2017)
    .. [2] `Z. Wen et al. ThunderSVM: A Fast SVM Library on GPUs and CPUs,
       Journal of Machine Learning Research, 19, 1-5 (2018)
       <https://github.com/Xtra-Computing/thundersvm>`_
    """

    def __init__(self, *, handle=None, C=1, kernel='rbf', degree=3,
                 gamma='scale', coef0=0.0, tol=1e-3, cache_size=1024.0,
                 max_iter=-1, nochange_steps=1000, verbose=False,
                 output_type=None, probability=False, random_state=None,
                 class_weight=None, multiclass_strategy='ovo'):
        super().__init__(
            handle=handle,
            C=C,
            kernel=kernel,
            degree=degree,
            gamma=gamma,
            coef0=coef0,
            tol=tol,
            cache_size=cache_size,
            max_iter=max_iter,
            nochange_steps=nochange_steps,
            verbose=verbose,
            output_type=output_type)
        self.probability = probability
        self.random_state = random_state
        if probability and random_state is not None:
            warn("Random state is currently ignored by probabilistic SVC")
        self.class_weight = class_weight
        self.svmType = C_SVC  # C-Support Vector Classification
        self.multiclass_strategy = multiclass_strategy

    @property
    @cuml.internals.api_base_return_array_skipall
    def classes_(self):
        # Delegate to whichever wrapped estimator actually holds the labels:
        # the calibrated ensemble (probability=True), the multiclass wrapper
        # (>2 classes), or this binary classifier itself.
        if self.probability:
            return self.prob_svc.classes_
        elif self.n_classes_ > 2:
            return self.multiclass_svc.classes_
        else:
            return self._unique_labels_

    @property
    @cuml.internals.api_base_return_array_skipall
    def support_(self):
        # For multiclass, concatenate the (re-aligned) support indices of all
        # underlying binary estimators; see _fit_multiclass.
        if self.n_classes_ > 2:
            estimators = self.multiclass_svc.multiclass_estimator.estimators_
            return cp.concatenate(
                [cp.asarray(cls._support_) for cls in estimators])
        else:
            return self._support_

    @support_.setter
    def support_(self, value):
        self._support_ = value

    @property
    @cuml.internals.api_base_return_array_skipall
    def intercept_(self):
        if self.n_classes_ > 2:
            estimators = self.multiclass_svc.multiclass_estimator.estimators_
            return cp.concatenate(
                [cp.asarray(cls._intercept_) for cls in estimators])
        else:
            return super()._intercept_

    @intercept_.setter
    def intercept_(self, value):
        self._intercept_ = value

    def _get_num_classes(self, y):
        """
        Determine the number of unique classes in y.
        """
        y_m, _, _, _ = input_to_cuml_array(y, check_cols=1)
        return len(cp.unique(cp.asarray(y_m)))

    def _fit_multiclass(self, X, y, sample_weight) -> "SVC":
        """Fit an ensemble of binary SVCs using the configured multiclass
        strategy ('ovo' or 'ovr'). sample_weight is currently ignored."""
        if sample_weight is not None:
            warn("Sample weights are currently ignored for multi class "
                 "classification")
        if not has_sklearn():
            raise RuntimeError("Scikit-learn is needed to fit multiclass SVM")
        params = self.get_params()
        strategy = params.pop('multiclass_strategy', 'ovo')
        self.multiclass_svc = MulticlassClassifier(
            estimator=SVC(**params), handle=self.handle, verbose=self.verbose,
            output_type=self.output_type, strategy=strategy)
        self.multiclass_svc.fit(X, y)
        # if using one-vs-one we align support_ indices to those of
        # full dataset
        if strategy == 'ovo':
            y = cp.array(y)
            classes = cp.unique(y)
            n_classes = len(classes)
            estimator_index = 0
            # Loop through multiclass estimators and re-align support_ indices
            # (the (i, j) iteration order mirrors sklearn's OneVsOneClassifier
            # estimator ordering).
            for i in range(n_classes):
                for j in range(i + 1, n_classes):
                    # Rows of the full dataset that belong to classes i or j;
                    # each binary estimator's support_ indexes into this
                    # subset, so map it back to full-dataset row indices.
                    cond = cp.logical_or(y == classes[i], y == classes[j])
                    ovo_support = cp.array(
                        self.multiclass_svc.multiclass_estimator.estimators_[
                            estimator_index
                        ].support_)
                    self.multiclass_svc.multiclass_estimator.estimators_[
                        estimator_index
                    ].support_ = cp.nonzero(cond)[0][ovo_support]
                    estimator_index += 1
        self._fit_status_ = 0
        return self

    def _fit_proba(self, X, y, sample_weight) -> "SVC":
        """Fit a probabilistic SVC via sklearn's CalibratedClassifierCV.

        NOTE: ``sample_weight`` is accepted for signature symmetry with the
        other fit paths but is currently not forwarded to the calibrated
        fit. (Parameter name fixed from the former misspelling
        ``samle_weight``; the only call site passes it positionally.)
        """
        params = self.get_params()
        params["probability"] = False
        # Ensure it always outputs numpy
        params["output_type"] = "numpy"
        # Currently CalibratedClassifierCV expects data on the host, see
        # https://github.com/rapidsai/cuml/issues/2608
        X = input_to_host_array_with_sparse_support(X)
        y = input_to_host_array(y).array

        if not has_sklearn():
            raise RuntimeError(
                "Scikit-learn is needed to use SVM probabilities")

        self.prob_svc = CalibratedClassifierCV(SVC(**params),
                                               cv=5,
                                               method='sigmoid')

        # Exit the internal API so the sklearn estimator sees host data.
        with cuml.internals.exit_internal_api():
            self.prob_svc.fit(X, y)
        self._fit_status_ = 0
        return self

    @generate_docstring(y='dense_anydtype')
    @cuml.internals.api_base_return_any(set_output_dtype=True)
    def fit(self, X, y, sample_weight=None, convert_dtype=True) -> "SVC":
        """
        Fit the model with X and y.

        """
        self.n_classes_ = self._get_num_classes(y)

        # we need to check whether input X is sparse
        # In that case we don't want to make a dense copy
        _array_type, is_sparse = determine_array_type_full(X)

        # Dispatch to the probabilistic / multiclass wrappers before doing
        # any device-side conversion of X.
        if self.probability:
            if is_sparse:
                raise ValueError("Probabilistic SVM does not support sparse input.")
            return self._fit_proba(X, y, sample_weight)
        if self.n_classes_ > 2:
            return self._fit_multiclass(X, y, sample_weight)

        if is_sparse:
            X_m = SparseCumlArray(X)
            self.n_rows = X_m.shape[0]
            self.n_cols = X_m.shape[1]
            self.dtype = X_m.dtype
        else:
            X_m, self.n_rows, self.n_cols, self.dtype = \
                input_to_cuml_array(X, order='F')

        # Fit binary classifier
        convert_to_dtype = self.dtype if convert_dtype else None
        y_m, _, _, _ = \
            input_to_cuml_array(y, check_dtype=self.dtype,
                                convert_to_dtype=convert_to_dtype,
                                check_rows=self.n_rows, check_cols=1)

        cdef uintptr_t y_ptr = y_m.ptr

        # Fold class weights into per-sample weights (may return None).
        sample_weight = apply_class_weight(
            self.handle, sample_weight, self.class_weight, y_m,
            self.verbose, self.output_type, self.dtype)
        cdef uintptr_t sample_weight_ptr = <uintptr_t> nullptr
        if sample_weight is not None:
            sample_weight_m, _, _, _ = \
                input_to_cuml_array(sample_weight, check_dtype=self.dtype,
                                    convert_to_dtype=convert_to_dtype,
                                    check_rows=self.n_rows, check_cols=1)
            sample_weight_ptr = sample_weight_m.ptr

        self._dealloc()  # delete any previously fitted model
        self.coef_ = None

        cdef KernelParams _kernel_params = self._get_kernel_params(X_m)
        cdef SvmParameter param = self._get_svm_params()
        cdef SvmModel[float] *model_f
        cdef SvmModel[double] *model_d
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef int n_rows = self.n_rows
        cdef int n_cols = self.n_cols

        # For sparse input, pass the CSR triple; for dense input only X_data
        # is meaningful (the indptr/indices aliases are never dereferenced
        # because nnz is -1 and the dense entry point is called).
        cdef int n_nnz = X_m.nnz if is_sparse else -1
        cdef uintptr_t X_indptr = X_m.indptr.ptr if is_sparse else X_m.ptr
        cdef uintptr_t X_indices = X_m.indices.ptr if is_sparse else X_m.ptr
        cdef uintptr_t X_data = X_m.data.ptr if is_sparse else X_m.ptr

        if self.dtype == np.float32:
            model_f = new SvmModel[float]()
            if is_sparse:
                with cuda_interruptible():
                    with nogil:
                        svcFitSparse(
                            deref(handle_), <int*>X_indptr, <int*>X_indices,
                            <float*>X_data, n_rows, n_cols, n_nnz,
                            <float*>y_ptr, param, _kernel_params,
                            deref(model_f), <float*>sample_weight_ptr)
            else:
                with cuda_interruptible():
                    with nogil:
                        svcFit(
                            deref(handle_), <float*>X_data, n_rows, n_cols,
                            <float*>y_ptr, param, _kernel_params,
                            deref(model_f), <float*>sample_weight_ptr)
            self._model = <uintptr_t>model_f
        elif self.dtype == np.float64:
            model_d = new SvmModel[double]()
            if is_sparse:
                with cuda_interruptible():
                    with nogil:
                        svcFitSparse(
                            deref(handle_), <int*>X_indptr, <int*>X_indices,
                            <double*>X_data, n_rows, n_cols, n_nnz,
                            <double*>y_ptr, param, _kernel_params,
                            deref(model_d), <double*>sample_weight_ptr)
            else:
                with cuda_interruptible():
                    with nogil:
                        svcFit(
                            deref(handle_), <double*>X_data, n_rows, n_cols,
                            <double*>y_ptr, param, _kernel_params,
                            deref(model_d), <double*>sample_weight_ptr)
            self._model = <uintptr_t>model_d
        else:
            raise TypeError('Input data type should be float32 or float64')

        self._unpack_model()
        self._fit_status_ = 0
        self.handle.sync()

        del X_m
        del y_m

        return self

    @generate_docstring(return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Predicted values',
                                       'shape': '(n_samples, 1)'})
    def predict(self, X, convert_dtype=True) -> CumlArray:
        """
        Predicts the class labels for X. The returned y values are the class
        labels associated to sign(decision_function(X)).

        """
        if self.probability:
            self._check_is_fitted('prob_svc')
            X = input_to_host_array_with_sparse_support(X)

            with cuml.internals.exit_internal_api():
                preds = self.prob_svc.predict(X)
                # prob_svc has numpy output type, change it if it is necessary:
                return preds
        elif self.n_classes_ > 2:
            self._check_is_fitted('multiclass_svc')
            return self.multiclass_svc.predict(X)
        else:
            return super(SVC, self).predict(X, True, convert_dtype)

    @generate_docstring(skip_parameters_heading=True,
                        return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Predicted \
                                       probabilities',
                                       'shape': '(n_samples, n_classes)'})
    def predict_proba(self, X, log=False) -> CumlArray:
        """
        Predicts the class probabilities for X.

        The model has to be trained with probability=True to use this method.

        Parameters
        ----------
        log: boolean (default = False)
            Whether to return log probabilities.

        """
        if self.probability:
            self._check_is_fitted('prob_svc')
            X = input_to_host_array_with_sparse_support(X)

            # Exit the internal API when calling sklearn code (forces numpy
            # conversion)
            with cuml.internals.exit_internal_api():
                preds = self.prob_svc.predict_proba(X)
                if (log):
                    preds = np.log(preds)
                # prob_svc has numpy output type, change it if it is necessary:
                return preds
        else:
            raise AttributeError("This classifier is not fitted to predict "
                                 "probabilities. Fit a new classifier with "
                                 "probability=True to enable predict_proba.")

    @generate_docstring(return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Log of predicted \
                                       probabilities',
                                       'shape': '(n_samples, n_classes)'})
    @cuml.internals.api_base_return_array_skipall
    def predict_log_proba(self, X) -> CumlArray:
        """
        Predicts the log probabilities for X (returns log(predict_proba(x)).

        The model has to be trained with probability=True to use this method.

        """
        return self.predict_proba(X, log=True)

    @generate_docstring(return_values={'name': 'results',
                                       'type': 'dense',
                                       'description': 'Decision function \
                                       values',
                                       'shape': '(n_samples, 1)'})
    def decision_function(self, X) -> CumlArray:
        """
        Calculates the decision function values for X.

        """
        if self.probability:
            self._check_is_fitted('prob_svc')
            # Probabilistic SVC is an ensemble of simple SVC classifiers
            # fitted to different subset of the training data. As such, it
            # does not have a single decision function. (During prediction
            # we use the calibrated probabilities to determine the class
            # label.) Here we average the decision function value. This can
            # be useful for visualization, but predictions should be made
            # using the probabilities.
            df = np.zeros((X.shape[0],))
            with cuml.internals.exit_internal_api():
                for clf in self.prob_svc.calibrated_classifiers_:
                    df = df + clf.estimator.decision_function(X)
                df = df / len(self.prob_svc.calibrated_classifiers_)
            return df
        elif self.n_classes_ > 2:
            self._check_is_fitted('multiclass_svc')
            return self.multiclass_svc.decision_function(X)
        else:
            return super().predict(X, False)

    def get_param_names(self):
        params = super().get_param_names() + \
            ["probability", "random_state", "class_weight",
                "multiclass_strategy"]
        # Ignore "epsilon" since its not used in the constructor
        if ("epsilon" in params):
            params.remove("epsilon")
        return params
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/svm/svr.pyx | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import gpu_only_import
cupy = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
from libc.stdint cimport uintptr_t
from cuml.internals.array import CumlArray
from cuml.internals.array_sparse import SparseCumlArray
from cuml.internals.input_utils import determine_array_type_full
from cuml.internals.mixins import RegressorMixin
from cuml.common.doc_utils import generate_docstring
from pylibraft.common.handle cimport handle_t
from cuml.common import input_to_cuml_array
from libcpp cimport nullptr
from cuml.svm.svm_base import SVMBase
# ---------------------------------------------------------------------------
# C/C++ declarations mirrored from the cuML headers. These must be kept in
# sync with the structs and functions declared in the included headers;
# Cython does not verify them against the actual header contents.
# ---------------------------------------------------------------------------

# Kernel function selection and its hyperparameters (degree/gamma/coef0).
# NOTE(review): this file pulls KernelParams from "cuml/matrix/kernelparams.h"
# while svc.pyx uses the RAFT distance header — presumably ABI-compatible;
# confirm when updating either declaration.
cdef extern from "cuml/matrix/kernelparams.h" namespace "MLCommon::Matrix":
    enum KernelType:
        LINEAR, POLYNOMIAL, RBF, TANH

    cdef struct KernelParams:
        KernelType kernel
        int degree
        double gamma
        double coef0

# Solver configuration shared by all SVM variants (EPSILON_SVR is the one
# used by this module; see self.svmType in SVR.__init__).
cdef extern from "cuml/svm/svm_parameter.h" namespace "ML::SVM":
    enum SvmType:
        C_SVC, NU_SVC, EPSILON_SVR, NU_SVR

    cdef struct SvmParameter:
        # parameters for training
        double C
        double cache_size
        int max_iter
        int nochange_steps
        double tol
        int verbosity
        double epsilon
        SvmType svmType

# Fitted-model state owned by the C++ side. SupportStorage holds the support
# vectors either densely (data only) or as CSR (indptr/indices/data).
cdef extern from "cuml/svm/svm_model.h" namespace "ML::SVM":
    cdef cppclass SupportStorage[math_t]:
        int nnz
        int* indptr
        int* indices
        math_t* data

    cdef cppclass SvmModel[math_t]:
        # parameters of a fitted model
        int n_support
        int n_cols
        math_t b
        math_t *dual_coefs
        SupportStorage[math_t] support_matrix
        int *support_idx
        int n_classes
        math_t *unique_labels

# Training entry points: dense and CSR-sparse variants. Both fill `model`
# in place; sample_weight may be NULL.
cdef extern from "cuml/svm/svr.hpp" namespace "ML::SVM" nogil:

    cdef void svrFit[math_t](const handle_t &handle,
                             math_t* data,
                             int n_rows,
                             int n_cols,
                             math_t *y,
                             const SvmParameter &param,
                             KernelParams &kernel_params,
                             SvmModel[math_t] &model,
                             const math_t *sample_weight) except+

    cdef void svrFitSparse[math_t](const handle_t &handle,
                                   int* indptr,
                                   int* indices,
                                   math_t* data,
                                   int n_rows,
                                   int n_cols,
                                   int nnz,
                                   math_t *y,
                                   const SvmParameter &param,
                                   KernelParams &kernel_params,
                                   SvmModel[math_t] &model,
                                   const math_t *sample_weight) except+
class SVR(SVMBase, RegressorMixin):
"""
SVR (Epsilon Support Vector Regression)
Construct an SVC classifier for training and predictions.
Parameters
----------
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
C : float (default = 1.0)
Penalty parameter C
kernel : string (default='rbf')
Specifies the kernel function. Possible options: 'linear', 'poly',
'rbf', 'sigmoid'. Currently precomputed kernels are not supported.
degree : int (default=3)
Degree of polynomial kernel function.
gamma : float or string (default = 'scale')
Coefficient for rbf, poly, and sigmoid kernels. You can specify the
numeric value, or use one of the following options:
- 'auto': gamma will be set to ``1 / n_features``
- 'scale': gamma will be se to ``1 / (n_features * X.var())``
coef0 : float (default = 0.0)
Independent term in kernel function, only significant for poly and
sigmoid
tol : float (default = 1e-3)
Tolerance for stopping criterion.
epsilon: float (default = 0.1)
epsilon parameter of the epsiron-SVR model. There is no penalty
associated to points that are predicted within the epsilon-tube
around the target values.
cache_size : float (default = 1024.0)
Size of the kernel cache during training in MiB. Increase it to improve
the training time, at the cost of higher memory footprint. After
training the kernel cache is deallocated.
During prediction, we also need a temporary space to store kernel
matrix elements (this can be significant if n_support is large).
The cache_size variable sets an upper limit to the prediction
buffer as well.
max_iter : int (default = -1)
Limit the number of outer iterations in the solver.
If -1 (default) then ``max_iter=100*n_samples``
nochange_steps : int (default = 1000)
We monitor how much our stopping criteria changes during outer
iterations. If it does not change (changes less then 1e-3*tol)
for nochange_steps consecutive steps, then we stop training.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
Attributes
----------
n_support_ : int
The total number of support vectors. Note: this will change in the
future to represent number support vectors for each class (like
in Sklearn, see Issue #956)
support_ : int, shape = [n_support]
Device array of support vector indices
support_vectors_ : float, shape [n_support, n_cols]
Device array of support vectors
dual_coef_ : float, shape = [1, n_support]
Device array of coefficients for support vectors
intercept_ : int
The constant in the decision function
fit_status_ : int
0 if SVM is correctly fitted
coef_ : float, shape [1, n_cols]
Only available for linear kernels. It is the normal of the
hyperplane.
``coef_ = sum_k=1..n_support dual_coef_[k] * support_vectors[k,:]``
Notes
-----
For additional docs, see `Scikit-learn's SVR
<https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html>`_.
The solver uses the SMO method to fit the regressor. We use the Optimized
Hierarchical Decomposition [1]_ variant of the SMO algorithm, similar to
[2]_
References
----------
.. [1] J. Vanek et al. A GPU-Architecture Optimized Hierarchical
Decomposition Algorithm for Support VectorMachine Training, IEEE
Transactions on Parallel and Distributed Systems, vol 28, no 12,
3330, (2017)
.. [2] `Z. Wen et al. ThunderSVM: A Fast SVM Library on GPUs and CPUs,
Journal of Machine Learning Research, 19, 1-5 (2018)
<https://github.com/Xtra-Computing/thundersvm>`_
Examples
--------
.. code-block:: python
>>> import cupy as cp
>>> from cuml.svm import SVR
>>> X = cp.array([[1], [2], [3], [4], [5]], dtype=cp.float32)
>>> y = cp.array([1.1, 4, 5, 3.9, 1.], dtype = cp.float32)
>>> reg = SVR(kernel='rbf', gamma='scale', C=10, epsilon=0.1)
>>> reg.fit(X, y)
SVR()
>>> print("Predicted values:", reg.predict(X)) # doctest: +SKIP
Predicted values: [1.200474 3.8999617 5.100488 3.7995374 1.0995375]
"""
def __init__(self, *, handle=None, C=1, kernel='rbf', degree=3,
gamma='scale', coef0=0.0, tol=1e-3, epsilon=0.1,
cache_size=1024.0, max_iter=-1, nochange_steps=1000,
verbose=False, output_type=None):
super().__init__(
handle=handle,
C=C,
kernel=kernel,
degree=degree,
gamma=gamma,
coef0=coef0,
tol=tol,
epsilon=epsilon,
cache_size=cache_size,
max_iter=max_iter,
nochange_steps=nochange_steps,
verbose=verbose,
output_type=output_type,
)
self.svmType = EPSILON_SVR
    @generate_docstring()
    def fit(self, X, y, sample_weight=None, convert_dtype=True) -> "SVR":
        """
        Fit the model with X and y.
        """
        # we need to check whether our input X is sparse;
        # in that case we don't want to make a dense copy
        _array_type, is_sparse = determine_array_type_full(X)
        if is_sparse:
            X_m = SparseCumlArray(X)
            self.n_rows = X_m.shape[0]
            self.n_cols = X_m.shape[1]
            self.dtype = X_m.dtype
        else:
            X_m, self.n_rows, self.n_cols, self.dtype = \
                input_to_cuml_array(X, order='F')
        # Targets (and optional weights) are coerced to the same dtype as X.
        convert_to_dtype = self.dtype if convert_dtype else None
        y_m, _, _, _ = \
            input_to_cuml_array(y, check_dtype=self.dtype,
                                convert_to_dtype=convert_to_dtype,
                                check_rows=self.n_rows, check_cols=1)
        cdef uintptr_t y_ptr = y_m.ptr
        # Null pointer signals "no sample weights" to the C++ solver.
        cdef uintptr_t sample_weight_ptr = <uintptr_t> nullptr
        if sample_weight is not None:
            sample_weight_m, _, _, _ = \
                input_to_cuml_array(sample_weight, check_dtype=self.dtype,
                                    convert_to_dtype=convert_to_dtype,
                                    check_rows=self.n_rows, check_cols=1)
            sample_weight_ptr = sample_weight_m.ptr
        self._dealloc()  # delete any previously fitted model
        self.coef_ = None
        cdef KernelParams _kernel_params = self._get_kernel_params(X_m)
        cdef SvmParameter param = self._get_svm_params()
        cdef SvmModel[float] *model_f
        cdef SvmModel[double] *model_d
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef int n_rows = self.n_rows
        cdef int n_cols = self.n_cols
        # For dense input all three CSR pointers alias the dense buffer and
        # n_nnz is -1; the C++ side only reads them in the sparse path.
        cdef int n_nnz = X_m.nnz if is_sparse else -1
        cdef uintptr_t X_indptr = X_m.indptr.ptr if is_sparse else X_m.ptr
        cdef uintptr_t X_indices = X_m.indices.ptr if is_sparse else X_m.ptr
        cdef uintptr_t X_data = X_m.data.ptr if is_sparse else X_m.ptr
        # Dispatch on dtype: the C++ model is templated on float/double and
        # the fitted pointer is stashed (type-erased) in self._model.
        if self.dtype == np.float32:
            model_f = new SvmModel[float]()
            if is_sparse:
                svrFitSparse(handle_[0], <int*>X_indptr, <int*>X_indices,
                             <float*>X_data, n_rows, n_cols, n_nnz,
                             <float*>y_ptr, param, _kernel_params, model_f[0],
                             <float*>sample_weight_ptr)
            else:
                svrFit(handle_[0], <float*>X_data, n_rows, n_cols,
                       <float*>y_ptr, param, _kernel_params, model_f[0],
                       <float*>sample_weight_ptr)
            self._model = <uintptr_t>model_f
        elif self.dtype == np.float64:
            model_d = new SvmModel[double]()
            if is_sparse:
                svrFitSparse(handle_[0], <int*>X_indptr, <int*>X_indices,
                             <double*>X_data, n_rows, n_cols, n_nnz,
                             <double*>y_ptr, param, _kernel_params, model_d[0],
                             <double*>sample_weight_ptr)
            else:
                svrFit(handle_[0], <double*>X_data, n_rows, n_cols,
                       <double*>y_ptr, param, _kernel_params, model_d[0],
                       <double*>sample_weight_ptr)
            self._model = <uintptr_t>model_d
        else:
            raise TypeError('Input data type should be float32 or float64')
        # Copy support vectors / coefficients back into Python-side attrs.
        self._unpack_model()
        self._fit_status_ = 0
        self.handle.sync()
        del X_m
        del y_m
        return self
@generate_docstring(return_values={'name': 'preds',
'type': 'dense',
'description': 'Predicted values',
'shape': '(n_samples, 1)'})
def predict(self, X, convert_dtype=True) -> CumlArray:
"""
Predicts the values for X.
"""
return super(SVR, self).predict(X, False, convert_dtype)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/svm/linear_svr.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.mixins import RegressorMixin
from cuml.svm.linear import LinearSVM, LinearSVM_defaults # noqa: F401
__all__ = ["LinearSVR"]
class LinearSVR(LinearSVM, RegressorMixin):
    """
    LinearSVR (Support Vector Regression with the linear kernel)
    Construct a linear SVM regressor for training and predictions.
    Examples
    --------
    .. code-block:: python
        >>> import cupy as cp
        >>> from cuml.svm import LinearSVR
        >>> X = cp.array([[1], [2], [3], [4], [5]], dtype=cp.float32)
        >>> y = cp.array([1.1, 4, 5, 3.9, 8.], dtype=cp.float32)
        >>> reg = LinearSVR(loss='epsilon_insensitive', C=10,
        ...                 epsilon=0.1, verbose=0)
        >>> reg.fit(X, y)
        LinearSVR()
        >>> print("Predicted values:", reg.predict(X))  # doctest: +SKIP
        Predicted values: [1.8993504 3.3995128 4.899675 6.399837 7.899999]
    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    penalty : {{'l1', 'l2'}} (default = '{LinearSVM_defaults.penalty}')
        The regularization term of the target function.
    loss : {LinearSVR.REGISTERED_LOSSES} (default = 'epsilon_insensitive')
        The loss term of the target function.
    fit_intercept : {LinearSVM_defaults.fit_intercept.__class__.__name__ \
            } (default = {LinearSVM_defaults.fit_intercept})
        Whether to fit the bias term. Set to False if you expect that the
        data is already centered.
    penalized_intercept : { \
            LinearSVM_defaults.penalized_intercept.__class__.__name__ \
            } (default = {LinearSVM_defaults.penalized_intercept})
        When true, the bias term is treated the same way as other features;
        i.e. it's penalized by the regularization term of the target function.
        Enabling this feature forces an extra copying the input data X.
    max_iter : {LinearSVM_defaults.max_iter.__class__.__name__ \
            } (default = {LinearSVM_defaults.max_iter})
        Maximum number of iterations for the underlying solver.
    linesearch_max_iter : { \
            LinearSVM_defaults.linesearch_max_iter.__class__.__name__ \
            } (default = {LinearSVM_defaults.linesearch_max_iter})
        Maximum number of linesearch (inner loop) iterations for
        the underlying (QN) solver.
    lbfgs_memory : { \
            LinearSVM_defaults.lbfgs_memory.__class__.__name__ \
            } (default = {LinearSVM_defaults.lbfgs_memory})
        Number of vectors approximating the hessian for the underlying QN
        solver (l-bfgs).
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    C : {LinearSVM_defaults.C.__class__.__name__ \
            } (default = {LinearSVM_defaults.C})
        The constant scaling factor of the loss term in the target formula
        `F(X, y) = penalty(X) + C * loss(X, y)`.
    grad_tol : {LinearSVM_defaults.grad_tol.__class__.__name__ \
            } (default = {LinearSVM_defaults.grad_tol})
        The threshold on the gradient for the underlying QN solver.
    change_tol : {LinearSVM_defaults.change_tol.__class__.__name__ \
            } (default = {LinearSVM_defaults.change_tol})
        The threshold on the function change for the underlying QN solver.
    tol : Optional[float] (default = None)
        Tolerance for the stopping criterion.
        This is a helper transient parameter that, when present, sets both
        `grad_tol` and `change_tol` to the same value. When any of the two
        `***_tol` parameters are passed as well, they take the precedence.
    epsilon : {LinearSVM_defaults.epsilon.__class__.__name__ \
            } (default = {LinearSVM_defaults.epsilon})
        The epsilon-sensitivity parameter for the SVR loss function.
    output_type : {{'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    Attributes
    ----------
    intercept_ : float, shape (1,)
        The constant in the decision function
    coef_ : float, shape (1, n_cols)
        The coefficients of the linear decision function.
    Notes
    -----
    The model uses the quasi-newton (QN) solver to find the solution in the
    primal space. Thus, in contrast to generic :class:`SVC<cuml.svm.SVR>`
    model, it does not compute the support coefficients/vectors.
    Check the solver's documentation for more details
    :class:`Quasi-Newton (L-BFGS/OWL-QN)<cuml.QN>`.
    For additional docs, see `scikitlearn's LinearSVR
    <https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVR.html>`_.
    """

    # Loss functions accepted by the regressor (classification losses such
    # as 'hinge' are rejected by the `loss` setter below).
    REGISTERED_LOSSES = set(
        ["epsilon_insensitive", "squared_epsilon_insensitive"]
    )

    def __init__(self, **kwargs):
        # NB: the keyword arguments are filtered in python/cuml/svm/linear.pyx
        #     the default parameter values are reexported from
        #     cpp/include/cuml/svm/linear.hpp
        # set regression-specific defaults
        if "loss" not in kwargs:
            kwargs["loss"] = "epsilon_insensitive"
        super().__init__(**kwargs)

    @property
    def loss(self):
        # Name-mangled storage (_LinearSVR__loss) keeps the validated value.
        return self.__loss

    @loss.setter
    def loss(self, loss: str):
        # Validate eagerly so a bad loss fails at set-time, not at fit-time.
        if loss not in self.REGISTERED_LOSSES:
            raise ValueError(
                f"Regression loss type "
                f"must be one of {self.REGISTERED_LOSSES}, "
                f"but given '{loss}'."
            )
        self.__loss = loss

    def get_param_names(self):
        """Return the list of hyperparameter names accepted by this model."""
        return list(
            {
                "handle",
                "verbose",
                "penalty",
                "loss",
                "fit_intercept",
                "penalized_intercept",
                "max_iter",
                "linesearch_max_iter",
                "lbfgs_memory",
                "C",
                "grad_tol",
                "change_tol",
                "epsilon",
            }.union(super().get_param_names())
        )
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/svm/linear.pyx | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import re
import inspect
import typing
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
import cuml
from rmm._lib.cuda_stream_view cimport cuda_stream_view
from collections import OrderedDict
from cython.operator cimport dereference as deref
from cuml.internals.base_helpers import BaseMetaClass
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.array import CumlArray
from cuml.internals.base import Base
from pylibraft.common.handle cimport handle_t
from pylibraft.common.interruptible import cuda_interruptible
from cuml.common import input_to_cuml_array
from libc.stdint cimport uintptr_t
from libcpp cimport bool as cppbool
from cuda.ccudart cimport(
cudaMemcpyAsync,
cudaMemcpyKind,
)
__all__ = ['LinearSVM', 'LinearSVM_defaults']
# Declarations mirroring the C++ API in cpp/include/cuml/svm/linear.hpp.
# The quoted string aliases map the short Cython names onto the
# fully-qualified C++ names nested inside LinearSVMParams.
cdef extern from "cuml/svm/linear.hpp" namespace "ML::SVM" nogil:

    cdef enum Penalty "ML::SVM::LinearSVMParams::Penalty":
        L1 "ML::SVM::LinearSVMParams::L1"
        L2 "ML::SVM::LinearSVMParams::L2"

    cdef enum Loss "ML::SVM::LinearSVMParams::Loss":
        HINGE "ML::SVM::LinearSVMParams::\
HINGE"
        SQUARED_HINGE "ML::SVM::LinearSVMParams::\
SQUARED_HINGE"
        EPSILON_INSENSITIVE "ML::SVM::LinearSVMParams::\
EPSILON_INSENSITIVE"
        SQUARED_EPSILON_INSENSITIVE "ML::SVM::LinearSVMParams::\
SQUARED_EPSILON_INSENSITIVE"

    # Plain-old-data hyperparameter struct shared with the C++ solver.
    cdef struct LinearSVMParams:
        Penalty penalty
        Loss loss
        cppbool fit_intercept
        cppbool penalized_intercept
        cppbool probability
        int max_iter
        int linesearch_max_iter
        int lbfgs_memory
        int verbose
        double C
        double grad_tol
        double change_tol
        double epsilon

    # C++ model: owns the device arrays (classes, weights, prob scales);
    # all lifecycle/inference operations are exposed as static methods.
    cdef cppclass LinearSVMModel[T]:
        const handle_t& handle
        T* classes
        T* w
        T* probScale
        size_t nClasses
        size_t coefRows

        size_t coefCols()

        @staticmethod
        LinearSVMModel[T] allocate(
            const handle_t& handle,
            const LinearSVMParams& params,
            const size_t nCols,
            const size_t nClasses) except +

        @staticmethod
        void free(
            const handle_t& handle,
            const LinearSVMModel[T]& model) except +

        @staticmethod
        LinearSVMModel[T] fit(
            const handle_t& handle,
            const LinearSVMParams& params,
            const T* X,
            const size_t nRows,
            const size_t nCols,
            const T* y,
            const T* sampleWeight) except +

        @staticmethod
        void predict(
            const handle_t& handle,
            const LinearSVMParams& params,
            const LinearSVMModel[T]& model,
            const T* X,
            const size_t nRows, const size_t nCols, T* out) except +

        @staticmethod
        void decisionFunction(
            const handle_t& handle,
            const LinearSVMParams& params,
            const LinearSVMModel[T]& model,
            const T* X,
            const size_t nRows, const size_t nCols, T* out) except +

        @staticmethod
        void predictProba(
            const handle_t& handle,
            const LinearSVMParams& params,
            const LinearSVMModel[T]& model,
            const T* X,
            const size_t nRows, const size_t nCols,
            const cppbool log, T* out) except +
cdef class LSVMPWrapper_:
    """Low-level holder for LinearSVMParams exposed to Python as a dict.

    Assigning a C ``LinearSVMParams`` struct to a Python ``dict`` lets
    Cython convert it automatically, which captures the default values
    defined on the C++ side without duplicating them here.
    """
    cdef readonly dict params

    def __cinit__(self, **kwargs):
        # Default-construct the C struct and convert it to a dict; this is
        # how the C++ defaults are re-exported to Python.
        cdef LinearSVMParams ps
        self.params = ps

    def _getparam(self, key):
        return self.params[key]

    def _setparam(self, key, val):
        self.params[key] = val

    def __init__(self, **kwargs):
        # Accept only known parameter names; unknown kwargs are ignored.
        allowed_keys = set(self.get_param_names())
        for key, val in kwargs.items():
            if key in allowed_keys:
                setattr(self, key, val)

    def get_param_names(self):
        # The struct's field names double as the parameter names.
        cdef LinearSVMParams ps
        return ps.keys()
# Properties that need a custom string <-> C enum translation.
class LSVMPWrapper(LSVMPWrapper_):

    @property
    def penalty(self) -> str:
        """Regularization type ('l1' or 'l2'), decoded from the C enum."""
        raw = self._getparam('penalty')
        for enum_val, text in ((Penalty.L1, "l1"), (Penalty.L2, "l2")):
            if raw == enum_val:
                return text
        raise ValueError(
            f"Unknown penalty enum value: {self._getparam('penalty')}")

    @penalty.setter
    def penalty(self, penalty: str):
        for text, enum_val in (("l1", Penalty.L1), ("l2", Penalty.L2)):
            if penalty == text:
                self._setparam('penalty', enum_val)
                return
        raise ValueError(f"Unknown penalty string value: {penalty}")

    @property
    def loss(self) -> str:
        """Loss function name, decoded from the C enum."""
        loss = self._getparam('loss')
        decode_table = (
            (Loss.HINGE, "hinge"),
            (Loss.SQUARED_HINGE, "squared_hinge"),
            (Loss.EPSILON_INSENSITIVE, "epsilon_insensitive"),
            (Loss.SQUARED_EPSILON_INSENSITIVE, "squared_epsilon_insensitive"),
        )
        for enum_val, text in decode_table:
            if loss == enum_val:
                return text
        raise ValueError(f"Unknown loss enum value: {loss}")

    @loss.setter
    def loss(self, loss: str):
        encode_table = (
            ("hinge", Loss.HINGE),
            ("squared_hinge", Loss.SQUARED_HINGE),
            ("epsilon_insensitive", Loss.EPSILON_INSENSITIVE),
            ("squared_epsilon_insensitive", Loss.SQUARED_EPSILON_INSENSITIVE),
        )
        for text, enum_val in encode_table:
            if loss == text:
                self._setparam('loss', enum_val)
                return
        raise ValueError(f"Unknown loss string value: {loss}")
# Generate pass-through properties for every parameter that was not given
# a custom conversion above.
def __add_prop(prop_name):
    def _getter(self):
        return self._getparam(prop_name)

    def _setter(self, value):
        self._setparam(prop_name, value)

    setattr(LSVMPWrapper, prop_name, property(_getter, _setter))


for prop_name in LSVMPWrapper().get_param_names():
    if hasattr(LSVMPWrapper, prop_name):
        continue  # a custom property was already defined above
    __add_prop(prop_name)
del __add_prop

# Default parameter values for LinearSVM, re-exported from C++.
LinearSVM_defaults = LSVMPWrapper()
# Untagged union over the two supported precisions; LinearSVMWrapper
# tracks the active member via its `dtype` field.
cdef union SomeLinearSVMModel:
    LinearSVMModel[float] float32
    LinearSVMModel[double] float64
cdef class LinearSVMWrapper:
    """Owning wrapper around the C++ ``LinearSVMModel`` device state.

    Either trains a new model (when ``X``/``y`` are given) or adopts
    externally supplied weights (when ``coefs`` is given), then exposes
    the model's device buffers (``coef_``, ``intercept_``, ``classes_``,
    ``probScale_``) as CumlArray views over the C++-owned memory.
    """
    cdef readonly object dtype
    cdef handle_t* handle
    cdef LinearSVMParams params
    cdef SomeLinearSVMModel model
    # CumlArray views over device memory owned by `model` (see __cinit__).
    cdef object __coef_
    cdef object __intercept_
    cdef object __classes_
    cdef object __probScale_

    def copy_array(
            self,
            target: CumlArray, source: CumlArray,
            synchronize: bool = True):
        """Device-to-device copy of `source` into `target` on this
        wrapper's CUDA stream; shapes and dtypes must match exactly."""
        cdef cuda_stream_view stream = self.handle.get_stream()
        if source.shape != target.shape:
            raise AttributeError(
                f"Expected an array of shape {target.shape}, "
                f"but got {source.shape}")
        if source.dtype != target.dtype:
            raise AttributeError(
                f"Expected an array of type {target.dtype}, "
                f"but got {source.dtype}")
        cudaMemcpyAsync(
            <void*><uintptr_t>target.ptr,
            <void*><uintptr_t>source.ptr,
            <size_t>(source.size),
            cudaMemcpyKind.cudaMemcpyDeviceToDevice,
            stream.value())
        if synchronize:
            self.handle.sync_stream()

    def __cinit__(
            self,
            handle: cuml.Handle,
            paramsWrapper: LSVMPWrapper,
            coefs: typing.Optional[CumlArray] = None,
            intercept: typing.Optional[CumlArray] = None,
            classes: typing.Optional[CumlArray] = None,
            probScale: typing.Optional[CumlArray] = None,
            X: typing.Optional[CumlArray] = None,
            y: typing.Optional[CumlArray] = None,
            sampleWeight: typing.Optional[CumlArray] = None):
        """Train a new model from (X, y) or adopt pre-computed weights.

        Exactly one of the two modes is used: training when ``coefs`` is
        None, otherwise allocation + copy of the given state.
        """
        self.handle = <handle_t*><size_t>handle.getHandle()
        self.params = paramsWrapper.params
        # check if parameters are passed correctly
        do_training = False
        if coefs is None:
            do_training = True
            if X is None or y is None:
                raise TypeError(
                    "You must provide either the weights "
                    "or input data (X, y) to the LinearSVMWrapper")
        else:
            do_training = False
            if coefs.shape[0] > 1 and classes is None:
                raise TypeError(
                    "You must provide classes along with the weights "
                    "to the LinearSVMWrapper classifier")
            if self.params.probability and probScale is None:
                raise TypeError(
                    "You must provide probability scales "
                    "to the LinearSVMWrapper probabolistic classifier")
            if self.params.fit_intercept and intercept is None:
                raise TypeError(
                    "You must provide intercept value to the LinearSVMWrapper"
                    " estimator with fit_intercept enabled")
        # `dtype` selects which member of the SomeLinearSVMModel union
        # is active for the rest of this object's lifetime.
        self.dtype = X.dtype if do_training else coefs.dtype
        nClasses = 0
        if self.dtype != np.float32 and self.dtype != np.float64:
            raise TypeError('Input data type must be float32 or float64')
        cdef uintptr_t Xptr = <uintptr_t>X.ptr if X is not None else 0
        cdef uintptr_t yptr = <uintptr_t>y.ptr if y is not None else 0
        cdef uintptr_t swptr = <uintptr_t>sampleWeight.ptr \
            if sampleWeight is not None else 0
        cdef size_t nCols = 0
        cdef size_t nRows = 0
        if do_training:
            nCols = X.shape[1]
            nRows = X.shape[0]
            # Fit releases the GIL and runs interruptibly (Ctrl+C support).
            if self.dtype == np.float32:
                with cuda_interruptible():
                    with nogil:
                        self.model.float32 = LinearSVMModel[float].fit(
                            deref(self.handle), self.params,
                            <const float*>Xptr,
                            nRows, nCols,
                            <const float*>yptr,
                            <const float*>swptr)
                nClasses = self.model.float32.nClasses
            elif self.dtype == np.float64:
                with cuda_interruptible():
                    with nogil:
                        self.model.float64 = LinearSVMModel[double].fit(
                            deref(self.handle), self.params,
                            <const double*>Xptr,
                            nRows, nCols,
                            <const double*>yptr,
                            <const double*>swptr)
                nClasses = self.model.float64.nClasses
        else:
            nCols = coefs.shape[1]
            nClasses = classes.shape[0] if classes is not None else 0
            if self.dtype == np.float32:
                self.model.float32 = LinearSVMModel[float].allocate(
                    deref(self.handle), self.params, nCols, nClasses)
            elif self.dtype == np.float64:
                self.model.float64 = LinearSVMModel[double].allocate(
                    deref(self.handle), self.params, nCols, nClasses)
        # prepare the attribute arrays
        cdef uintptr_t coef_ptr = 0
        cdef uintptr_t intercept_ptr = 0
        cdef uintptr_t classes_ptr = 0
        cdef uintptr_t probScale_ptr = 0
        wCols = 0
        wRows = 0
        if self.dtype == np.float32:
            wCols = self.model.float32.coefCols()
            wRows = self.model.float32.coefRows
            coef_ptr = <uintptr_t>self.model.float32.w
            # The intercept is stored as the last row of the weight matrix.
            intercept_ptr = <uintptr_t>(
                self.model.float32.w + <int>(nCols * wCols))
            classes_ptr = <uintptr_t>self.model.float32.classes
            probScale_ptr = <uintptr_t>self.model.float32.probScale
        elif self.dtype == np.float64:
            wCols = self.model.float64.coefCols()
            wRows = self.model.float64.coefRows
            coef_ptr = <uintptr_t>self.model.float64.w
            intercept_ptr = <uintptr_t>(
                self.model.float64.w + <int>(nCols * wCols))
            classes_ptr = <uintptr_t>self.model.float64.classes
            probScale_ptr = <uintptr_t>self.model.float64.probScale
        # owner=self keeps this wrapper (and thus the C++ buffers) alive
        # as long as any of the exported arrays is referenced.
        self.__coef_ = CumlArray(
            coef_ptr, dtype=self.dtype,
            shape=(wCols, nCols), owner=self, order='F')
        self.__intercept_ = CumlArray(
            intercept_ptr, dtype=self.dtype,
            shape=(wCols, ), owner=self, order='F'
        ) if self.params.fit_intercept else None
        self.__classes_ = CumlArray(
            classes_ptr, dtype=self.dtype,
            shape=(nClasses, ), owner=self, order='F'
        ) if nClasses > 0 else None
        self.__probScale_ = CumlArray(
            probScale_ptr, dtype=self.dtype,
            shape=(wCols, 2), owner=self, order='F'
        ) if self.params.probability else None
        # copy the passed state
        if not do_training:
            self.copy_array(self.__coef_, coefs, False)
            if intercept is not None:
                self.copy_array(self.__intercept_, intercept, False)
            if classes is not None:
                self.copy_array(self.__classes_, classes, False)
            if probScale is not None:
                self.copy_array(self.__probScale_, probScale, False)
        handle.sync()

    def __dealloc__(self):
        # Release the device buffers owned by the C++ model.
        if self.dtype == np.float32:
            LinearSVMModel[float].free(
                deref(self.handle), self.model.float32)
        elif self.dtype == np.float64:
            LinearSVMModel[double].free(
                deref(self.handle), self.model.float64)

    # The setters below copy into the existing device buffers in place,
    # keeping the C++ model and the exported arrays in sync.
    @property
    def coef_(self) -> CumlArray:
        return self.__coef_

    @coef_.setter
    def coef_(self, coef: CumlArray):
        self.copy_array(self.__coef_, coef)

    @property
    def intercept_(self) -> CumlArray:
        return self.__intercept_

    @intercept_.setter
    def intercept_(self, intercept: CumlArray):
        self.copy_array(self.__intercept_, intercept)

    @property
    def classes_(self) -> CumlArray:
        return self.__classes_

    @classes_.setter
    def classes_(self, classes: CumlArray):
        self.copy_array(self.__classes_, classes)

    @property
    def probScale_(self) -> CumlArray:
        return self.__probScale_

    @probScale_.setter
    def probScale_(self, probScale: CumlArray):
        self.copy_array(self.__probScale_, probScale)

    def predict(self, X: CumlArray) -> CumlArray:
        """Predict labels (classifier) or values (regressor) for X."""
        y = CumlArray.empty(
            shape=(X.shape[0],),
            dtype=self.dtype, order='C')
        if self.dtype == np.float32:
            LinearSVMModel[float].predict(
                deref(self.handle),
                self.params,
                self.model.float32,
                <const float*><uintptr_t>X.ptr,
                X.shape[0], X.shape[1],
                <float*><uintptr_t>y.ptr)
        elif self.dtype == np.float64:
            LinearSVMModel[double].predict(
                deref(self.handle),
                self.params,
                self.model.float64,
                <const double*><uintptr_t>X.ptr,
                X.shape[0], X.shape[1],
                <double*><uintptr_t>y.ptr)
        else:
            raise TypeError('Input data type must be float32 or float64')
        return y

    def decision_function(self, X: CumlArray) -> CumlArray:
        """Return raw decision values for X (1-D in the binary case)."""
        n_classes = self.classes_.shape[0]
        # special handling of binary case
        shape = (X.shape[0],) if n_classes <= 2 else (X.shape[0], n_classes)
        y = CumlArray.empty(
            shape=shape,
            dtype=self.dtype, order='C')
        if self.dtype == np.float32:
            LinearSVMModel[float].decisionFunction(
                deref(self.handle),
                self.params,
                self.model.float32,
                <const float*><uintptr_t>X.ptr,
                X.shape[0], X.shape[1],
                <float*><uintptr_t>y.ptr)
        elif self.dtype == np.float64:
            LinearSVMModel[double].decisionFunction(
                deref(self.handle),
                self.params,
                self.model.float64,
                <const double*><uintptr_t>X.ptr,
                X.shape[0], X.shape[1],
                <double*><uintptr_t>y.ptr)
        else:
            raise TypeError('Input data type should be float32 or float64')
        return y

    def predict_proba(self, X, log=False) -> CumlArray:
        """Return (log-)probabilities of shape (n_rows, n_classes)."""
        y = CumlArray.empty(
            shape=(X.shape[0], self.classes_.shape[0]),
            dtype=self.dtype, order='C')
        if self.dtype == np.float32:
            LinearSVMModel[float].predictProba(
                deref(self.handle),
                self.params,
                self.model.float32,
                <const float*><uintptr_t>X.ptr,
                X.shape[0], X.shape[1], log,
                <float*><uintptr_t>y.ptr)
        elif self.dtype == np.float64:
            LinearSVMModel[double].predictProba(
                deref(self.handle),
                self.params,
                self.model.float64,
                <const double*><uintptr_t>X.ptr,
                X.shape[0], X.shape[1], log,
                <double*><uintptr_t>y.ptr)
        else:
            raise TypeError('Input data type should be float32 or float64')
        return y
class WithReexportedParams(BaseMetaClass):
    '''Additional post-processing for children of the base class:

    1. Adds keyword arguments from the base classes to the signature.
       Note, this does not affect __init__ method itself, only its
       signature - i.e. how it appears in inspect/help/docs.
       __init__ method must have `**kwargs` argument for this to
       make sense.
    2. Applies string.format() to the class docstring with the globals()
       in the scope of the __init__ method.
       This allows to write variable names (e.g. some constants) in docs,
       such that they are substituted with their actual values.
       Useful for reexporting default values from somewhere else.
    '''

    def __new__(cls, name, bases, attrs):

        def get_class_params(init, parents):
            # collect the keyword arguments from the class hierarchy
            # (depth-first, so the most-derived definition wins on update)
            params = OrderedDict()
            for k in parents:
                params.update(
                    get_class_params(getattr(k, '__init__', None), k.__bases__)
                )
            if init is not None:
                sig = inspect.signature(init)
                for k, v in sig.parameters.items():
                    if v.kind == inspect.Parameter.KEYWORD_ONLY:
                        params[k] = v
                del sig
            return params

        init = attrs.get('__init__', None)
        if init is not None:
            # insert keyword arguments from parents
            ppams = get_class_params(init, bases)
            sig = inspect.signature(init)
            params = [
                p for p in sig.parameters.values()
                if p.kind != inspect.Parameter.KEYWORD_ONLY
            ]
            # splice inherited keyword-only params right after `self`
            params[1:1] = ppams.values()
            attrs['__init__'].__signature__ = sig.replace(parameters=params)
            del sig
            # format documentation -- replace variables with values
            doc = attrs.get('__doc__', None)
            if doc is not None:
                globs = init.__globals__.copy()
                # expose the class being built under its own name so the
                # docstring can reference class attributes (e.g. losses)
                globs[name] = type(name, (), attrs)
                attrs['__doc__'] = \
                    re.sub(r"\{ *([^ ]+) *\}", r"{\1}", doc).format(**globs)
                del globs
            del doc
        del init
        return super().__new__(cls, name, bases, attrs)
class LinearSVM(Base, metaclass=WithReexportedParams):
    """Shared base for LinearSVC/LinearSVR: thin stateful facade over the
    Cython LinearSVMWrapper, holding hyperparameters and fitted arrays."""

    # The C++-backed model; None until fit() or the first predict after
    # unpickling (it is recreated lazily from the arrays below).
    _model_: typing.Optional[LinearSVMWrapper]

    coef_ = CumlArrayDescriptor()
    intercept_ = CumlArrayDescriptor()
    classes_ = CumlArrayDescriptor()
    probScale_ = CumlArrayDescriptor()

    @property
    def model_(self) -> LinearSVMWrapper:
        if self._model_ is None:
            raise AttributeError(
                'The model is not trained yet (call fit() first).')
        return self._model_

    def __getstate__(self):
        # The C++ model is not picklable; drop it and let __sync_model
        # rebuild it from coef_/intercept_/classes_/probScale_ on demand.
        state = self.__dict__.copy()
        state['_model_'] = None
        return state

    def __init__(self, **kwargs):
        # `tol` is special in that it's not present in get_param_names,
        # so having a special logic here does not affect pickling/cloning.
        tol = kwargs.pop('tol', None)
        if tol is not None:
            default_to_ratio = \
                LinearSVM_defaults.change_tol / LinearSVM_defaults.grad_tol
            self.grad_tol = tol
            self.change_tol = tol * default_to_ratio
        # All arguments are optional (they have defaults),
        # yet we need to check for unused arguments
        allowed_keys = set(self.get_param_names())
        super_keys = set(super().get_param_names())
        remaining_kwargs = {}
        for key, val in kwargs.items():
            if key not in allowed_keys or key in super_keys:
                remaining_kwargs[key] = val
                continue
            if val is None:
                continue
            allowed_keys.remove(key)
            setattr(self, key, val)
        # set defaults
        for key in allowed_keys:
            setattr(self, key, getattr(LinearSVM_defaults, key, None))
        super().__init__(**remaining_kwargs)
        self._model_ = None
        self.coef_ = None
        self.intercept_ = None
        self.classes_ = None
        self.probScale_ = None

    @property
    def n_classes_(self) -> int:
        # Prefer the cached array; fall back to the live C++ model.
        if self.classes_ is not None:
            return self.classes_.shape[0]
        return self.model_.classes_.shape[0]

    def fit(self, X, y, sample_weight=None, convert_dtype=True) -> 'LinearSVM':
        """Train the model on (X, y) and cache the fitted arrays."""
        X_m, n_rows, _n_cols, self.dtype = input_to_cuml_array(X, order='F')
        convert_to_dtype = self.dtype if convert_dtype else None
        y_m = input_to_cuml_array(
            y, check_dtype=self.dtype,
            convert_to_dtype=convert_to_dtype,
            check_rows=n_rows, check_cols=1).array
        sample_weight_m = input_to_cuml_array(
            sample_weight, check_dtype=self.dtype,
            convert_to_dtype=convert_to_dtype,
            check_rows=n_rows, check_cols=1
        ).array if sample_weight is not None else None
        self._model_ = LinearSVMWrapper(
            handle=self.handle,
            paramsWrapper=LSVMPWrapper(**self.get_params()),
            X=X_m, y=y_m,
            sampleWeight=sample_weight_m)
        self.coef_ = self._model_.coef_
        self.intercept_ = self._model_.intercept_
        self.classes_ = self._model_.classes_
        self.probScale_ = self._model_.probScale_
        return self

    def __sync_model(self):
        '''
        Update the model on C++ side lazily before calling predict.
        '''
        if self._model_ is None:
            # rebuild the C++ model from the cached arrays (e.g. after
            # unpickling or after the attributes were set manually)
            self._model_ = LinearSVMWrapper(
                handle=self.handle,
                paramsWrapper=LSVMPWrapper(**self.get_params()),
                coefs=self.coef_,
                intercept=self.intercept_,
                classes=self.classes_,
                probScale=self.probScale_)
            self.coef_ = self._model_.coef_
            self.intercept_ = self._model_.intercept_
            self.classes_ = self._model_.classes_
            self.probScale_ = self._model_.probScale_
        else:
            # push any attribute reassignments down into the C++ buffers
            if self.coef_ is not self.model_.coef_:
                self.model_.coef_ = self.coef_
            if self.intercept_ is not self.model_.intercept_:
                self.model_.intercept_ = self.intercept_
            if self.classes_ is not self.model_.classes_:
                self.model_.classes_ = self.classes_
            if self.probScale_ is not self.model_.probScale_:
                self.model_.probScale_ = self.probScale_

    def predict(self, X, convert_dtype=True) -> CumlArray:
        """Predict targets for X using the fitted model."""
        convert_to_dtype = self.dtype if convert_dtype else None
        X_m, _, _, _ = input_to_cuml_array(
            X, check_dtype=self.dtype,
            convert_to_dtype=convert_to_dtype)

        self.__sync_model()
        return self.model_.predict(X_m)

    def decision_function(self, X, convert_dtype=True) -> CumlArray:
        """Return raw decision values for X."""
        convert_to_dtype = self.dtype if convert_dtype else None
        X_m, _, _, _ = input_to_cuml_array(
            X, check_dtype=self.dtype,
            convert_to_dtype=convert_to_dtype)

        self.__sync_model()
        return self.model_.decision_function(X_m)

    def predict_proba(self, X, log=False, convert_dtype=True) -> CumlArray:
        """Return class (log-)probabilities for X."""
        convert_to_dtype = self.dtype if convert_dtype else None
        X_m, _, _, _ = input_to_cuml_array(
            X, check_dtype=self.dtype,
            convert_to_dtype=convert_to_dtype)

        self.__sync_model()
        return self.model_.predict_proba(X_m, log=log)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/svm/svm_base.pyx | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import gpu_only_import
cupy = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
from libc.stdint cimport uintptr_t
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.base import Base
from cuml.common.exceptions import NotFittedError
from pylibraft.common.handle cimport handle_t
from cuml.common import input_to_cuml_array
from cuml.internals.input_utils import determine_array_type_full
from cuml.common import using_output_type
from cuml.internals.logger import warn
from cuml.internals.mixins import FMajorInputTagMixin
from cuml.internals.array_sparse import SparseCumlArray, SparseCumlArrayInput
from libcpp cimport bool
# Declarations mirroring the RAFT kernel-parameter types used by the
# C++ SVM solver.
cdef extern from "raft/distance/distance_types.hpp" \
        namespace "raft::distance::kernels":

    enum KernelType:
        LINEAR,
        POLYNOMIAL,
        RBF,
        TANH

    cdef struct KernelParams:
        KernelType kernel
        int degree
        double gamma
        double coef0

cdef extern from "cuml/svm/svm_parameter.h" namespace "ML::SVM":
    enum SvmType:
        C_SVC,
        NU_SVC,
        EPSILON_SVR,
        NU_SVR

    cdef struct SvmParameter:
        # parameters for training
        double C
        double cache_size
        int max_iter
        int nochange_steps
        double tol
        int verbosity
        double epsilon
        SvmType svmType

cdef extern from "cuml/svm/svm_model.h" namespace "ML::SVM":
    # CSR-like storage of the support vectors (dense data lives in `data`).
    cdef cppclass SupportStorage[math_t]:
        int nnz
        int* indptr
        int* indices
        math_t* data

    cdef cppclass SvmModel[math_t]:
        # parameters of a fitted model
        int n_support
        int n_cols
        math_t b
        math_t *dual_coefs
        SupportStorage[math_t] support_matrix
        int *support_idx
        int n_classes
        math_t *unique_labels

cdef extern from "cuml/svm/svc.hpp" namespace "ML::SVM":

    cdef void svcPredict[math_t](
        const handle_t &handle, math_t* data, int n_rows, int n_cols,
        KernelParams &kernel_params, const SvmModel[math_t] &model,
        math_t *preds, math_t buffer_size, bool predict_class) except +

    cdef void svcPredictSparse[math_t](
        const handle_t &handle, int* indptr, int* indices,
        math_t* data, int n_rows, int n_cols, int nnz,
        KernelParams &kernel_params, const SvmModel[math_t] &model,
        math_t *preds, math_t buffer_size, bool predict_class) except +

    cdef void svmFreeBuffers[math_t](const handle_t &handle,
                                     SvmModel[math_t] &m) except +
class SVMBase(Base,
              FMajorInputTagMixin):
    """
    Base class for Support Vector Machines

    Currently only binary classification is supported.

    The solver uses the SMO method to fit the classifier. We use the Optimized
    Hierarchical Decomposition [1]_ variant of the SMO algorithm, similar to
    [2]_

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    C : float (default = 1.0)
        Penalty parameter C
    kernel : string (default='rbf')
        Specifies the kernel function. Possible options: 'linear', 'poly',
        'rbf', 'sigmoid'. Currently precomputed kernels are not supported.
    degree : int (default=3)
        Degree of polynomial kernel function.
    gamma : float or string (default = 'scale')
        Coefficient for rbf, poly, and sigmoid kernels. You can specify the
        numeric value, or use one of the following options:

        - 'auto': gamma will be set to ``1 / n_features``
        - 'scale': gamma will be set to ``1 / (n_features * X.var())``

    coef0 : float (default = 0.0)
        Independent term in kernel function, only significant for poly and
        sigmoid
    tol : float (default = 1e-3)
        Tolerance for stopping criterion.
    cache_size : float (default = 1024.0)
        Size of the kernel cache during training in MiB. Increase it to improve
        the training time, at the cost of higher memory footprint. After
        training the kernel cache is deallocated.
        During prediction, we also need a temporary space to store kernel
        matrix elements (this can be significant if n_support is large).
        The cache_size variable sets an upper limit to the prediction
        buffer as well.
    max_iter : int (default = 100*n_samples)
        Limit the number of outer iterations in the solver
    nochange_steps : int (default = 1000)
        We monitor how much our stopping criteria changes during outer
        iterations. If it does not change (changes less than 1e-3*tol)
        for nochange_steps consecutive steps, then we stop training.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    epsilon: float (default = 0.1)
        epsilon parameter of the epsilon-SVR model. There is no penalty
        associated to points that are predicted within the epsilon-tube
        around the target values.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Attributes
    ----------
    n_support_ : int
        The total number of support vectors. Note: this will change in the
        future to represent number support vectors for each class (like
        in Sklearn, see Issue #956)
    support_ : int, shape = [n_support]
        Device array of support vector indices
    support_vectors_ : float, shape [n_support, n_cols]
        Device array of support vectors
    dual_coef_ : float, shape = [1, n_support]
        Device array of coefficients for support vectors
    intercept_ : float
        The constant in the decision function
    fit_status_ : int
        0 if SVM is correctly fitted
    coef_ : float, shape [1, n_cols]
        Only available for linear kernels. It is the normal of the
        hyperplane.
        ``coef_ = sum_k=1..n_support dual_coef_[k] * support_vectors[k,:]``

    Notes
    -----
    For additional docs, see `scikitlearn's SVC
    <https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html>`_.

    References
    ----------
    .. [1] J. Vanek et al. A GPU-Architecture Optimized Hierarchical
       Decomposition Algorithm for Support VectorMachine Training, IEEE
       Transactions on Parallel and Distributed Systems, vol 28, no 12, 3330,
       (2017)
    .. [2] `Z. Wen et al. ThunderSVM: A Fast SVM Library on GPUs and CPUs,
       Journal of Machine Learning Research, 19, 1-5 (2018)
       <https://github.com/Xtra-Computing/thundersvm>`_
    """

    # Descriptors performing automatic output-type conversion of the
    # fitted-model attributes.
    dual_coef_ = CumlArrayDescriptor()
    support_ = CumlArrayDescriptor()
    support_vectors_ = CumlArrayDescriptor()
    _intercept_ = CumlArrayDescriptor()
    _internal_coef_ = CumlArrayDescriptor()
    _unique_labels_ = CumlArrayDescriptor()

    def __init__(self, *, handle=None, C=1, kernel='rbf', degree=3,
                 gamma='auto', coef0=0.0, tol=1e-3, cache_size=1024.0,
                 max_iter=-1, nochange_steps=1000, verbose=False,
                 epsilon=0.1, output_type=None):
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)
        # Input parameters for training
        self.tol = tol
        self.C = C
        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.coef0 = coef0
        self.cache_size = cache_size
        self.max_iter = max_iter
        self.nochange_steps = nochange_steps
        self.epsilon = epsilon
        self.svmType = None  # Child class should set self.svmType

        # Parameter to indicate if model has been correctly fitted
        # fit_status == -1 indicates that the model is not yet fitted
        self._fit_status_ = -1

        # Attributes (parameters of the fitted model)
        self.dual_coef_ = None
        self.support_ = None
        self.support_vectors_ = None
        self._intercept_ = None
        self.n_support_ = None

        self._c_kernel = self._get_c_kernel(kernel)
        self._gamma_val = None  # the actual numerical value used for training
        self.coef_ = None  # value of the coef_ attribute, only for lin kernel
        self.dtype = None
        self._model = None  # structure of the model parameters
        self._freeSvmBuffers = False  # whether to call the C++ lib for cleanup

        # Warn (once per class) that the dedicated linear solver is likely
        # faster for linear kernels.
        if (kernel == 'linear' or (kernel == 'poly' and degree == 1)) \
                and not getattr(type(self), "_linear_kernel_warned", False):
            setattr(type(self), "_linear_kernel_warned", True)
            cname = type(self).__name__
            # NOTE(review): "tranining" typo in this user-facing message;
            # fix in a code (not doc) change.
            warn(f'{cname} with the linear kernel can be much faster using '
                 f'the specialized solver provided by Linear{cname}. Consider '
                 f'switching to Linear{cname} if tranining takes too long.')

    def __del__(self):
        # Release C++-side buffers when the Python object is collected.
        self._dealloc()

    def _dealloc(self):
        # deallocate model parameters
        cdef SvmModel[float] *model_f
        cdef SvmModel[double] *model_d
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        if self._model is not None:
            if self.dtype == np.float32:
                model_f = <SvmModel[float]*><uintptr_t> self._model
                if self._freeSvmBuffers:
                    # Device arrays inside the struct are owned by the C++
                    # layer only when we unpacked a freshly fitted model.
                    svmFreeBuffers(handle_[0], model_f[0])
                del model_f
            elif self.dtype == np.float64:
                model_d = <SvmModel[double]*><uintptr_t> self._model
                if self._freeSvmBuffers:
                    svmFreeBuffers(handle_[0], model_d[0])
                del model_d
            else:
                raise TypeError("Unknown type for SVC class")
            try:
                del self._fit_status_
            except AttributeError:
                pass

        self._model = None

    def _get_c_kernel(self, kernel):
        """
        Get KernelType from the kernel string.

        Parameters
        ----------
        kernel: string, ('linear', 'poly', 'rbf', or 'sigmoid')
        """
        return {
            'linear': LINEAR,
            'poly': POLYNOMIAL,
            'rbf': RBF,
            'sigmoid': TANH
        }[kernel]

    def _calc_gamma_val(self, X):
        """
        Calculate the value for gamma kernel parameter.

        Parameters
        ----------
        X: array like
            Array of training vectors. The 'auto' and 'scale' gamma options
            derive the numerical value of the gamma parameter from X.
        """
        if type(self.gamma) is str:
            if self.gamma == 'auto':
                return 1 / self.n_cols
            elif self.gamma == 'scale':
                if isinstance(X, SparseCumlArray):
                    # account for zero values: compute the variance over all
                    # n_rows * n_cols entries, not just the stored nonzeros
                    data_cupy = cupy.asarray(X.data).copy()
                    num_elements = self.n_cols * self.n_rows
                    extended_mean = data_cupy.mean()*X.nnz/num_elements
                    data_cupy = (data_cupy - extended_mean)**2
                    x_var = (data_cupy.sum() + (num_elements-X.nnz)*extended_mean*extended_mean)/num_elements
                else:
                    x_var = cupy.asarray(X).var().item()
                return 1 / (self.n_cols * x_var)
            else:
                raise ValueError("Not implemented gamma option: " + self.gamma)
        else:
            return self.gamma

    def _calc_coef(self):
        # Linear-kernel hyperplane normal: dual_coef_ @ support_vectors_.
        if (self.n_support_ == 0):
            return cupy.zeros((1, self.n_cols), dtype=self.dtype)
        with using_output_type("cupy"):
            return cupy.dot(self.dual_coef_, self.support_vectors_)

    def _check_is_fitted(self, attr):
        # Raise NotFittedError unless `attr` exists and is non-None.
        if not hasattr(self, attr) or (getattr(self, attr) is None):
            msg = ("This classifier instance is not fitted yet. Call 'fit' "
                   "with appropriate arguments before using this estimator.")
            raise NotFittedError(msg)

    @property
    @cuml.internals.api_base_return_array_skipall
    def coef_(self):
        # Lazily computed; only defined for the linear kernel.
        if self._c_kernel != LINEAR:
            raise AttributeError("coef_ is only available for linear kernels")
        if self._model is None:
            raise AttributeError("Call fit before prediction")
        if self._internal_coef_ is None:
            self._internal_coef_ = self._calc_coef()
        # Call the base class to perform the output conversion
        return self._internal_coef_

    @coef_.setter
    def coef_(self, value):
        self._internal_coef_ = value

    @property
    @cuml.internals.api_base_return_array_skipall
    def intercept_(self):
        if self._intercept_ is None:
            raise AttributeError("intercept_ called before fit.")
        return self._intercept_

    @intercept_.setter
    def intercept_(self, value):
        self._intercept_ = value

    def _get_kernel_params(self, X=None):
        """ Wrap the kernel parameters in a KernelParams object """
        cdef KernelParams _kernel_params
        if X is not None:
            # Resolve 'auto'/'scale' gamma from the training data.
            self._gamma_val = self._calc_gamma_val(X)
        _kernel_params.kernel = self._c_kernel
        _kernel_params.degree = self.degree
        _kernel_params.gamma = self._gamma_val
        _kernel_params.coef0 = self.coef0
        return _kernel_params

    def _get_svm_params(self):
        """ Wrap the training parameters in an SvmParameter object """
        cdef SvmParameter param
        param.C = self.C
        param.cache_size = self.cache_size
        param.max_iter = self.max_iter
        param.nochange_steps = self.nochange_steps
        param.tol = self.tol
        param.verbosity = self.verbose
        param.epsilon = self.epsilon
        param.svmType = self.svmType
        return param

    @cuml.internals.api_base_return_any_skipall
    def _get_svm_model(self):
        """ Wrap the fitted model parameters into an SvmModel structure.
        This is used when the model is loaded by pickle, to recreate the
        self._model struct that we can pass to the predictor.
        """
        cdef SvmModel[float] *model_f
        cdef SvmModel[double] *model_d
        if self.dual_coef_ is None:
            # the model is not fitted in this case
            return None
        if self.dtype == np.float32:
            model_f = new SvmModel[float]()
            model_f.n_support = self.n_support_
            model_f.n_cols = self.n_cols
            model_f.b = self._intercept_.item()
            model_f.dual_coefs = \
                <float*><size_t>self.dual_coef_.ptr
            if isinstance(self.support_vectors_, SparseCumlArray):
                # Sparse support vectors: expose the CSR components.
                model_f.support_matrix.nnz = self.support_vectors_.nnz
                model_f.support_matrix.indptr = <int*><uintptr_t>self.support_vectors_.indptr.ptr
                model_f.support_matrix.indices = <int*><uintptr_t>self.support_vectors_.indices.ptr
                model_f.support_matrix.data = <float*><uintptr_t>self.support_vectors_.data.ptr
            else:
                model_f.support_matrix.data = <float*><uintptr_t>self.support_vectors_.ptr
            model_f.support_idx = \
                <int*><uintptr_t>self.support_.ptr
            model_f.n_classes = self.n_classes_
            if self.n_classes_ > 0:
                model_f.unique_labels = \
                    <float*><uintptr_t>self._unique_labels_.ptr
            else:
                model_f.unique_labels = NULL
            return <uintptr_t>model_f
        else:
            model_d = new SvmModel[double]()
            model_d.n_support = self.n_support_
            model_d.n_cols = self.n_cols
            model_d.b = self._intercept_.item()
            model_d.dual_coefs = \
                <double*><size_t>self.dual_coef_.ptr
            if isinstance(self.support_vectors_, SparseCumlArray):
                model_d.support_matrix.nnz = self.support_vectors_.nnz
                model_d.support_matrix.indptr = <int*><uintptr_t>self.support_vectors_.indptr.ptr
                model_d.support_matrix.indices = <int*><uintptr_t>self.support_vectors_.indices.ptr
                model_d.support_matrix.data = <double*><uintptr_t>self.support_vectors_.data.ptr
            else:
                model_d.support_matrix.data = <double*><uintptr_t>self.support_vectors_.ptr
            model_d.support_idx = \
                <int*><uintptr_t>self.support_.ptr
            model_d.n_classes = self.n_classes_
            if self.n_classes_ > 0:
                model_d.unique_labels = \
                    <double*><uintptr_t>self._unique_labels_.ptr
            else:
                model_d.unique_labels = NULL
            return <uintptr_t>model_d

    def _unpack_svm_model(self, b, n_support, dual_coefs, support_idx, nnz, indptr, indices, data, n_classes, unique_labels):
        # Wrap the raw device pointers of a fitted C++ model into CumlArrays.
        # nnz == -1 signals dense support vectors; otherwise CSR components.
        self._intercept_ = CumlArray.full(1, b, self.dtype)
        self.n_support_ = n_support
        if n_support > 0:
            self.dual_coef_ = CumlArray(
                data=dual_coefs,
                shape=(1, self.n_support_),
                dtype=self.dtype,
                order='F')
            self.support_ = CumlArray(
                data=support_idx,
                shape=(self.n_support_,),
                dtype=np.int32,
                order='F')
            if nnz == -1:
                self.support_vectors_ = CumlArray(
                    data=data,
                    shape=(self.n_support_, self.n_cols),
                    dtype=self.dtype,
                    order='F')
            else:
                indptr = CumlArray(data=indptr,
                                   shape=(self.n_support_ + 1,),
                                   dtype=np.int32,
                                   order='F')
                indices = CumlArray(data=indices,
                                    shape=(nnz,),
                                    dtype=np.int32,
                                    order='F')
                data = CumlArray(data=data,
                                 shape=(nnz,),
                                 dtype=self.dtype,
                                 order='F')
                sparse_input = SparseCumlArrayInput(
                    dtype=self.dtype,
                    indptr=indptr,
                    indices=indices,
                    data=data,
                    nnz=nnz,
                    shape=(self.n_support_, self.n_cols))
                self.support_vectors_ = SparseCumlArray(data=sparse_input)
        self.n_classes_ = n_classes
        if self.n_classes_ > 0:
            self._unique_labels_ = CumlArray(
                data=unique_labels,
                shape=(self.n_classes_,),
                dtype=self.dtype,
                order='F')
        else:
            self._unique_labels_ = None

    def _unpack_model(self):
        """ Expose the model parameters as attributes """
        cdef SvmModel[float] *model_f
        cdef SvmModel[double] *model_d

        # Mark that the C++ layer should free the parameter vectors
        # If we could pass the deviceArray deallocator as finalizer for the
        # device_array_from_ptr function, then this would not be necessary.
        self._freeSvmBuffers = True

        if self.dtype == np.float32:
            model_f = <SvmModel[float]*><uintptr_t> self._model
            self._unpack_svm_model(
                model_f.b,
                model_f.n_support,
                <uintptr_t>model_f.dual_coefs,
                <uintptr_t>model_f.support_idx,
                model_f.support_matrix.nnz,
                <uintptr_t>model_f.support_matrix.indptr,
                <uintptr_t>model_f.support_matrix.indices,
                <uintptr_t>model_f.support_matrix.data,
                model_f.n_classes,
                <uintptr_t> model_f.unique_labels)
        else:
            model_d = <SvmModel[double]*><uintptr_t> self._model
            self._unpack_svm_model(
                model_d.b,
                model_d.n_support,
                <uintptr_t>model_d.dual_coefs,
                <uintptr_t>model_d.support_idx,
                model_d.support_matrix.nnz,
                <uintptr_t>model_d.support_matrix.indptr,
                <uintptr_t>model_d.support_matrix.indices,
                <uintptr_t>model_d.support_matrix.data,
                model_d.n_classes,
                <uintptr_t> model_d.unique_labels)

        if self.n_support_ == 0:
            # Degenerate fit: expose empty (but well-typed) attributes.
            self.dual_coef_ = CumlArray.empty(
                shape=(1, 0),
                dtype=self.dtype,
                order='F')

            self.support_ = CumlArray.empty(
                shape=(0,),
                dtype=np.int32,
                order='F')

            # Setting all dims to zero due to issue
            # https://github.com/rapidsai/cuml/issues/4095
            self.support_vectors_ = CumlArray.empty(
                shape=(0, 0),
                dtype=self.dtype,
                order='F')

    def predict(self, X, predict_class, convert_dtype=True) -> CumlArray:
        """
        Predicts the y for X, where y is either the decision function value
        (if predict_class == False), or the label associated with X.

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Dense matrix (floats or doubles) of shape (n_samples, n_features).
            Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
            ndarray, cuda array interface compliant array like CuPy
        predict_class : boolean
            Switch whether to return class label (true), or decision function
            value (false).

        Returns
        -------
        y : cuDF Series
           Dense vector (floats or doubles) of shape (n_samples, 1)
        """
        if predict_class:
            out_dtype = self._get_target_dtype()
        else:
            out_dtype = self.dtype
        cuml.internals.set_api_output_dtype(out_dtype)

        self._check_is_fitted('_model')

        _array_type, is_sparse = determine_array_type_full(X)

        if is_sparse:
            X_m = SparseCumlArray(X)
            n_rows = X_m.shape[0]
            n_cols = X_m.shape[1]
            pred_dtype = X_m.dtype
        else:
            X_m, n_rows, n_cols, pred_dtype = \
                input_to_cuml_array(
                    X,
                    check_dtype=self.dtype,
                    convert_to_dtype=(self.dtype if convert_dtype else None))

        preds = CumlArray.zeros(n_rows, dtype=self.dtype, index=X_m.index)
        cdef uintptr_t preds_ptr = preds.ptr
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef SvmModel[float]* model_f
        cdef SvmModel[double]* model_d

        # For the dense case indptr/indices are unused by the C++ call and
        # simply alias the data pointer.
        cdef int X_rows = n_rows
        cdef int X_cols = n_cols
        cdef int X_nnz = X_m.nnz if is_sparse else n_rows * n_cols
        cdef uintptr_t X_indptr = X_m.indptr.ptr if is_sparse else X_m.ptr
        cdef uintptr_t X_indices = X_m.indices.ptr if is_sparse else X_m.ptr
        cdef uintptr_t X_data = X_m.data.ptr if is_sparse else X_m.ptr

        if self.dtype == np.float32:
            model_f = <SvmModel[float]*><size_t> self._model
            if is_sparse:
                svcPredictSparse(handle_[0], <int*>X_indptr, <int*>X_indices,
                                 <float*>X_data, X_rows, X_cols, X_nnz,
                                 self._get_kernel_params(), model_f[0],
                                 <float*>preds_ptr, <float>self.cache_size,
                                 <bool> predict_class)
            else:
                svcPredict(handle_[0], <float*>X_data, n_rows, n_cols,
                           self._get_kernel_params(), model_f[0],
                           <float*>preds_ptr, <float>self.cache_size,
                           <bool> predict_class)
        else:
            model_d = <SvmModel[double]*><size_t> self._model
            if is_sparse:
                svcPredictSparse(handle_[0], <int*>X_indptr, <int*>X_indices,
                                 <double*>X_data, X_rows, X_cols, X_nnz,
                                 self._get_kernel_params(), model_d[0],
                                 <double*>preds_ptr, <double>self.cache_size,
                                 <bool> predict_class)
            else:
                svcPredict(handle_[0], <double*>X_data, n_rows, n_cols,
                           self._get_kernel_params(), model_d[0],
                           <double*>preds_ptr, <double>self.cache_size,
                           <bool> predict_class)

        self.handle.sync()

        del X_m

        return preds

    def get_param_names(self):
        return super().get_param_names() + [
            "C",
            "kernel",
            "degree",
            "gamma",
            "coef0",
            "tol",
            "cache_size",
            "max_iter",
            "nochange_steps",
            "epsilon",
        ]

    def __getstate__(self):
        # handle and the raw C++ model pointer are not picklable; the model
        # struct is rebuilt from the array attributes in __setstate__.
        state = self.__dict__.copy()
        del state['handle']
        del state['_model']
        return state

    def __setstate__(self, state):
        super(SVMBase, self).__init__(handle=None,
                                      verbose=state['verbose'])
        self.__dict__.update(state)
        self._model = self._get_svm_model()
        # Buffers now belong to the Python-side CumlArrays, not the C++ layer.
        self._freeSvmBuffers = False
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/svm/__init__.py | #
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.svm.svc import SVC
from cuml.svm.svr import SVR
from cuml.svm.linear_svc import LinearSVC
from cuml.svm.linear_svr import LinearSVR
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/svm/linear_svc.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.mixins import ClassifierMixin
from cuml.svm.linear import LinearSVM, LinearSVM_defaults # noqa: F401
from cuml.svm.svc import apply_class_weight
__all__ = ["LinearSVC"]
class LinearSVC(LinearSVM, ClassifierMixin):
    """
    LinearSVC (Support Vector Classification with the linear kernel)

    Construct a linear SVM classifier for training and predictions.

    Examples
    --------
    .. code-block:: python

        >>> import cupy as cp
        >>> from cuml.svm import LinearSVC
        >>> X = cp.array([[1,1], [2,1], [1,2], [2,2], [1,3], [2,3]],
        ...              dtype=cp.float32);
        >>> y = cp.array([0, 0, 1, 0, 1, 1], dtype=cp.float32)
        >>> clf = LinearSVC(loss='squared_hinge', penalty='l1', C=1)
        >>> clf.fit(X, y)
        LinearSVC()
        >>> print("Predicted labels:", clf.predict(X))
        Predicted labels: [0. 0. 1. 0. 1. 1.]

    Parameters
    ----------
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    penalty : {{'l1', 'l2'}} (default = '{LinearSVM_defaults.penalty}')
        The regularization term of the target function.
    loss : {LinearSVC.REGISTERED_LOSSES} (default = 'squared_hinge')
        The loss term of the target function.
    fit_intercept : {LinearSVM_defaults.fit_intercept.__class__.__name__ \
            } (default = {LinearSVM_defaults.fit_intercept})
        Whether to fit the bias term. Set to False if you expect that the
        data is already centered.
    penalized_intercept : { \
            LinearSVM_defaults.penalized_intercept.__class__.__name__ \
            } (default = {LinearSVM_defaults.penalized_intercept})
        When true, the bias term is treated the same way as other features;
        i.e. it's penalized by the regularization term of the target function.
        Enabling this feature forces an extra copying the input data X.
    max_iter : {LinearSVM_defaults.max_iter.__class__.__name__ \
            } (default = {LinearSVM_defaults.max_iter})
        Maximum number of iterations for the underlying solver.
    linesearch_max_iter : { \
            LinearSVM_defaults.linesearch_max_iter.__class__.__name__ \
            } (default = {LinearSVM_defaults.linesearch_max_iter})
        Maximum number of linesearch (inner loop) iterations for
        the underlying (QN) solver.
    lbfgs_memory : { \
            LinearSVM_defaults.lbfgs_memory.__class__.__name__ \
            } (default = {LinearSVM_defaults.lbfgs_memory})
        Number of vectors approximating the hessian for the underlying QN
        solver (l-bfgs).
    class_weight : dict or string (default=None)
        Weights to modify the parameter C for class i to class_weight[i]*C. The
        string 'balanced' is also accepted, in which case ``class_weight[i] =
        n_samples / (n_classes * n_samples_of_class[i])``
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    C : {LinearSVM_defaults.C.__class__.__name__ \
            } (default = {LinearSVM_defaults.C})
        The constant scaling factor of the loss term in the target formula
        `F(X, y) = penalty(X) + C * loss(X, y)`.
    grad_tol : {LinearSVM_defaults.grad_tol.__class__.__name__ \
            } (default = {LinearSVM_defaults.grad_tol})
        The threshold on the gradient for the underlying QN solver.
    change_tol : {LinearSVM_defaults.change_tol.__class__.__name__ \
            } (default = {LinearSVM_defaults.change_tol})
        The threshold on the function change for the underlying QN solver.
    tol : Optional[float] (default = None)
        Tolerance for the stopping criterion.
        This is a helper transient parameter that, when present, sets both
        `grad_tol` and `change_tol` to the same value. When any of the two
        `***_tol` parameters are passed as well, they take the precedence.
    probability: {LinearSVM_defaults.probability.__class__.__name__ \
            } (default = {LinearSVM_defaults.probability})
        Enable or disable probability estimates.
    multi_class : {{currently, only 'ovr'}} (default = 'ovr')
        Multiclass classification strategy. ``'ovo'`` uses `OneVsOneClassifier
        <https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsOneClassifier.html>`_
        while ``'ovr'`` selects `OneVsRestClassifier
        <https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html>`_
    output_type : {{'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Attributes
    ----------
    intercept_ : float, shape (`n_classes_`,)
        The constant in the decision function
    coef_ : float, shape (`n_classes_`, n_cols)
        The vectors defining the hyperplanes that separate the classes.
    classes_ : float, shape (`n_classes_`,)
        Array of class labels.
    probScale_ : float, shape (`n_classes_`, 2)
        Probability calibration constants (for the probabilistic output).
    n_classes_ : int
        Number of classes

    Notes
    -----
    The model uses the quasi-newton (QN) solver to find the solution in the
    primal space. Thus, in contrast to generic :class:`SVC<cuml.svm.SVC>`
    model, it does not compute the support coefficients/vectors.

    Check the solver's documentation for more details
    :class:`Quasi-Newton (L-BFGS/OWL-QN)<cuml.QN>`.

    For additional docs, see `scikitlearn's LinearSVC
    <https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html>`_.
    """

    # Loss functions valid for the classification variant of the linear SVM.
    REGISTERED_LOSSES = set(["hinge", "squared_hinge"])

    def __init__(self, **kwargs):
        # NB: the keyword arguments are filtered in python/cuml/svm/linear.pyx
        #     the default parameter values are reexported from
        #     cpp/include/cuml/svm/linear.hpp
        # set classification-specific defaults
        if "loss" not in kwargs:
            kwargs["loss"] = "squared_hinge"
        if "multi_class" not in kwargs:
            # 'multi_class' is a real parameter here
            # 'multiclass_strategy' is an ephemeral compatibility parameter
            #     for easier switching between
            #     sklearn.LinearSVC <-> cuml.LinearSVC <-> cuml.SVC
            kwargs["multi_class"] = kwargs.pop("multiclass_strategy", "ovr")

        super().__init__(**kwargs)

    @property
    def loss(self):
        return self.__loss

    @loss.setter
    def loss(self, loss: str):
        # Validate against the classification losses before storing.
        if loss not in self.REGISTERED_LOSSES:
            raise ValueError(
                f"Classification loss type "
                f"must be one of {self.REGISTERED_LOSSES}, "
                f"but given '{loss}'."
            )
        self.__loss = loss

    def get_param_names(self):
        return list(
            {
                "handle",
                "class_weight",
                "verbose",
                "penalty",
                "loss",
                "fit_intercept",
                "penalized_intercept",
                "probability",
                "max_iter",
                "linesearch_max_iter",
                "lbfgs_memory",
                "C",
                "grad_tol",
                "change_tol",
                "multi_class",
            }.union(super().get_param_names())
        )

    def fit(self, X, y, sample_weight=None, convert_dtype=True) -> "LinearSVM":
        # Fold class_weight into per-sample weights before fitting.
        sample_weight = apply_class_weight(
            self.handle,
            sample_weight,
            self.class_weight,
            y,
            self.verbose,
            self.output_type,
            X.dtype,
        )
        return super(LinearSVC, self).fit(X, y, sample_weight, convert_dtype)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/experimental/__init__.py | from cuml.experimental.fil import ForestInference
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/fil/postprocessing.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Post-processing operation enums mirrored from the experimental FIL C++
# header. The string aliases map Cython names onto the scoped C++ enums.
cdef extern from "cuml/experimental/fil/postproc_ops.hpp" namespace "ML::experimental::fil" nogil:
    # Operations applied across a whole output row.
    cdef enum row_op:
        row_disable "ML::experimental::fil::row_op::disable",
        softmax "ML::experimental::fil::row_op::softmax",
        max_index "ML::experimental::fil::row_op::max_index"
    # Operations applied element-wise to each output value.
    cdef enum element_op:
        elem_disable "ML::experimental::fil::element_op::disable",
        signed_square "ML::experimental::fil::element_op::signed_square",
        hinge "ML::experimental::fil::element_op::hinge",
        sigmoid "ML::experimental::fil::element_op::sigmoid",
        exponential "ML::experimental::fil::element_op::exponential",
        logarithm_one_plus_exp "ML::experimental::fil::element_op::logarithm_one_plus_exp"
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/fil/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython sources are accumulated by add_module_gpu_default, which adds
# fil.pyx only when the relevant algorithms are enabled in the build.
set(cython_sources "")
add_module_gpu_default("fil.pyx" ${fil_algo} ${randomforestclassifier_algo} ${randomforestregressor_algo})

# Libraries the generated extension modules link against.
set(linked_libraries
    "${cuml_sg_libraries}"
    "${CUML_PYTHON_TREELITE_TARGET}"
)

# Build one C++ extension module per collected .pyx source.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}"
  MODULE_PREFIX experimental_fil_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/fil/README.md | # Experimental FIL - RAPIDS Forest Inference Library
This experimental feature offers a new implementation of cuML's existing
Forest Inference Library. The primary advantages of this new
implementation are:
1. Models can now be evaluated on CPU in addition to GPU.
2. Faster GPU execution on some models and hardware.
3. Support for a wider range of Treelite's available model parameters.
In addition, there are a few limitations of this implementation,
including:
1. Models with shallow trees (depth 2-4) typically execute slower than with
existing FIL.
2. This implementation has not been as exhaustively tested as the existing
FIL.
If you need to absolutely maximize runtime performance, it is
recommended that you test both the new and existing FIL implementations with
realistic batch sizes on your target hardware to determine which is optimal
for your specific model. Generally, however performance should be quite
comparable for both implementations.
**NOTE:** Because this implementation is relatively recent, it is recommended
that for use cases where stability is paramount, the existing FIL
implementation be used.
## Usage
With one exception, experimental FIL should be fully compatible with the
existing FIL API. Experimental FIL no longer allows a `threshold` to be
specified at the time a model is loaded for binary classifiers. Instead, the
threshold must be passed as a keyword argument to the `predict` method.
Besides this, all existing FIL calls should be compatible with experimental
FIL. There are, however, several performance parameters which have been
deprecated (will now emit a warning) and a few new ones which have been added.
The most basic usage remains the same:
```python
from cuml.experimental import ForestInference
fm = ForestInference.load(filename=model_path,
output_class=True,
model_type='xgboost')
X = ... load test samples as a numpy or cupy array ...
y_out = fm.predict(X)
```
In order to optimize performance, however, we introduce a new optional
parameter to the `predict` method called `chunk_size`:
```python
y_out = fm.predict(X, chunk_size=4)
```
The API docs cover `chunk_size` in more detail, but this parameter controls
how many rows within a batch are simultaneously evaluated during a single
iteration of FIL's inference algorithm. The optimal value for this
parameter depends on both the model and available hardware, and it is
difficult to predict _a priori_. In general, however, larger batches benefit
from larger `chunk_size` values, and smaller batches benefit from smaller
`chunk_size` values.
For GPU execution, `chunk_size` can be any power of 2 from 1 to 32. For CPU
execution, `chunk_size` can be any power of 2, but there is generally no
benefit in testing values over 512. On both CPU and GPU, there is never
any benefit from a chunk size that exceeds the batch size. Tuning the
chunk size can substantially improve performance, so it is often worthwhile
to perform a search over chunk sizes with sample data when deploying a model
with FIL.
### Loading Parameters
In addition to the `chunk_size` parameter for the `predict` and
`predict_proba` methods, FIL offers some parameters for optimizing
performance when the model is loaded. This implementation also
deprecates some existing parameters.
#### Deprecated `load` Parameters
- `threshold` (will raise a `FutureWarning` if used)
- `algo` (ignored, but a warning will be logged)
- `storage_type` (ignored, but a warning will be logged)
- `blocks_per_sm` (ignored, but a warning will be logged)
- `threads_per_tree` (ignored, but a warning will be logged)
- `n_items` (ignored, but a warning will be logged)
- `compute_shape_str` (ignored, but a warning will be logged)
#### New `load` Parameters
- `layout`: Replaces the functionality of `algo` and specifies the in-memory
layout of nodes in FIL forests. One of `'depth_first'` (default) or
`'breadth_first'`. Except in cases where absolutely optimal
performance is critical, the default should be acceptable.
- `align_bytes`: If specified, trees will be padded such that their in-memory
size is a multiple of this value. Theoretically, this can improve
performance by guaranteeing that memory reads from trees begin on a cache
line boundary. Empirically, little benefit has been observed for this
parameter, and it may be deprecated before this version of FIL moves out of
experimental status.
#### Optimizing `load` parameters
While these two new parameters have been provided for cases in which it is
necessary to eke out every possible performance gain for a model, in general
the performance benefit will be tiny relative to the benefit of
optimizing `chunk_size` for predict calls.
## Future Development
Once experimental FIL has been thoroughly tested and evaluated in real-world
deployments, it will be moved out of experimental status and replace the
existing FIL implementation. Before this happens, RAPIDS developers will
also address the current underperformance of experimental FIL on shallow
trees to ensure performance parity.
While this version of FIL remains in experimental status, feedback is very
much welcome. Please consider [submitting an
issue](https://github.com/rapidsai/cuml/issues/new/choose) if you notice
any performance regression when transitioning from the current FIL, have
thoughts on how to make the API more useful, or have features you
would like to see in the new version of FIL before it transitions out of
experimental.
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/fil/tree_layout.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Cython mirror of the C++ ``tree_layout`` enum, which selects the in-memory
# node ordering used when a forest is imported into FIL: depth-first or
# breadth-first.
cdef extern from "cuml/experimental/fil/tree_layout.hpp" namespace "ML::experimental::fil" nogil:
    cdef enum tree_layout:
        depth_first "ML::experimental::fil::tree_layout::depth_first",
        breadth_first "ML::experimental::fil::tree_layout::breadth_first"
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/fil/fil.pyx | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import itertools
import numpy as np
import pathlib
import treelite.sklearn
import warnings
from libcpp cimport bool
from libc.stdint cimport uint32_t, uintptr_t
from cuml.common.device_selection import using_device_type
from cuml.internals.input_utils import input_to_cuml_array
from cuml.internals.safe_imports import (
gpu_only_import_from,
null_decorator
)
from cuml.internals.array import CumlArray
from cuml.internals.mixins import CMajorInputTagMixin
from cuml.experimental.fil.postprocessing cimport element_op, row_op
from cuml.experimental.fil.infer_kind cimport infer_kind
from cuml.experimental.fil.tree_layout cimport tree_layout as fil_tree_layout
from cuml.experimental.fil.detail.raft_proto.cuda_stream cimport cuda_stream as raft_proto_stream_t
from cuml.experimental.fil.detail.raft_proto.device_type cimport device_type as raft_proto_device_t
from cuml.experimental.fil.detail.raft_proto.handle cimport handle_t as raft_proto_handle_t
from cuml.experimental.fil.detail.raft_proto.optional cimport optional, nullopt
from cuml.internals import set_api_output_dtype
from cuml.internals.base import UniversalBase
from cuml.internals.device_type import DeviceType, DeviceTypeError
from cuml.internals.global_settings import GlobalSettings
from cuml.internals.mem_type import MemoryType
from pylibraft.common.handle cimport handle_t as raft_handle_t
from time import perf_counter
nvtx_annotate = gpu_only_import_from('nvtx', 'annotate', alt=null_decorator)
from cuml.internals.safe_imports import (
gpu_only_import_from,
null_decorator
)
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
# Opaque Treelite model handle from the Treelite C API; models cross the
# C boundary into FIL as this raw pointer type.
cdef extern from "treelite/c_api.h":
    ctypedef void* ModelHandle
cdef raft_proto_device_t get_device_type(arr):
    """Return the raft_proto device type (cpu or gpu) on which ``arr``'s
    data should be accessed.

    Device-accessible arrays map to gpu, except when the global device type
    is host AND the array is also host-accessible, in which case cpu is
    preferred. Arrays that are not device-accessible always map to cpu.
    """
    cdef raft_proto_device_t dev
    if arr.is_device_accessible:
        if (
            GlobalSettings().device_type == DeviceType.host
            and arr.is_host_accessible
        ):
            # Accessible from both sides; honor the global preference for
            # host execution.
            dev = raft_proto_device_t.cpu
        else:
            dev = raft_proto_device_t.gpu
    else:
        dev = raft_proto_device_t.cpu
    return dev
# Declarations for the C++ FIL forest_model class. The two consecutive
# raft_proto_device_t parameters of predict() are the memory locations of
# the output buffer and the input buffer; the single-precision call in
# ForestInference_impl._predict passes them as (out_dev, in_dev) — confirm
# the order against forest_model.hpp before changing any call site.
cdef extern from "cuml/experimental/fil/forest_model.hpp" namespace "ML::experimental::fil":
    cdef cppclass forest_model:
        void predict[io_t](
            const raft_proto_handle_t&,
            io_t*,
            io_t*,
            size_t,
            raft_proto_device_t,
            raft_proto_device_t,
            infer_kind,
            optional[uint32_t]
        ) except +
        bool is_double_precision() except +
        size_t num_features() except +
        size_t num_outputs() except +
        size_t num_trees() except +
        bool has_vector_leaves() except +
        row_op row_postprocessing() except +
        element_op elem_postprocessing() except +
# Imports a Treelite model (via its raw C handle) into a FIL forest_model
# with the requested node layout, alignment padding, precision override,
# target device type, device id, and stream.
cdef extern from "cuml/experimental/fil/treelite_importer.hpp" namespace "ML::experimental::fil":
    forest_model import_from_treelite_handle(
        ModelHandle,
        fil_tree_layout,
        uint32_t,
        optional[bool],
        raft_proto_device_t,
        int,
        raft_proto_stream_t
    ) except +
cdef class ForestInference_impl():
    """Low-level Cython wrapper around a single imported FIL forest_model.

    Instances own the C++ ``forest_model`` imported from a Treelite handle
    plus a raft_proto handle constructed on top of the caller-supplied RAFT
    handle. All prediction entry points funnel through ``_predict``.
    """
    cdef forest_model model
    cdef raft_proto_handle_t raft_proto_handle
    cdef object raft_handle
    def __cinit__(
            self,
            raft_handle,
            tl_model,
            *,
            layout='breadth_first',
            align_bytes=0,
            use_double_precision=None,
            mem_type=None,
            device_id=0):
        """Import ``tl_model`` into FIL.

        Parameters
        ----------
        raft_handle : pylibraft.common.handle
            RAFT handle; a reference is retained because the raft_proto
            handle keeps a pointer into it.
        tl_model
            A Treelite model object, a Treelite handle wrapper, or a raw
            integer handle value (all four wrapping levels are unwrapped).
        layout : {'breadth_first', 'depth_first'}, default='breadth_first'
            In-memory node layout for the imported forest.
        align_bytes : int, default=0
            If non-zero, pad each tree to a multiple of this many bytes.
        use_double_precision : bool or None, default=None
            Force single (False) or double (True) precision, or None to use
            the model's native precision.
        mem_type : MemoryType, str, or None, default=None
            Memory type to load into; None uses the global setting.
        device_id : int, default=0
            CUDA device to load to when the memory type is
            device-accessible.
        """
        # Store reference to RAFT handle to control lifetime, since
        # raft_proto handle keeps a pointer to it
        self.raft_handle = raft_handle
        self.raft_proto_handle = raft_proto_handle_t(
            <raft_handle_t*><size_t>self.raft_handle.getHandle()
        )
        if mem_type is None:
            mem_type = GlobalSettings().memory_type
        else:
            mem_type = MemoryType.from_str(mem_type)
        cdef optional[bool] use_double_precision_c
        cdef bool use_double_precision_bool
        if use_double_precision is None:
            use_double_precision_c = nullopt
        else:
            use_double_precision_bool = use_double_precision
            use_double_precision_c = use_double_precision_bool
        # Unwrap whatever level of Treelite wrapper we were handed down to
        # the raw integer handle value.
        try:
            model_handle = tl_model.handle.value
        except AttributeError:
            try:
                model_handle = tl_model.handle
            except AttributeError:
                try:
                    model_handle = tl_model.value
                except AttributeError:
                    model_handle = tl_model
        cdef raft_proto_device_t dev_type
        if mem_type.is_device_accessible:
            dev_type = raft_proto_device_t.gpu
        else:
            dev_type = raft_proto_device_t.cpu
        cdef fil_tree_layout tree_layout
        if layout.lower() == 'breadth_first':
            tree_layout = fil_tree_layout.breadth_first
        else:
            # Any unrecognized layout string falls back to depth-first.
            tree_layout = fil_tree_layout.depth_first
        self.model = import_from_treelite_handle(
            <ModelHandle><uintptr_t>model_handle,
            tree_layout,
            align_bytes,
            use_double_precision_c,
            dev_type,
            device_id,
            self.raft_proto_handle.get_next_usable_stream()
        )
    def get_dtype(self):
        """Return the numpy dtype (float32 or float64) of the loaded model."""
        return [np.float32, np.float64][self.model.is_double_precision()]
    def num_features(self):
        """Number of input features expected per row."""
        return self.model.num_features()
    def num_outputs(self):
        """Number of outputs produced per row for default inference."""
        return self.model.num_outputs()
    def num_trees(self):
        """Total number of trees in the forest."""
        return self.model.num_trees()
    def row_postprocessing(self):
        """Name of the model's row-wise postprocessing op, if recognized.

        Returns None for enum values not listed here (preserved behavior).
        """
        enum_val = self.model.row_postprocessing()
        if enum_val == row_op.row_disable:
            return "disable"
        elif enum_val == row_op.softmax:
            return "softmax"
        elif enum_val == row_op.max_index:
            return "max_index"
    def elem_postprocessing(self):
        """Name of the model's element-wise postprocessing op, if recognized.

        Returns None for enum values not listed here (preserved behavior).
        """
        enum_val = self.model.elem_postprocessing()
        if enum_val == element_op.elem_disable:
            return "disable"
        elif enum_val == element_op.signed_square:
            return "signed_square"
        elif enum_val == element_op.hinge:
            return "hinge"
        elif enum_val == element_op.sigmoid:
            return "sigmoid"
        elif enum_val == element_op.exponential:
            return "exponential"
        elif enum_val == element_op.logarithm_one_plus_exp:
            return "logarithm_one_plus_exp"
    def _predict(
            self,
            X,
            *,
            predict_type="default",
            preds=None,
            chunk_size=None,
            output_dtype=None):
        """Run inference on ``X`` and return predictions as a CumlArray.

        Parameters
        ----------
        X
            Input samples; converted to a C-order CumlArray of the model's
            dtype.
        predict_type : {'default', 'per_tree', 'leaf_id'}
            Kind of inference to perform; determines the output shape.
        preds : CumlArray or None
            Optional preallocated output of the exact expected shape.
        chunk_size : int or None
            Rows evaluated together per inference iteration; None lets the
            C++ layer choose.
        output_dtype
            Forwarded to set_api_output_dtype for output conversion.

        Raises
        ------
        ValueError
            If ``predict_type`` is unrecognized or ``preds`` has the wrong
            shape.
        """
        set_api_output_dtype(output_dtype)
        model_dtype = self.get_dtype()
        cdef uintptr_t in_ptr
        in_arr, n_rows, _, _ = input_to_cuml_array(
            X,
            order='C',
            convert_to_dtype=model_dtype,
            check_dtype=model_dtype
        )
        cdef raft_proto_device_t in_dev
        in_dev = get_device_type(in_arr)
        in_ptr = in_arr.ptr
        cdef uintptr_t out_ptr
        cdef infer_kind infer_type_enum
        if predict_type == "default":
            infer_type_enum = infer_kind.default_kind
            output_shape = (n_rows, self.model.num_outputs())
        elif predict_type == "per_tree":
            infer_type_enum = infer_kind.per_tree
            if self.model.has_vector_leaves():
                output_shape = (n_rows, self.model.num_trees(), self.model.num_outputs())
            else:
                output_shape = (n_rows, self.model.num_trees())
        elif predict_type == "leaf_id":
            infer_type_enum = infer_kind.leaf_id
            output_shape = (n_rows, self.model.num_trees())
        else:
            raise ValueError(f"Unrecognized predict_type: {predict_type}")
        if preds is None:
            preds = CumlArray.empty(
                output_shape,
                model_dtype,
                order='C',
                index=in_arr.index
            )
        else:
            # TODO(wphicks): Handle incorrect dtype/device/layout in C++
            if preds.shape != output_shape:
                raise ValueError(f"If supplied, preds argument must have shape {output_shape}")
            preds.index = in_arr.index
        cdef raft_proto_device_t out_dev
        out_dev = get_device_type(preds)
        out_ptr = preds.ptr
        cdef optional[uint32_t] chunk_specification
        if chunk_size is None:
            chunk_specification = nullopt
        else:
            chunk_specification = <uint32_t> chunk_size
        if model_dtype == np.float32:
            self.model.predict[float](
                self.raft_proto_handle,
                <float*> out_ptr,
                <float*> in_ptr,
                n_rows,
                out_dev,
                in_dev,
                infer_type_enum,
                chunk_specification
            )
        else:
            # BUGFIX: this branch previously passed the device-type
            # arguments as (in_dev, out_dev), swapped relative to the
            # single-precision branch above. Both branches now pass the
            # output buffer's device type first.
            self.model.predict[double](
                self.raft_proto_handle,
                <double*> out_ptr,
                <double*> in_ptr,
                n_rows,
                out_dev,
                in_dev,
                infer_type_enum,
                chunk_specification
            )
        self.raft_proto_handle.synchronize()
        return preds
    def predict(
            self,
            X,
            *,
            predict_type="default",
            preds=None,
            chunk_size=None,
            output_dtype=None):
        """Public prediction entry point; see ``_predict`` for details."""
        return self._predict(
            X,
            predict_type=predict_type,
            preds=preds,
            chunk_size=chunk_size,
            output_dtype=output_dtype
        )
class _AutoIterations:
"""Used to generate sequence of iterations (1, 2, 5, 10, 20, 50...) during
FIL optimization"""
def __init__(self):
self.invocations = 0
self.sequence = (1, 2, 5)
def next(self):
result = (
(10 ** (
self.invocations // len(self.sequence)
)) * self.sequence[self.invocations % len(self.sequence)]
)
self.invocations += 1
return result
def _handle_legacy_fil_args(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if kwargs.get('threshold', None) is not None:
raise FutureWarning(
'Parameter "threshold" has been deprecated.'
' To use a threshold for binary classification, pass'
' the "threshold" keyword directly to the predict method.'
)
if kwargs.get('algo', None) is not None:
warnings.warn(
'Parameter "algo" has been deprecated. Its use is no longer'
' necessary to achieve optimal performance with FIL.',
FutureWarning
)
if kwargs.get('storage_type', None) is not None:
warnings.warn(
'Parameter "storage_type" has been deprecated. The correct'
' storage type will be used automatically.',
FutureWarning
)
if kwargs.get('blocks_per_sm', None) is not None:
warnings.warn(
'Parameter "blocks_per_sm" has been deprecated. Its use is no'
' longer necessary to achieve optimal performance with FIL.',
FutureWarning
)
if kwargs.get('threads_per_tree', None) is not None:
warnings.warn(
'Parameter "threads_per_tree" has been deprecated. Pass'
' the "chunk_size" keyword argument to the predict method for'
' equivalent functionality.',
FutureWarning
)
if kwargs.get('n_items', None) is not None:
warnings.warn(
'Parameter "n_items" has been deprecated. Its use is no'
' longer necessary to achieve optimal performance with FIL.',
FutureWarning
)
if kwargs.get('compute_shape_str', None) is not None:
warnings.warn(
'Parameter "compute_shape_str" has been deprecated.',
FutureWarning
)
return func(*args, **kwargs)
return wrapper
class ForestInference(UniversalBase, CMajorInputTagMixin):
"""
ForestInference provides accelerated inference for forest models on both
CPU and GPU.
This experimental implementation
(`cuml.experimental.ForestInference`) of ForestInference is similar to the
original (`cuml.ForestInference`) FIL, but it also offers CPU
execution and in some cases superior performance for GPU execution.
Note: This is an experimental feature. Although it has been
extensively reviewed and tested, it has not been as thoroughly evaluated
as the original FIL. For maximum stability, we recommend using the
original FIL until this implementation moves out of experimental.
In general, the experimental implementation tends to underperform
the existing implementation on shallow trees but otherwise tends to offer
comparable or superior performance. Which implementation offers the best
performance depends on a range of factors including hardware and details of
the individual model, so for now it is recommended that users test both
implementations in cases where CPU execution is unnecessary and performance
is critical.
**Performance Tuning**
To obtain optimal performance with this implementation of FIL, the single
most important value is the `chunk_size` parameter passed to the predict
method. Essentially, `chunk_size` determines how many rows to evaluate
together at once from a single batch. Larger values reduce global memory
accesses on GPU and cache misses on CPU, but smaller values allow for
finer-grained parallelism, improving usage of available processing power.
The optimal value for this parameter is hard to predict a priori, but in
general larger batch sizes benefit from larger chunk sizes and smaller
batch sizes benefit from smaller chunk sizes. Having a chunk size larger
than the batch size is never optimal.
To determine the optimal chunk size on GPU, test powers of 2 from 1 to
32. Values above 32 and values which are not powers of 2 are not supported.
To determine the optimal chunk size on CPU, test powers of 2 from 1 to
512. Values above 512 are supported, but RAPIDS developers have not yet
seen a case where they yield improved performance.
After chunk size, the most important performance parameter is `layout`,
also described below. Testing both breadth-first and depth-first is
recommended to optimize performance, but the impact is likely to be
substantially less than optimizing `chunk_size`. Particularly for large
models, the default value (depth-first) is likely to improve cache
hits and thereby increase performance, but this is not universally true.
`align_bytes` is the final performance parameter, but it has minimal
impact on both CPU and GPU and may be removed in a later version.
If set, this value causes trees to be padded with empty nodes until
their total in-memory size is a multiple of the given value.
Theoretically, this can improve performance by ensuring that reads of
tree data begin at a cache line boundary, but experimental evidence
offers limited support for this. It is recommended that a value of 128 be
used for GPU execution and a value of either None or 64 be used for CPU
execution.
Parameters
----------
treelite_model : treelite.Model
The model to be used for inference. This can be trained with XGBoost,
LightGBM, cuML, Scikit-Learn, or any other forest model framework
so long as it can be loaded into a treelite.Model object (See
https://treelite.readthedocs.io/en/latest/treelite-api.html).
handle : pylibraft.common.handle or None
For GPU execution, the RAFT handle containing the stream or stream
pool to use during loading and inference. If input is provide to
this model in the wrong memory location (e.g. host memory input but
GPU execution), the input will be copied to the correct location
using as many streams as are available in the handle. It is therefore
recommended that a handle with a stream pool be used for models where
it is expected that large input arrays will be coming from the host but
evaluated on device.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
output_class : boolean
True for classifier models, false for regressors.
layout : {'breadth_first', 'depth_first'}, default='depth_first'
The in-memory layout to be used during inference for nodes of the
forest model. This parameter is available purely for runtime
optimization. For performance-critical applications, it is
recommended that both layouts be tested with realistic batch sizes to
determine the optimal value.
align_bytes : int or None, default=None
If set, each tree will be padded with empty nodes until its in-memory
size is a multiple of the given value. It is recommended that a
value of 128 be used for GPU and either None or 64 be used for CPU.
precision : {'single', 'double', None}, default='single'
Use the given floating point precision for evaluating the model. If
None, use the native precision of the model. Note that
single-precision execution is substantially faster than
double-precision execution, so double-precision is recommended
only for models trained and double precision and when exact
conformance between results from FIL and the original training
framework is of paramount importance.
device_id : int, default=0
For GPU execution, the device on which to load and execute this
model. For CPU execution, this value is currently ignored.
"""
def _reload_model(self):
"""Reload model on any device (CPU/GPU) where model has already been
loaded"""
if hasattr(self, '_gpu_forest'):
with using_device_type('gpu'):
self._load_to_fil(device_id=self.device_id)
if hasattr(self, '_cpu_forest'):
with using_device_type('cpu'):
self._load_to_fil(device_id=self.device_id)
@property
def align_bytes(self):
try:
return self._align_bytes_
except AttributeError:
self._align_bytes_ = 0
return self._align_bytes_
@align_bytes.setter
def align_bytes(self, value):
try:
old_value = self._align_bytes_
except AttributeError:
old_value = value
if value is None:
self._align_bytes_ = 0
else:
self._align_bytes_ = value
if self.align_bytes != old_value:
self._reload_model()
@property
def precision(self):
try:
use_double_precision = \
self._use_double_precision_
except AttributeError:
self._use_double_precision_ = False
use_double_precision = \
self._use_double_precision_
if use_double_precision is None:
return 'native'
elif use_double_precision:
return 'double'
else:
return 'single'
@precision.setter
def precision(self, value):
try:
old_value = self._use_double_precision_
except AttributeError:
self._use_double_precision_ = False
old_value = self._use_double_precision_
if value in ('native', None):
self._use_double_precision_ = None
elif value in ('double', 'float64'):
self._use_double_precision_ = True
else:
self._use_double_precision_ = False
if old_value != self._use_double_precision_:
self._reload_model()
@property
def output_class(self):
warnings.warn(
'"output_class" has been renamed "is_classifier".'
' Support for the old parameter name will be removed in an'
' upcoming version.',
FutureWarning
)
return self.is_classifier
@output_class.setter
def output_class(self, value):
if value is not None:
warnings.warn(
'"output_class" has been renamed "is_classifier".'
' Support for the old parameter name will be removed in an'
' upcoming version.',
FutureWarning
)
self.is_classifier = value
@property
def is_classifier(self):
try:
return self._is_classifier_
except AttributeError:
self._is_classifier_ = False
return self._is_classifier_
@is_classifier.setter
def is_classifier(self, value):
if not hasattr(self, '_is_classifier_'):
self._is_classifier_ = value
elif value is not None:
self._is_classifier_ = value
@property
def device_id(self):
try:
return self._device_id_
except AttributeError:
self._device_id_ = 0
return self._device_id_
@device_id.setter
def device_id(self, value):
try:
old_value = self.device_id
except AttributeError:
old_value = None
if value is not None:
self._device_id_ = value
if (
self.treelite_model is not None
and self.device_id != old_value
and hasattr(self, '_gpu_forest')
):
self._load_to_fil(device_id=self.device_id)
@property
def treelite_model(self):
try:
return self._treelite_model_
except AttributeError:
return None
@treelite_model.setter
def treelite_model(self, value):
if value is not None:
self._treelite_model_ = value
self._reload_model()
@property
def layout(self):
try:
return self._layout_
except AttributeError:
self._layout_ = 'depth_first'
return self._layout_
@layout.setter
def layout(self, value):
try:
old_value = self._layout_
except AttributeError:
old_value = None
if value is not None:
self._layout_ = value
if old_value != value:
self._reload_model()
def __init__(
self,
*,
treelite_model=None,
handle=None,
output_type=None,
verbose=False,
is_classifier=False,
output_class=None,
layout='depth_first',
default_chunk_size=None,
align_bytes=None,
precision='single',
device_id=0):
super().__init__(
handle=handle, verbose=verbose, output_type=output_type
)
self.default_chunk_size = default_chunk_size
self.align_bytes = align_bytes
self.layout = layout
self.precision = precision
self.is_classifier = is_classifier
self.is_classifier = output_class
self.device_id = device_id
self.treelite_model = treelite_model
self._load_to_fil(device_id=self.device_id)
def _load_to_fil(self, mem_type=None, device_id=0):
if mem_type is None:
mem_type = GlobalSettings().memory_type
else:
mem_type = MemoryType.from_str(mem_type)
if mem_type.is_device_accessible:
self.device_id = device_id
if self.treelite_model is not None:
impl = ForestInference_impl(
self.handle,
self.treelite_model,
layout=self.layout,
align_bytes=self.align_bytes,
use_double_precision=self._use_double_precision_,
mem_type=mem_type,
device_id=self.device_id
)
if mem_type.is_device_accessible:
self._gpu_forest = impl
if mem_type.is_host_accessible:
self._cpu_forest = impl
@property
def gpu_forest(self):
"""The underlying FIL forest model loaded in GPU-accessible memory"""
try:
return self._gpu_forest
except AttributeError:
self._load_to_fil(mem_type=MemoryType.device)
return self._gpu_forest
@property
def cpu_forest(self):
"""The underlying FIL forest model loaded in CPU-accessible memory"""
try:
return self._cpu_forest
except AttributeError:
self._load_to_fil(mem_type=MemoryType.host)
return self._cpu_forest
@property
def forest(self):
"""The underlying FIL forest model loaded in memory compatible with the
current global device_type setting"""
if GlobalSettings().device_type == DeviceType.device:
return self.gpu_forest
elif GlobalSettings().device_type == DeviceType.host:
return self.cpu_forest
else:
raise DeviceTypeError("Unsupported device type for FIL")
    def num_outputs(self):
        """Return the number of outputs produced per row by the model."""
        return self.forest.num_outputs()
    def num_trees(self):
        """Return the total number of trees in the loaded model."""
        return self.forest.num_trees()
@classmethod
@_handle_legacy_fil_args
def load(
cls,
path,
*,
output_class=False,
threshold=None,
algo=None,
storage_type=None,
blocks_per_sm=None,
threads_per_tree=None,
n_items=None,
compute_shape_str=None,
precision='single',
model_type=None,
output_type=None,
verbose=False,
default_chunk_size=None,
align_bytes=None,
layout='depth_first',
device_id=0,
handle=None):
"""Load a model into FIL from a serialized model file.
Parameters
----------
path : str
The path to the serialized model file. This can be an XGBoost
binary or JSON file, a LightGBM text file, or a Treelite checkpoint
file. If the model_type parameter is not passed, an attempt will be
made to load the file based on its extension.
output_class : boolean, default=False
True for classification models, False for regressors
threshold : float
For binary classifiers, outputs above this value will be considered
a positive detection.
algo
This parameter is deprecated. It is currently retained for
compatibility with existing FIL. Please see `layout` for a
parameter that fulfills a similar purpose.
storage_type
This parameter is deprecated. It is currently retained for
compatibility with existing FIL.
blocks_per_sm
This parameter is deprecated. It is currently retained for
compatibility with existing FIL.
threads_per_tree : int
This parameter is deprecated. It is currently retained for
compatibility with existing FIL. Please see the `chunk_size`
parameter of the predict method for equivalent functionality.
n_items
This parameter is deprecated. It is currently retained for
compatibility with existing FIL.
compute_shape_str
This parameter is deprecated. It is currently retained for
compatibility with existing FIL.
precision : {'single', 'double', None}, default='single'
Use the given floating point precision for evaluating the model. If
None, use the native precision of the model. Note that
single-precision execution is substantially faster than
double-precision execution, so double-precision is recommended
only for models trained and double precision and when exact
conformance between results from FIL and the original training
framework is of paramount importance.
model_type : {'xgboost', 'xgboost_json', 'lightgbm',
'treelite_checkpoint', None }, default=None
The serialization format for the model file. If None, a best-effort
guess will be made based on the file extension.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
default_chunk_size : int or None, default=None
If set, predict calls without a specified chunk size will use
this default value.
align_bytes : int or None, default=None
If set, each tree will be padded with empty nodes until its
in-memory size is a multiple of the given value. It is recommended
that a value of 128 be used for GPU and either None or 64 be used
for CPU.
layout : {'breadth_first', 'depth_first'}, default='depth_first'
The in-memory layout to be used during inference for nodes of the
forest model. This parameter is available purely for runtime
optimization. For performance-critical applications, it is
recommended that both layouts be tested with realistic batch sizes
to determine the optimal value.
device_id : int, default=0
For GPU execution, the device on which to load and execute this
model. For CPU execution, this value is currently ignored.
handle : pylibraft.common.handle or None
For GPU execution, the RAFT handle containing the stream or stream
pool to use during loading and inference.
"""
if model_type is None:
extension = pathlib.Path(path).suffix
if extension == '.json':
model_type = 'xgboost_json'
elif extension == '.model':
model_type = 'xgboost'
elif extension == '.txt':
model_type = 'lightgbm'
else:
model_type = 'treelite_checkpoint'
if model_type == 'treelite_checkpoint':
tl_model = treelite.frontend.Model.deserialize(path)
else:
tl_model = treelite.frontend.Model.load(
path, model_type
)
if default_chunk_size is None:
default_chunk_size = threads_per_tree
return cls(
treelite_model=tl_model,
handle=handle,
output_type=output_type,
verbose=verbose,
output_class=output_class,
default_chunk_size=default_chunk_size,
align_bytes=align_bytes,
layout=layout,
precision=precision,
device_id=device_id
)
@classmethod
@_handle_legacy_fil_args
def load_from_sklearn(
cls,
skl_model,
*,
output_class=False,
threshold=None,
algo=None,
storage_type=None,
blocks_per_sm=None,
threads_per_tree=None,
n_items=None,
compute_shape_str=None,
precision='single',
model_type=None,
output_type=None,
verbose=False,
default_chunk_size=None,
align_bytes=None,
layout='breadth_first',
device_id=0,
handle=None):
"""Load a Scikit-Learn forest model to FIL
Parameters
----------
skl_model
The Scikit-Learn forest model to load.
output_class : boolean, default=False
True for classification models, False for regressors
threshold : float
For binary classifiers, outputs above this value will be considered
a positive detection.
algo
This parameter is deprecated. It is currently retained for
compatibility with existing FIL. Please see `layout` for a
parameter that fulfills a similar purpose.
storage_type
This parameter is deprecated. It is currently retained for
compatibility with existing FIL.
blocks_per_sm
This parameter is deprecated. It is currently retained for
compatibility with existing FIL.
threads_per_tree : int
This parameter is deprecated. It is currently retained for
compatibility with existing FIL. Please see `chunk_size` for a
parameter that fulfills an equivalent purpose. If a value is passed
for this parameter, it will be used as the `chunk_size` for now.
n_items
This parameter is deprecated. It is currently retained for
compatibility with existing FIL.
compute_shape_str
This parameter is deprecated. It is currently retained for
compatibility with existing FIL.
precision : {'single', 'double', None}, default='single'
Use the given floating point precision for evaluating the model. If
None, use the native precision of the model. Note that
single-precision execution is substantially faster than
double-precision execution, so double-precision is recommended
only for models trained and double precision and when exact
conformance between results from FIL and the original training
framework is of paramount importance.
model_type : {'xgboost', 'xgboost_json', 'lightgbm',
'treelite_checkpoint', None }, default=None
The serialization format for the model file. If None, a best-effort
guess will be made based on the file extension.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
default_chunk_size : int or None, default=None
If set, predict calls without a specified chunk size will use
this default value.
align_bytes : int or None, default=None
If set, each tree will be padded with empty nodes until its
in-memory size is a multiple of the given value. It is recommended
that a
value of 128 be used for GPU and either None or 64 be used for CPU.
layout : {'breadth_first', 'depth_first'}, default='depth_first'
The in-memory layout to be used during inference for nodes of the
forest model. This parameter is available purely for runtime
optimization. For performance-critical applications, it is
recommended that both layouts be tested with realistic batch sizes
to determine the optimal value.
mem_type : {'device', 'host', None}, default='single'
The memory type to use for initially loading the model. If None,
the current global memory type setting will be used. If the model
is loaded with one memory type and inference is later requested
with an incompatible device (e.g. device memory and CPU execution),
the model will be lazily loaded to the correct location at that
time. In general, it should not be necessary to set this parameter
directly (rely instead on the `using_device_type` context manager),
but it can be a useful convenience for some hyperoptimization
pipelines.
device_id : int, default=0
For GPU execution, the device on which to load and execute this
model. For CPU execution, this value is currently ignored.
handle : pylibraft.common.handle or None
For GPU execution, the RAFT handle containing the stream or stream
pool to use during loading and inference.
"""
tl_model = treelite.sklearn.import_model(skl_model)
if default_chunk_size is None:
default_chunk_size = threads_per_tree
result = cls(
treelite_model=tl_model,
handle=handle,
output_type=output_type,
verbose=verbose,
output_class=output_class,
default_chunk_size=default_chunk_size,
align_bytes=align_bytes,
layout=layout,
precision=precision,
device_id=device_id
)
return result
@classmethod
def load_from_treelite_model(
        cls,
        tl_model,
        *,
        output_class=False,
        threshold=None,
        algo=None,
        storage_type=None,
        blocks_per_sm=None,
        threads_per_tree=None,
        n_items=None,
        compute_shape_str=None,
        precision='single',
        model_type=None,
        output_type=None,
        verbose=False,
        default_chunk_size=None,
        align_bytes=None,
        layout='breadth_first',
        device_id=0,
        handle=None):
    """Load a Treelite model to FIL

    Parameters
    ----------
    tl_model : treelite.model
        The Treelite model to load.
    output_class : boolean, default=False
        True for classification models, False for regressors
    threshold : float
        For binary classifiers, outputs above this value will be considered
        a positive detection.
        NOTE(review): this argument is not forwarded to the constructed
        model below; pass ``threshold`` to ``predict`` instead — confirm
        this is intentional.
    algo
        This parameter is deprecated. It is currently retained for
        compatibility with existing FIL. Please see `layout` for a
        parameter that fulfills a similar purpose.
    storage_type
        This parameter is deprecated. It is currently retained for
        compatibility with existing FIL.
    blocks_per_sm
        This parameter is deprecated. It is currently retained for
        compatibility with existing FIL.
    threads_per_tree : int
        This parameter is deprecated. It is currently retained for
        compatibility with existing FIL. Please see `chunk_size` for a
        parameter that fulfills an equivalent purpose. If a value is passed
        for this parameter, it will be used as the `chunk_size` for now.
    n_items
        This parameter is deprecated. It is currently retained for
        compatibility with existing FIL.
    compute_shape_str
        This parameter is deprecated. It is currently retained for
        compatibility with existing FIL.
    precision : {'single', 'double', None}, default='single'
        Use the given floating point precision for evaluating the model. If
        None, use the native precision of the model. Note that
        single-precision execution is substantially faster than
        double-precision execution, so double-precision is recommended
        only for models trained in double precision and when exact
        conformance between results from FIL and the original training
        framework is of paramount importance.
    model_type : {'xgboost', 'xgboost_json', 'lightgbm', \
        'treelite_checkpoint', None }, default=None
        The serialization format for the model file. If None, a best-effort
        guess will be made based on the file extension.
        NOTE(review): not used when loading from an in-memory Treelite
        model; presumably retained for API compatibility — confirm.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    default_chunk_size : int or None, default=None
        If set, predict calls without a specified chunk size will use
        this default value.
    align_bytes : int or None, default=None
        If set, each tree will be padded with empty nodes until its
        in-memory size is a multiple of the given value. It is recommended
        that a value of 128 be used for GPU and either None or 64 be used
        for CPU.
    layout : {'breadth_first', 'depth_first'}, default='breadth_first'
        The in-memory layout to be used during inference for nodes of the
        forest model. This parameter is available purely for runtime
        optimization. For performance-critical applications, it is
        recommended that both layouts be tested with realistic batch sizes
        to determine the optimal value.
    device_id : int, default=0
        For GPU execution, the device on which to load and execute this
        model. For CPU execution, this value is currently ignored.
    handle : pylibraft.common.handle or None
        For GPU execution, the RAFT handle containing the stream or stream
        pool to use during loading and inference.
    """
    # The deprecated threads_per_tree doubles as the default chunk size
    # when no explicit default_chunk_size was given (backward compat).
    if default_chunk_size is None:
        default_chunk_size = threads_per_tree
    return cls(
        treelite_model=tl_model,
        handle=handle,
        output_type=output_type,
        verbose=verbose,
        output_class=output_class,
        default_chunk_size=default_chunk_size,
        align_bytes=align_bytes,
        layout=layout,
        precision=precision,
        device_id=device_id
    )
@nvtx_annotate(
    message='ForestInference.predict_proba',
    domain='cuml_python'
)
def predict_proba(self, X, *, preds=None, chunk_size=None) -> CumlArray:
    """
    Predict the class probabilities for each row in X.

    Parameters
    ----------
    X
        The input data of shape Rows X Features. This can be a numpy
        array, cupy array, Pandas/cuDF Dataframe or any other array type
        accepted by cuML. FIL is optimized for C-major arrays (e.g.
        numpy/cupy arrays). Inputs whose datatype does not match the
        precision of the loaded model (float/double) will be converted
        to the correct datatype before inference. If this input is in a
        memory location that is inaccessible to the current device type
        (as set with e.g. the `using_device_type` context manager),
        it will be copied to the correct location. This copy will be
        distributed across as many CUDA streams as are available
        in the stream pool of the model's RAFT handle.
    preds
        If non-None, outputs will be written in-place to this array.
        Therefore, if given, this should be a C-major array of shape Rows x
        Classes with a datatype (float/double) corresponding to the
        precision of the model. If None, an output array of the correct
        shape and type will be allocated and returned.
    chunk_size : int
        The number of rows to simultaneously process in one iteration
        of the inference algorithm. Batches are further broken down into
        "chunks" of this size when assigning available threads to tasks.
        The choice of chunk size can have a substantial impact on
        performance, but the optimal choice depends on model and
        hardware and is difficult to predict a priori. In general,
        larger batch sizes benefit from larger chunk sizes, and smaller
        batch sizes benefit from small chunk sizes. On GPU, valid
        values are powers of 2 from 1 to 32. On CPU, valid values are
        any power of 2, but little benefit is expected above a chunk size
        of 512.

    Raises
    ------
    RuntimeError
        If the loaded model is a regressor (no class probabilities).
    """
    if not self.is_classifier:
        # Fixed message: the loaders accept `output_class`, and the old
        # text misspelled a non-existent "is_classifer" parameter.
        raise RuntimeError(
            "predict_proba is not available for regression models. Load"
            " with output_class=True if this is a classifier."
        )
    # Fall back to the model-level default chunk size when none is given.
    return self.forest.predict(
        X, preds=preds, chunk_size=(chunk_size or self.default_chunk_size)
    )
@nvtx_annotate(
    message='ForestInference.predict',
    domain='cuml_python'
)
def predict(
        self,
        X,
        *,
        preds=None,
        chunk_size=None,
        threshold=None) -> CumlArray:
    """
    Predict the class (classification) or output value (regression) for
    each row of X.

    Parameters
    ----------
    X
        Input data of shape Rows x Features. Any array type accepted by
        cuML may be passed (numpy/cupy arrays, Pandas/cuDF DataFrames,
        ...); C-major arrays are fastest. Data is converted to the
        model's precision (float/double) and, if it resides in a memory
        location inaccessible to the current device type (see the
        `using_device_type` context manager), copied to the correct
        location using the streams of the model's RAFT handle.
    preds
        Optional preallocated C-major output array of shape Rows x 1
        with a datatype matching the model's precision. If None, a new
        array of the correct shape and type is allocated and returned.
        In-place prediction offers a performance and memory benefit only
        for regressors.
    chunk_size : int
        Rows processed per iteration of the inference algorithm; batches
        are broken into "chunks" of this size when assigning threads to
        tasks. The optimal value depends on model and hardware. On GPU,
        valid values are powers of 2 from 1 to 32; on CPU, any power of
        2 (little benefit above 512). If None, the model's default chunk
        size is used.
    threshold : float
        Decision threshold for binary classifiers; probabilities above
        it are positive detections. Defaults to 0.5 when None. Ignored
        for multiclass classifiers, where the argmax class is chosen.
    """
    effective_chunk = chunk_size or self.default_chunk_size

    def _deliver(values):
        # Honor the caller-provided output buffer when one was given.
        if preds is None:
            return values
        preds[:] = values
        return preds

    if self.forest.row_postprocessing() == 'max_index':
        # The forest already emits the winning class index in column 0.
        raw_scores = self.forest.predict(X, chunk_size=effective_chunk)
        return _deliver(raw_scores[:, 0])

    if self.is_classifier:
        proba = self.forest.predict(X, chunk_size=effective_chunk)
        dense = proba.to_output(output_type='array')
        if len(proba.shape) < 2 or proba.shape[1] == 1:
            # Binary case: threshold the single probability column.
            cutoff = 0.5 if threshold is None else threshold
            labels = (dense > cutoff).astype('int')
        else:
            # Multiclass case: highest-probability class wins.
            labels = GlobalSettings().xpy.argmax(dense, axis=1)
        return _deliver(labels)

    # Regression: delegate directly so in-place output is truly in-place.
    return self.forest.predict(
        X, predict_type="default", preds=preds, chunk_size=effective_chunk
    )
@nvtx_annotate(
    message='ForestInference.predict_per_tree',
    domain='cuml_python'
)
def predict_per_tree(
        self,
        X,
        *,
        preds=None,
        chunk_size=None) -> CumlArray:
    """
    Return the raw margin score(s) produced by each individual tree.

    Parameters
    ----------
    X
        Input data of shape Rows x Features. Any array type accepted by
        cuML may be passed (numpy/cupy arrays, Pandas/cuDF DataFrames,
        ...); C-major arrays are fastest. Data is converted to the
        model's precision (float/double) and, if it resides in a memory
        location inaccessible to the current device type (see the
        `using_device_type` context manager), copied to the correct
        location using the streams of the model's RAFT handle.
    preds
        Optional preallocated C-major output array with a datatype
        matching the model's precision, of shape
        n_rows * n_trees * n_outputs for vector-leaf models or
        n_rows * n_trees for scalar-leaf models. If None, an array of
        the correct shape and type is allocated and returned.
    chunk_size : int
        Rows processed per iteration of the inference algorithm; batches
        are broken into "chunks" of this size when assigning threads to
        tasks. The optimal value depends on model and hardware. On GPU,
        valid values are powers of 2 from 1 to 32; on CPU, any power of
        2 (little benefit above 512). If None, the model's default chunk
        size is used.
    """
    return self.forest.predict(
        X,
        predict_type="per_tree",
        preds=preds,
        chunk_size=(chunk_size or self.default_chunk_size),
    )
@nvtx_annotate(
    message='ForestInference.apply',
    domain='cuml_python'
)
def apply(
        self,
        X,
        *,
        preds=None,
        chunk_size=None) -> CumlArray:
    """
    Output the ID of the leaf node reached in each tree, for each row.

    Parameters
    ----------
    X
        The input data of shape Rows X Features. This can be a numpy
        array, cupy array, Pandas/cuDF Dataframe or any other array type
        accepted by cuML. FIL is optimized for C-major arrays (e.g.
        numpy/cupy arrays). Inputs whose datatype does not match the
        precision of the loaded model (float/double) will be converted
        to the correct datatype before inference. If this input is in a
        memory location that is inaccessible to the current device type
        (as set with e.g. the `using_device_type` context manager),
        it will be copied to the correct location.
    preds
        If non-None, outputs will be written in-place to this array.
        Therefore, if given, this should be a C-major array of shape
        n_rows * n_trees with a datatype (float/double) corresponding to
        the precision of the model. If None, an output array of the
        correct shape and type will be allocated and returned.
    chunk_size : int
        The number of rows to simultaneously process in one iteration
        of the inference algorithm. On GPU, valid values are powers of 2
        from 1 to 32. On CPU, valid values are any power of 2, with
        little benefit expected above 512. If None, the model's default
        chunk size is used.
    """
    # Fall back to the model-level default chunk size, matching
    # predict/predict_proba/predict_per_tree. (Previously a None
    # chunk_size was forwarded unmodified, so apply() ignored the
    # default tuned by optimize().)
    return self.forest.predict(
        X,
        predict_type="leaf_id",
        preds=preds,
        chunk_size=(chunk_size or self.default_chunk_size)
    )
def optimize(
        self,
        *,
        data=None,
        batch_size=1024,
        unique_batches=10,
        timeout=0.2,
        predict_method='predict',
        max_chunk_size=None,
        seed=0
):
    """
    Find the optimal layout and chunk size for this model

    The optimal value for layout and chunk size depends on the model,
    batch size, and available hardware. In order to get the most
    realistic performance distribution, example data can be provided. If
    it is not, random data will be generated based on the indicated batch
    size. After finding the optimal layout, the model will be reloaded if
    necessary. The optimal chunk size will be used to set the default chunk
    size used if none is passed to the predict call.

    Parameters
    ----------
    data
        Example data either of shape unique_batches x batch size x features
        or batch_size x features or None. If None, random data will be
        generated instead.
    batch_size : int
        If example data is not provided, random data with this many rows
        per batch will be used.
    unique_batches : int
        The number of unique batches to generate if random data are used.
        Increasing this number decreases the chance that the optimal
        configuration will be skewed by a single batch with unusual
        performance characteristics.
    timeout : float
        Time in seconds to target for optimization. The optimization loop
        will be repeatedly run a number of times increasing in the sequence
        1, 2, 5, 10, 20, 50, ... until the time taken is at least the given
        value. Note that for very large batch sizes and large models, the
        total elapsed time may exceed this timeout; it is a soft target for
        elapsed time. Setting the timeout to zero will run through the
        indicated number of unique batches exactly once. Defaults to 0.2s.
    predict_method : str
        If desired, optimization can occur over one of the prediction
        method variants (e.g. "predict_per_tree") rather than the
        default `predict` method. To do so, pass the name of the method
        here.
    max_chunk_size : int or None
        The maximum chunk size to explore during optimization. If not
        set, a value will be picked based on the current device type.
        Setting this to a lower value will reduce the optimization search
        time but may not result in optimal performance.
    seed : int
        The random seed used for generating example data if none is
        provided.
    """
    if data is None:
        xpy = GlobalSettings().xpy
        dtype = self.forest.get_dtype()
        # Use a dedicated RandomState so `seed` actually controls the
        # generated benchmark data (previously the parameter was
        # silently ignored) and global RNG state is left untouched.
        rng = xpy.random.RandomState(seed)
        # NOTE(review): sampling uniformly over half the full finfo
        # range produces extreme feature values — presumably intended
        # to stress comparisons; confirm.
        data = rng.uniform(
            xpy.finfo(dtype).min / 2,
            xpy.finfo(dtype).max / 2,
            (unique_batches, batch_size, self.forest.num_features())
        )
    else:
        data = CumlArray.from_input(
            data,
            order='K',
        ).to_output('array')
        try:
            unique_batches, batch_size, features = data.shape
        except ValueError:
            # 2D input: treat it as a single unique batch.
            unique_batches = 1
            batch_size, features = data.shape
            data = [data]
    if max_chunk_size is None:
        max_chunk_size = 512
        # GPU kernels only accept chunk sizes up to 32.
        if GlobalSettings().device_type is DeviceType.device:
            max_chunk_size = min(max_chunk_size, 32)
    infer = getattr(self, predict_method)
    optimal_layout = 'depth_first'
    optimal_chunk_size = 1
    valid_layouts = ('depth_first', 'breadth_first')
    # Candidate chunk sizes: powers of two up to max_chunk_size.
    chunk_size = 1
    valid_chunk_sizes = []
    while chunk_size <= max_chunk_size:
        valid_chunk_sizes.append(chunk_size)
        chunk_size *= 2
    all_params = list(itertools.product(valid_layouts, valid_chunk_sizes))
    auto_iterator = _AutoIterations()
    loop_start = perf_counter()
    while True:
        optimal_time = float('inf')
        iterations = auto_iterator.next()
        for layout, chunk_size in all_params:
            self.layout = layout
            # Warm-up call so reloading the model in the new layout is
            # not charged against this configuration's timing.
            infer(data[0], chunk_size=chunk_size)
            elapsed = float('inf')
            for _ in range(iterations):
                start = perf_counter()
                for iter_index in range(unique_batches):
                    infer(
                        data[iter_index], chunk_size=chunk_size
                    )
                # Keep the best (minimum) time across repeats to
                # suppress measurement noise.
                elapsed = min(elapsed, perf_counter() - start)
            if elapsed < optimal_time:
                optimal_time = elapsed
                optimal_layout = layout
                optimal_chunk_size = chunk_size
        if (perf_counter() - loop_start > timeout):
            break
    self.layout = optimal_layout
    self.default_chunk_size = optimal_chunk_size
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/fil/infer_kind.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
cdef extern from "treelite/c_api.h":
    # Opaque handle to a Treelite model object (managed by Treelite's C API).
    ctypedef void* ModelHandle
cdef extern from "cuml/experimental/fil/infer_kind.hpp" namespace "ML::experimental::fil":
    # TODO(hcho3): Switch to new syntax for scoped enum when we adopt Cython 3.0
    # Selects what a FIL inference call outputs: the usual model prediction
    # (default_kind), one margin per tree (per_tree), or the ID of the leaf
    # reached in each tree (leaf_id).
    cdef enum infer_kind:
        default_kind "ML::experimental::fil::infer_kind::default_kind"
        per_tree "ML::experimental::fil::infer_kind::per_tree"
        leaf_id "ML::experimental::fil::infer_kind::leaf_id"
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/fil/__init__.py | #
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.experimental.fil.fil import ForestInference
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental/fil | rapidsai_public_repos/cuml/python/cuml/experimental/fil/detail/__init__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental/fil/detail | rapidsai_public_repos/cuml/python/cuml/experimental/fil/detail/raft_proto/cuda_stream.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
cdef extern from "cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp" namespace "raft_proto" nogil:
    # Opaque stream type from the C++ raft_proto layer; only passed
    # through from C++, never constructed or inspected in Cython.
    cdef cppclass cuda_stream:
        pass
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental/fil/detail | rapidsai_public_repos/cuml/python/cuml/experimental/fil/detail/raft_proto/device_type.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
cdef extern from "cuml/experimental/fil/detail/raft_proto/device_type.hpp" namespace "raft_proto" nogil:
    # Execution/memory-location selector used throughout raft_proto.
    cdef enum device_type:
        cpu "raft_proto::device_type::cpu",
        gpu "raft_proto::device_type::gpu"
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental/fil/detail | rapidsai_public_repos/cuml/python/cuml/experimental/fil/detail/raft_proto/handle.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cuml.experimental.fil.detail.raft_proto.cuda_stream cimport cuda_stream as raft_proto_stream_t
from pylibraft.common.handle cimport handle_t as raft_handle_t
cdef extern from "cuml/experimental/fil/detail/raft_proto/handle.hpp" namespace "raft_proto" nogil:
    # Lightweight facade over a RAFT handle, exposing stream access and
    # synchronization to the FIL Cython layer.
    cdef cppclass handle_t:
        handle_t() except +
        # Wrap an existing RAFT handle, by pointer or by reference.
        handle_t(const raft_handle_t* handle_ptr) except +
        handle_t(const raft_handle_t& handle) except +
        # Obtain the next stream from the handle's pool — presumably
        # round-robin; confirm against the C++ implementation.
        raft_proto_stream_t get_next_usable_stream() except +
        void synchronize() except+
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental/fil/detail | rapidsai_public_repos/cuml/python/cuml/experimental/fil/detail/raft_proto/optional.pxd | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The following is taken from
# https://github.com/cython/cython/blob/master/Cython/Includes/libcpp/optional.pxd,
# which provides a binding for std::optional in Cython 3.0
from libcpp cimport bool
cdef extern from "<optional>" namespace "std" nogil:
    # Tag type/value used to construct an empty optional.
    cdef cppclass nullopt_t:
        nullopt_t()
    cdef nullopt_t nullopt
    # Binding for std::optional[T]; mirrors the upstream Cython 3.0
    # declaration referenced in the header comment above.
    cdef cppclass optional[T]:
        ctypedef T value_type
        optional()
        optional(nullopt_t)
        optional(optional&) except +
        optional(T&) except +
        bool has_value()
        T& value()
        T& value_or[U](U& default_value)
        void swap(optional&)
        void reset()
        T& emplace(...)
        T& operator*()
        # T* operator->() # Not Supported
        optional& operator=(optional&)
        optional& operator=[U](U&)
        bool operator bool()
        bool operator!()
        bool operator==[U](optional&, U&)
        bool operator!=[U](optional&, U&)
        bool operator<[U](optional&, U&)
        bool operator>[U](optional&, U&)
        bool operator<=[U](optional&, U&)
        bool operator>=[U](optional&, U&)
    optional[T] make_optional[T](...) except +
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental/fil/detail | rapidsai_public_repos/cuml/python/cuml/experimental/fil/detail/raft_proto/__init__.py | #
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/linear_model/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Start from an empty list; add_module_gpu_default appends enabled sources.
set(cython_sources "")
# Include lars.pyx when the LARS / linear-model algorithm groups are enabled.
add_module_gpu_default("lars.pyx" ${lars_algo} ${linear_model_algo})
# Compile the collected Cython sources as C++ extension modules linked
# against the single-GPU cuML libraries.
rapids_cython_create_modules(
CXX
SOURCE_FILES "${cython_sources}"
LINKED_LIBRARIES "${cuml_sg_libraries}"
MODULE_PREFIX experimental_
ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/linear_model/lars.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
import cuml.internals.logger as logger
import cuml.internals
from libcpp cimport nullptr
from libc.stdint cimport uintptr_t
from cuml.common import input_to_cuml_array
from cuml.internals.array import CumlArray
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.base import Base
from cuml.internals.mixins import RegressorMixin
from cuml.common.doc_utils import generate_docstring
from pylibraft.common.handle cimport handle_t
cdef extern from "cuml/solvers/lars.hpp" namespace "ML::Solver::Lars":
    # C++ LARS solver. Fits up to max_iter coefficients; outputs the active
    # coefficient values (beta), their column indices (active_idx), the
    # correlation path (alphas), and optionally the full coefficient path.
    # X is column-major with leading dimension ld_X; Gram has leading
    # dimension ld_G (may be null when precomputation is disabled — see
    # the Python caller's _calc_gram; confirm against lars.hpp).
    cdef void larsFit[math_t](
        const handle_t& handle, math_t* X, int n_rows, int n_cols,
        const math_t* y, math_t* beta, int* active_idx, math_t* alphas,
        int* n_active, math_t* Gram, int max_iter, math_t* coef_path,
        int verbosity, int ld_X, int ld_G, math_t epsilon) except +
    # Predict with the fitted model: applies the n_active coefficients in
    # beta to the columns of X listed in active_idx, adding the intercept.
    cdef void larsPredict[math_t](
        const handle_t& handle, const math_t* X, int n_rows, int n_cols,
        int ld_X, const math_t* beta, int n_active, int* active_idx,
        math_t intercept, math_t* preds) except +
class Lars(Base, RegressorMixin):
"""
Least Angle Regression
Least Angle Regression (LAR or LARS) is a model selection algorithm. It
builds up the model using the following algorithm:
1. We start with all the coefficients equal to zero.
2. At each step we select the predictor that has the largest absolute
correlation with the residual.
3. We take the largest step possible in the direction which is equiangular
with all the predictors selected so far. The largest step is determined
such that using this step a new predictor will have as much correlation
with the residual as any of the currently active predictors.
4. Stop if `max_iter` reached or all the predictors are used, or if the
correlation between any unused predictor and the residual is lower than
a tolerance.
The solver is based on [1]_. The equations referred in the comments
correspond to the equations in the paper.
.. note:: This algorithm assumes that the offset is removed from `X` and
`y`, and each feature is normalized:
.. math::
sum_i y_i = 0, sum_i x_{i,j} = 0,sum_i x_{i,j}^2=1 \
for j=0..n_{col}-1
Parameters
----------
fit_intercept : boolean (default = True)
If True, Lars tries to correct for the global mean of y.
If False, the model expects that you have centered the data.
normalize : boolean (default = True)
This parameter is ignored when `fit_intercept` is set to False.
If True, the predictors in X will be normalized by subtracting the
mean and dividing by the column-wise L2 norm. If False, then the
solver expects that the data is already normalized.
copy_X : boolean (default = True)
The solver permutes the columns of X. Set `copy_X` to True to prevent
changing the input data.
fit_path : boolean (default = True)
Whether to return all the coefficients along the regularization path
in the `coef_path_` attribute.
precompute : bool, 'auto', or array-like with shape = (n_features, \
n_features). (default = 'auto')
Whether to precompute the Gram matrix. The user can provide the Gram
matrix as an argument.
n_nonzero_coefs : int (default 500)
The maximum number of coefficients to fit. This gives an upper limit of
how many features we select for prediction. This number is also an
upper limit of the number of iterations.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
Attributes
----------
alphas_ : array of floats or doubles, shape = [n_alphas + 1]
The maximum correlation at each step.
active_ : array of ints shape = [n_alphas]
The indices of the active variables at the end of the path.
beta_ : array of floats or doubles [n_asphas]
The active regression coefficients (same as `coef_` but zeros omitted).
coef_path_ : array of floats or doubles, shape = [n_alphas, n_alphas + 1]
The coefficients along the regularization path. Stored only if
`fit_path` is True. Note that we only store coefficients for indices
in the active set (i.e. :py:`coef_path_[:,-1] == coef_[active_]`)
coef_ : array, shape (n_features)
The estimated coefficients for the regression model.
intercept_ : scalar, float or double
The independent term. If `fit_intercept_` is False, will be 0.
n_iter_ : int
The number of iterations taken by the solver.
Notes
-----
For additional information, see `scikit-learn's OLS documentation
<https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lars.html>`__.
References
----------
.. [1] `B. Efron, T. Hastie, I. Johnstone, R Tibshirani, Least Angle
Regression The Annals of Statistics (2004) Vol 32, No 2, 407-499
<http://statweb.stanford.edu/~tibs/ftp/lars.pdf>`_
"""
alphas_ = CumlArrayDescriptor()
active_ = CumlArrayDescriptor()
beta_ = CumlArrayDescriptor()
coef_path_ = CumlArrayDescriptor()
coef_ = CumlArrayDescriptor()
intercept_ = CumlArrayDescriptor()
def __init__(self, *, fit_intercept=True, normalize=True,
             handle=None, verbose=False, output_type=None, copy_X=True,
             fit_path=True, n_nonzero_coefs=500, eps=None,
             precompute='auto'):
    """Store LARS hyper-parameters and initialize the cuML base class."""
    super().__init__(
        handle=handle, verbose=verbose, output_type=output_type)
    # Plain attribute storage; inputs are validated lazily at fit() time.
    self.fit_intercept = fit_intercept
    self.normalize = normalize
    self.copy_X = copy_X
    self.fit_path = fit_path
    self.precompute = precompute
    self.eps = eps
    # n_nonzero_coefs doubles as the solver's max_iter bound.
    self.n_nonzero_coefs = n_nonzero_coefs
def _preprocess_data(self, X_m, y_m):
    """Center y and optionally center/scale the columns of X.

    Returns ``(X, y, x_mean, x_scale, y_mean)``; the means and scales are
    identity values (zeros / ones / 0.0) when preprocessing is disabled.
    """
    X = cp.asarray(X_m)
    y = cp.asarray(y_m)
    # Defaults corresponding to "no preprocessing applied".
    x_mean = cp.zeros(self.n_cols, dtype=self.dtype)
    x_scale = cp.ones(self.n_cols, dtype=self.dtype)
    y_mean = self.dtype.type(0.0)
    if not self.fit_intercept:
        return X, y, x_mean, x_scale, y_mean
    y_mean = cp.mean(y)
    y = y - y_mean
    if self.normalize:
        x_mean = cp.mean(X, axis=0)
        # sqrt(n * var) equals the column L2 norm after centering.
        x_scale = cp.sqrt(cp.var(X, axis=0) *
                          self.dtype.type(X.shape[0]))
        # Constant columns would cause division by zero; leave unscaled.
        x_scale[x_scale == 0] = 1
        X = (X - x_mean) / x_scale
    return X, y, x_mean, x_scale, y_mean
def _set_intercept(self, x_mean, x_scale, y_mean):
    """Undo feature scaling on ``coef_`` and compute the intercept."""
    if not self.fit_intercept:
        # No centering was performed, so the intercept is exactly zero.
        self.intercept_ = self.dtype.type(0.0)
        return
    with cuml.using_output_type('cupy'):
        # Map coefficients back to the original (unscaled) feature space.
        self.coef_ = self.coef_ / x_scale
        self.intercept_ = y_mean - cp.dot(x_mean, self.coef_.T)
        # Store the intercept as a plain Python scalar.
        self.intercept_ = self.intercept_.item()
def _calc_gram(self, X):
    """Return the Gram matrix X^T X, or None when it is not applicable.

    The matrix is computed when ``precompute`` is True, or is ``'auto'``
    and the problem is tall (more rows than columns).
    """
    X = cp.asarray(X)
    want_gram = self.precompute is True or (
        self.precompute == 'auto' and self.n_cols < X.shape[0])
    if not want_gram:
        return None
    logger.debug('Calculating Gram matrix')
    try:
        return cp.dot(X.T, X)
    except MemoryError:
        # Fall back to the Gram-free code path instead of failing.
        if self.precompute:
            logger.debug("Not enough memory to store the Gram matrix."
                         " Proceeding without it.")
    return None
def _fit_cpp(self, X, y, Gram, x_scale):
    """Fit the LARS model by invoking the C++ ``larsFit`` solver.

    Pre-allocates the output arrays (``beta_``, ``active_``, ``alphas_``
    and, if requested, ``coef_path_``) at their maximum size, calls the
    dtype-matching ``larsFit`` overload, then truncates the outputs to the
    number of features actually selected (``n_active``).
    """
    cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
    # The solver expects column-major ('F') data of the fitted dtype.
    X_m, _, _, _ = input_to_cuml_array(X, check_dtype=self.dtype,
                                       order='F')
    cdef uintptr_t X_ptr = X_m.ptr
    cdef int n_rows = X.shape[0]
    cdef uintptr_t y_ptr = \
        input_to_cuml_array(y, check_dtype=self.dtype).array.ptr
    # n_nonzero_coefs doubles as the solver's iteration cap.
    cdef int max_iter = self.n_nonzero_coefs
    # Pre-allocate outputs at the maximum possible size; trimmed below.
    self.beta_ = CumlArray.zeros(max_iter, dtype=self.dtype)
    cdef uintptr_t beta_ptr = self.beta_.ptr
    self.active_ = CumlArray.zeros(max_iter, dtype=np.int32)
    cdef uintptr_t active_idx_ptr = self.active_.ptr
    self.alphas_ = CumlArray.zeros(max_iter+1, dtype=self.dtype)
    cdef uintptr_t alphas_ptr = self.alphas_.ptr
    cdef int n_active
    # The Gram matrix is optional; nullptr selects the Gram-free path.
    cdef uintptr_t Gram_ptr = <uintptr_t> nullptr
    if Gram is not None:
        Gram_m, _, _, _ = input_to_cuml_array(Gram)
        Gram_ptr = Gram_m.ptr
    cdef uintptr_t coef_path_ptr = <uintptr_t> nullptr
    if (self.fit_path):
        try:
            self.coef_path_ = CumlArray.zeros((max_iter, max_iter+1),
                                              dtype=self.dtype, order='F')
        except MemoryError as err:
            # The path matrix is O(max_iter^2); tell the user how to
            # reduce the allocation instead of failing opaquely.
            raise MemoryError("Not enough memory to store coef_path_. "
                              "Try to decrease n_nonzero_coefs or set "
                              "fit_path=False.") from err
        coef_path_ptr = self.coef_path_.ptr
    # Leading dimensions of the column-major X and Gram buffers.
    cdef int ld_X = n_rows
    cdef int ld_G = self.n_cols
    # Dispatch to the float32 or float64 instantiation of larsFit.
    if self.dtype == np.float32:
        larsFit(handle_[0], <float*> X_ptr, n_rows, <int> self.n_cols,
                <float*> y_ptr, <float*> beta_ptr, <int*> active_idx_ptr,
                <float*> alphas_ptr, &n_active, <float*> Gram_ptr,
                max_iter, <float*> coef_path_ptr, <int> self.verbose, ld_X,
                ld_G, <float> self.eps)
    else:
        larsFit(handle_[0], <double*> X_ptr, n_rows, <int> self.n_cols,
                <double*> y_ptr, <double*> beta_ptr, <int*> active_idx_ptr,
                <double*> alphas_ptr, &n_active, <double*> Gram_ptr,
                max_iter, <double*> coef_path_ptr, <int> self.verbose,
                ld_X, ld_G, <double> self.eps)
    self.n_active = n_active
    self.n_iter_ = n_active
    # Trim the over-allocated outputs to the entries actually produced,
    # and scatter the active coefficients into a dense coef_ vector.
    with cuml.using_output_type("cupy"):
        self.active_ = self.active_[:n_active]
        self.beta_ = self.beta_[:n_active]
        self.alphas_ = self.alphas_[:n_active+1]
        self.coef_ = cp.zeros(self.n_cols, dtype=self.dtype)
        self.coef_[self.active_] = self.beta_
        if self.fit_intercept:
            # Undo normalization so beta_ matches the original feature
            # scale (coef_ was already rescaled via _set_intercept's path).
            self.beta_ = self.beta_ / x_scale[self.active_]
@generate_docstring(y='dense_anydtype')
def fit(self, X, y, convert_dtype=True) -> 'Lars':
    """
    Fit the model with X and y.
    """
    self._set_n_features_in(X)
    self._set_output_type(X)
    # Validate X; this also fixes self.dtype/self.n_cols for the rest of
    # the estimator's lifetime. Column-major layout is required by larsFit.
    X_m, n_rows, self.n_cols, self.dtype = input_to_cuml_array(
        X, check_dtype=[np.float32, np.float64], order='F')
    conv_dtype = self.dtype if convert_dtype else None
    y_m, _, _, _ = input_to_cuml_array(
        y, order='F', check_dtype=self.dtype, convert_to_dtype=conv_dtype,
        check_rows=n_rows, check_cols=1)
    # Centering/normalization (identity transforms when disabled).
    X, y, x_mean, x_scale, y_scale = self._preprocess_data(X_m, y_m)
    # A user-supplied array in `precompute` is taken as the Gram matrix;
    # otherwise decide whether to compute it ourselves.
    if hasattr(self.precompute, '__cuda_array_interface__') or \
            hasattr(self.precompute, '__array_interface__'):
        Gram, _, _, _ = \
            input_to_cuml_array(self.precompute, order='F',
                                check_dtype=[np.float32, np.float64],
                                convert_to_dtype=conv_dtype,
                                check_rows=self.n_cols,
                                check_cols=self.n_cols)
        logger.debug('Using precalculated Gram matrix')
    else:
        Gram = self._calc_gram(X)
    if Gram is None and self.copy_X and not isinstance(X, np.ndarray):
        # Without Gram matrix, the solver will permute columns of X
        # We make a copy here, and work on the copy.
        X = cp.copy(X)
    # NOTE(review): this overwrites the user-facing hyper-parameter
    # `self.eps` with a concrete default — refit after changing eps=None
    # behaves as before, but get_params() now reports the resolved value.
    if self.eps is None:
        self.eps = np.finfo(float).eps
    self._fit_cpp(X, y, Gram, x_scale)
    self._set_intercept(x_mean, x_scale, y_scale)
    # Ensure all GPU work has completed before releasing the inputs.
    self.handle.sync()
    del X_m
    del y_m
    del Gram
    return self
def predict(self, X, convert_dtype=True) -> CumlArray:
    """
    Predicts `y` values for `X`.

    Parameters
    ----------
    X : array-like (device or host) shape = (n_samples, n_features)
        Dense matrix (floats or doubles) of shape (n_samples, n_features).
        Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
        ndarray, cuda array interface compliant array like CuPy
    convert_dtype : bool, optional (default = True)
        When set to True, the predict method will, when necessary, convert
        the input to the data type which was used to train the model. This
        will increase memory used for the method.

    Returns
    -------
    y: cuDF DataFrame
        Dense vector (floats or doubles) of shape (n_samples, 1)
    """
    conv_dtype=(self.dtype if convert_dtype else None)
    # Validate/convert input; larsPredict expects column-major ('F') data
    # with the same number of columns the model was fitted on.
    X_m, n_rows, _n_cols, _dtype = input_to_cuml_array(
        X, check_dtype=self.dtype, convert_to_dtype=conv_dtype,
        check_cols=self.n_cols, order='F')
    cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
    cdef uintptr_t X_ptr = X_m.ptr
    # Leading dimension of a dense column-major matrix equals its rows.
    cdef int ld_X = n_rows
    # Raw device pointers to the truncated solution arrays set in fit().
    cdef uintptr_t beta_ptr = input_to_cuml_array(self.beta_).array.ptr
    cdef uintptr_t active_idx_ptr = \
        input_to_cuml_array(self.active_).array.ptr
    preds = CumlArray.zeros(n_rows, dtype=self.dtype, index=X_m.index)
    # Dispatch to the float32 or float64 instantiation of larsPredict.
    if self.dtype == np.float32:
        larsPredict(handle_[0], <float*> X_ptr, <int> n_rows,
                    <int> self.n_cols, ld_X, <float*> beta_ptr,
                    <int> self.n_active, <int*> active_idx_ptr,
                    <float> self.intercept_,
                    <float*><uintptr_t> preds.ptr)
    else:
        larsPredict(handle_[0], <double*> X_ptr, <int> n_rows,
                    <int> self.n_cols, ld_X, <double*> beta_ptr,
                    <int> self.n_active, <int*> active_idx_ptr,
                    <double> self.intercept_,
                    <double*><uintptr_t> preds.ptr)
    # Block until the GPU work is done before handing the result back.
    self.handle.sync()
    del X_m
    return preds
def get_param_names(self):
    """Return hyper-parameter names handled by this estimator."""
    own_params = [
        'copy_X', 'fit_intercept', 'fit_path', 'n_nonzero_coefs',
        'normalize', 'precompute', 'eps',
    ]
    return super().get_param_names() + own_params
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/linear_model/__init__.py | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.experimental.linear_model.lars import Lars
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/hyperparams/HPO_demo.ipynb | import warnings
# NOTE(review): extracted Jupyter-notebook cells — cell boundaries were
# lost on export, so several lines below contain two statements fused
# together (e.g. "...displayedimport time"). Re-split before running.
warnings.filterwarnings('ignore') # Reduce number of messages/warnings displayedimport time
import numpy as np
import cupy as cp
import pandas as pd
import cudf
import cuml
import rmm
import xgboost as xgb
import sklearn.model_selection as sk
import dask_ml.model_selection as dcv
from dask.distributed import Client, wait
from dask_cuda import LocalCUDACluster
from sklearn import datasets
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score as sk_acc
from cuml.neighbors import KNeighborsClassifier
from cuml.preprocessing.model_selection import train_test_split
from cuml.metrics.accuracy import accuracy_score
import os
from urllib.request import urlretrieve
# Spin up a single-node dask-cuda cluster for the GPU searches below.
import gzipcluster = LocalCUDACluster(dashboard_address="127.0.0.1:8005")
client = Client(cluster)
# Download + decompress the HIGGS dataset if not already present on disk.
clientdef download_higgs(compressed_filepath, decompressed_filepath):
    higgs_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz'
    if not os.path.isfile(compressed_filepath):
        urlretrieve(higgs_url, compressed_filepath)
    if not os.path.isfile(decompressed_filepath):
        cf = gzip.GzipFile(compressed_filepath)
        with open(decompressed_filepath, 'wb') as df:
            df.write(cf.read())data_dir = '/home/hyperopt/data/'
# NOTE(review): data_dir is '/home/hyperopt/data/' but the mkdir below
# creates '/home/data/' — presumably a typo; confirm the intended path.
if not os.path.exists(data_dir):
    print('creating data directory')
    os.system('mkdir /home/data/')compressed_filepath = os.path.join(data_dir, 'HIGGS.csv.gz') # Set this as path for gzipped Higgs data file, if you already have
decompressed_filepath = os.path.join(data_dir, 'HIGGS.csv') # Set this as path for decompressed Higgs data file, if you already have
# Uncomment this line to download the dataset.
# download_higgs(compressed_filepath, decompressed_filepath)
col_names = ['label'] + ["col-{}".format(i) for i in range(2, 30)] # Assign column names
dtypes_ls = ['int32'] + ['float32' for _ in range(2, 30)] # Assign dtypes to each column
input_data = cudf.read_csv(decompressed_filepath, names=col_names, dtype=dtypes_ls)labels = input_data.label.reset_index().drop(['index'], axis=1)
for col in labels.columns:
    labels[col] = labels[col].astype('float32')
data = input_data.drop(['label'], axis=1)len(data)import time
from contextlib import contextmanager
# Helping time blocks of code
@contextmanager
def timed(txt):
    t0 = time.time()
    yield
    t1 = time.time()
    print("%32s time: %8.5f" % (txt, t1 - t0))data_fraction = 0.1N_ROWS = int(len(data) * data_fraction)
# Define some default values to make use of across the notebook for a fair comparison
N_FOLDS = 5
N_ESTIMATORS = 100
MAX_DEPTH = 5
N_ITER = 100
# Subsample the dataset and build GPU (cuDF) and CPU (pandas/numpy) splits.
print(N_ROWS)data = data[:N_ROWS]
labels = labels[:N_ROWS]X_train, X_test, y_train, y_test = train_test_split(data,
                                                    labels,
                                                    test_size=0.2)X_cpu = X_train.to_pandas()
y_cpu = y_train.label.to_numpy()
X_test_cpu = X_test.to_pandas()
y_test_cpu = y_test.label.to_numpy()def accuracy_score_wrapper(y, y_hat):
    """
    A wrapper function to convert labels to float32,
    and pass it to accuracy_score.
    Params:
    - y: The y labels that need to be converted
    - y_hat: The predictions made by the model
    """
    y = y.astype("float32") # cuML RandomForest needs the y labels to be float32
    return accuracy_score(y, y_hat, convert_dtype=True)
accuracy_wrapper_scorer = make_scorer(accuracy_score_wrapper)
# NOTE(review): fused notebook cells continue below (see note at top of
# the file's setup section); statement boundaries need re-splitting.
cuml_accuracy_scorer = make_scorer(accuracy_score, convert_dtype=True)def do_HPO(model, gridsearch_params, scorer, X, y, mode='gpu-Grid', n_iter=10):
    """
    Perform HPO based on the mode specified
    mode: default gpu-Grid. The possible options are:
    1. gpu-grid: Perform GPU based GridSearchCV
    2. gpu-random: Perform GPU based RandomizedSearchCV
    3. cpu-grid: Perform CPU based GridSearchCV
    4. cpu-random: Perform CPU based RandomizedSearchCV
    n_iter: specified with Random option for number of parameter settings sampled
    Returns the best estimator and the results of the search
    """
    # NOTE(review): the 'cpu-*' and 'gpu-*' branches build identical
    # dask-ml searches — the device split comes from the data/model passed
    # in, not from this function.
    if mode == 'cpu-grid':
        print("cpu-grid selected")
        clf = dcv.GridSearchCV(model,
                               gridsearch_params,
                               cv=N_FOLDS,
                               scoring=scorer)
    elif mode == 'gpu-grid':
        print("gpu-grid selected")
        clf = dcv.GridSearchCV(model,
                               gridsearch_params,
                               cv=N_FOLDS,
                               scoring=scorer)
    elif mode == 'gpu-random':
        print("gpu-random selected")
        clf = dcv.RandomizedSearchCV(model,
                                     gridsearch_params,
                                     cv=N_FOLDS,
                                     scoring=scorer,
                                     n_iter=n_iter)
    elif mode == 'cpu-random':
        print("cpu-random selected")
        clf = dcv.RandomizedSearchCV(model,
                                     gridsearch_params,
                                     cv=N_FOLDS,
                                     scoring=scorer,
                                     n_iter=n_iter)
    else:
        print("Unknown Option, please choose one of [gpu-grid, gpu-random, cpu-grid, cpu-random]")
        return None, None
    res = clf.fit(X, y)
    print("Best clf and score {} {}\n---\n".format(res.best_estimator_, res.best_score_))
    return res.best_estimator_, resdef print_acc(model, X_train, y_train, X_test, y_test, mode_str="Default"):
    """
    Trains a model on the train data provided, and prints the accuracy of the trained model.
    mode_str: User specifies what model it is to print the value
    """
    y_pred = model.fit(X_train, y_train).predict(X_test)
    score = accuracy_score(y_pred, y_test.astype('float32'), convert_dtype=True)
    print("{} model accuracy: {}".format(mode_str, score))
# Baseline XGBoost model without any tuning, then the search space.
X_train.shapemodel_gpu_xgb_ = xgb.XGBClassifier(tree_method='gpu_hist')
print_acc(model_gpu_xgb_, X_train, y_cpu, X_test, y_test_cpu)# For xgb_model
model_gpu_xgb = xgb.XGBClassifier(tree_method='gpu_hist')
# More range
params_xgb = {
    "max_depth": np.arange(start=3, stop = 15, step = 3), # Default = 6
    "alpha" : np.logspace(-3, -1, 5), # default = 0
    "learning_rate": [0.05, 0.1, 0.15], #default = 0.3
    "min_child_weight" : np.arange(start=2, stop=10, step=3), # default = 1
    "n_estimators": [100, 200, 1000]
}mode = "gpu-random"
with timed("XGB-"+mode):
    res, results = do_HPO(model_gpu_xgb,
                          params_xgb,
                          cuml_accuracy_scorer,
                          X_train,
                          y_cpu,
                          mode=mode,
                          n_iter=N_ITER)
print("Searched over {} parameters".format(len(results.cv_results_['mean_test_score'])))
print_acc(res, X_train, y_cpu, X_test, y_test_cpu, mode_str=mode)mode = "gpu-grid"
with timed("XGB-"+mode):
    res, results = do_HPO(model_gpu_xgb,
                          params_xgb,
                          cuml_accuracy_scorer,
                          X_train,
                          y_cpu,
                          mode=mode)
print("Searched over {} parameters".format(len(results.cv_results_['mean_test_score'])))print_acc(res, X_train, y_cpu, X_test, y_test_cpu, mode_str=mode)from cuml.experimental.hyperopt_utils import plotting_utilsplotting_utils.plot_search_results(results)df_gridsearch = pd.DataFrame(results.cv_results_)
plotting_utils.plot_heatmap(df_gridsearch, "param_max_depth", "param_n_estimators")if data_fraction <= 0.1:
    model_cpu_xgb = xgb.XGBClassifier(tree_method='hist')
    mode = "cpu-random"
    with timed("XGB-" + mode):
        res, results = do_HPO(model_cpu_xgb,
                              params_xgb,
                              'accuracy',
                              X_cpu,
                              y_cpu,
                              mode=mode,
                              n_iter=N_ITER)
    print("Searched over {} parameters".format(len(results.cv_results_['mean_test_score'])))
    print_acc(res , X_cpu, y_cpu, X_test_cpu, y_test_cpu,
              mode_str=mode)# KNN-Classifier
model_knn_ = KNeighborsClassifier(n_neighbors=5)
model_knn_.fit(X_train, y_train)
print("Default accuracy {}".format(accuracy_score(model_knn_.predict(X_test), y_test)))model_knn = KNeighborsClassifier(n_neighbors=5)
ks = [i for i in range(1, 40)]
params_knn = {'n_neighbors': ks
              }
mode = "gpu-grid"
with timed("KNN-"+mode):
    res, results = do_HPO(model_knn,
                          params_knn,
                          cuml_accuracy_scorer,
                          X_train,
                          y_cpu.astype('int32'),
                          mode=mode)
res.fit(X_train, y_train)
print("{} accuracy {}".format(mode, accuracy_score(res.predict(X_test), y_test)))df = pd.DataFrame(results.cv_results_)import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
sns.lineplot(x="param_n_neighbors", y="mean_test_score", data=df) | 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/hyperopt_utils/plotting_utils.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import seaborn as sns
import matplotlib.pyplot as plt
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
pd = cpu_only_import("pandas")
def plot_heatmap(df, col1, col2):
    """Draw a heatmap of the best ``mean_test_score`` per (col1, col2) pair.

    Parameters
    ----------
    df : pandas.DataFrame
        Results from Grid or Random Search (``cv_results_`` as a frame).
    col1 : str
        Name of the first parameter (heatmap rows).
    col2 : str
        Name of the second parameter (heatmap columns).
    """
    best_by_pair = (
        df.groupby([col1, col2]).max().unstack()[["mean_test_score"]]
    )
    sns.heatmap(best_by_pair.mean_test_score, annot=True, fmt=".3g")
def plot_search_results(res):
    """Plot score vs. each hyper-parameter, holding the others at their best.

    Accepts a fitted search object from dask-ml (GridSearchCV or
    RandomizedSearchCV). One errorbar subplot is drawn per parameter;
    grid searches are handled first, and a randomized-search fallback is
    attempted if the grid attributes are absent.

    Parameters
    ----------
    res : fitted dask-ml GridSearchCV or RandomizedSearchCV
        Must expose ``cv_results_``, ``best_params_`` and either
        ``param_grid`` or ``param_distributions``.
    """
    # Results from grid search
    results = res.cv_results_
    means_test = results["mean_test_score"]
    stds_test = results["std_test_score"]
    # Boolean mask per parameter: rows where that parameter took its
    # best value.
    masks = []
    masks_names = list(res.best_params_.keys())
    for p_k, p_v in res.best_params_.items():
        masks.append(list(results["param_" + p_k].data == p_v))
    try:
        # Grid Search: `param_grid` only exists on GridSearchCV.
        params = res.param_grid
        # Plotting results
        fig, ax = plt.subplots(
            1, len(params), sharex="none", sharey="all", figsize=(20, 5)
        )
        fig.suptitle("Score per parameter")
        fig.text(0.04, 0.5, "MEAN SCORE", va="center", rotation="vertical")
        for i, p in enumerate(masks_names):
            # Fix every parameter except p at its best value; the rows
            # that survive vary only in p.
            m = np.stack(masks[:i] + masks[i + 1 :])
            best_parms_mask = m.all(axis=0)
            best_index = np.where(best_parms_mask)[0]
            x = np.array(params[p])
            y_1 = np.array(means_test[best_index])
            e_1 = np.array(stds_test[best_index])
            ax[i].errorbar(
                x, y_1, e_1, linestyle="--", marker="o", label="test"
            )
            ax[i].set_xlabel(p.upper())
    except Exception as e:
        # Randomized Search
        print("Cannot generate plots because of ", type(e), "trying again...")
        try:
            params = res.param_distributions
            # Plotting results
            fig, ax = plt.subplots(
                1, len(params), sharex="none", sharey="all", figsize=(20, 5)
            )
            fig.suptitle("Score per parameter")
            fig.text(0.04, 0.5, "MEAN SCORE", va="center", rotation="vertical")
            for i, p in enumerate(masks_names):
                # Filter cv_results_ down to rows where every other
                # parameter equals its best value.
                results = pd.DataFrame(res.cv_results_)
                select_names = masks_names[:i] + masks_names[i + 1 :]
                for j in select_names:
                    best_value = res.best_params_[j]
                    results = results[results["param_" + j] == best_value]
                x = np.array(results["param_" + p])
                y_1 = np.array(results["mean_test_score"])
                e_1 = np.array(results["std_test_score"])
                ax[i].errorbar(
                    x, y_1, e_1, linestyle="--", marker="o", label="test"
                )
                ax[i].set_xlabel(p.upper())
        except Exception as e:
            # Something else broke while attempting to plot
            print("Cannot generate plots because of ", type(e))
            return
    plt.legend()
    plt.show()
| 0 |
rapidsai_public_repos/cuml/python/cuml/experimental | rapidsai_public_repos/cuml/python/cuml/experimental/hyperopt_utils/__init__.py | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.experimental.hyperopt_utils import plotting_utils
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/compose/__init__.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml._thirdparty.sklearn.preprocessing import (
ColumnTransformer,
make_column_transformer,
make_column_selector,
)
__all__ = [
# Classes
"ColumnTransformer",
# Functions
"make_column_transformer",
"make_column_selector",
]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/ensemble/randomforestregressor.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import,
gpu_only_import_from,
null_decorator
)
np = cpu_only_import('numpy')
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
rmm = gpu_only_import('rmm')
from cuml.internals.array import CumlArray
import cuml.internals
from cuml.internals.mixins import RegressorMixin
from cuml.common.doc_utils import generate_docstring
from cuml.common.doc_utils import insert_into_docstring
from cuml.common import input_to_cuml_array
from cuml.ensemble.randomforest_common import BaseRandomForestModel
from cuml.ensemble.randomforest_common import _obtain_fil_model
from cuml.ensemble.randomforest_shared cimport *
from cuml.fil.fil import TreeliteModel
from libcpp cimport bool
from libc.stdint cimport uintptr_t, uint64_t
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
from pylibraft.common.handle cimport handle_t
cimport cuml.common.cuda
# Declarations of the C++ random-forest entry points. Each routine has a
# float32 and a float64 overload; parameter semantics are defined in
# cuml/ensemble/randomforest.hpp.
cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML":
    # Train a forest (float32 data/labels).
    cdef void fit(handle_t& handle,
                  RandomForestMetaData[float, float]*,
                  float*,
                  int,
                  int,
                  float*,
                  RF_params,
                  int) except +

    # Train a forest (float64 data/labels).
    cdef void fit(handle_t& handle,
                  RandomForestMetaData[double, double]*,
                  double*,
                  int,
                  int,
                  double*,
                  RF_params,
                  int) except +

    # Predict with a trained forest (float32).
    cdef void predict(handle_t& handle,
                      RandomForestMetaData[float, float] *,
                      float*,
                      int,
                      int,
                      float*,
                      int) except +

    # Predict with a trained forest (float64).
    cdef void predict(handle_t& handle,
                      RandomForestMetaData[double, double]*,
                      double*,
                      int,
                      int,
                      double*,
                      int) except +

    # Compute regression metrics for predictions (float32).
    cdef RF_metrics score(handle_t& handle,
                          RandomForestMetaData[float, float]*,
                          float*,
                          int,
                          float*,
                          int) except +

    # Compute regression metrics for predictions (float64).
    cdef RF_metrics score(handle_t& handle,
                          RandomForestMetaData[double, double]*,
                          double*,
                          int,
                          double*,
                          int) except +
class RandomForestRegressor(BaseRandomForestModel,
RegressorMixin):
"""
Implements a Random Forest regressor model which fits multiple decision
trees in an ensemble.
.. note:: Note that the underlying algorithm for tree node splits differs
from that used in scikit-learn. By default, the cuML Random Forest uses a
quantile-based algorithm to determine splits, rather than an exact
count. You can tune the size of the quantiles with the `n_bins` parameter
.. note:: You can export cuML Random Forest models and run predictions
with them on machines without an NVIDIA GPUs. See
https://docs.rapids.ai/api/cuml/nightly/pickling_cuml_models.html
for more details.
Examples
--------
.. code-block:: python
>>> import cupy as cp
>>> from cuml.ensemble import RandomForestRegressor as curfr
>>> X = cp.asarray([[0,10],[0,20],[0,30],[0,40]], dtype=cp.float32)
>>> y = cp.asarray([0.0,1.0,2.0,3.0], dtype=cp.float32)
>>> cuml_model = curfr(max_features=1.0, n_bins=128,
... min_samples_leaf=1,
... min_samples_split=2,
... n_estimators=40, accuracy_metric='r2')
>>> cuml_model.fit(X,y)
RandomForestRegressor()
>>> cuml_score = cuml_model.score(X,y)
>>> print("MSE score of cuml : ", cuml_score) # doctest: +SKIP
MSE score of cuml : 0.9076250195503235
Parameters
----------
n_estimators : int (default = 100)
Number of trees in the forest. (Default changed to 100 in cuML 0.11)
split_criterion : int or string (default = ``2`` (``'mse'``))
The criterion used to split nodes.\n
* ``0`` or ``'gini'`` for gini impurity
* ``1`` or ``'entropy'`` for information gain (entropy)
* ``2`` or ``'mse'`` for mean squared error
* ``4`` or ``'poisson'`` for poisson half deviance
* ``5`` or ``'gamma'`` for gamma half deviance
* ``6`` or ``'inverse_gaussian'`` for inverse gaussian deviance
``0``, ``'gini'``, ``1`` and ``'entropy'`` not valid for regression.
bootstrap : boolean (default = True)
Control bootstrapping.\n
* If ``True``, eachtree in the forest is built
on a bootstrapped sample with replacement.
* If ``False``, the whole dataset is used to build each tree.
max_samples : float (default = 1.0)
Ratio of dataset rows used while fitting each tree.
max_depth : int (default = 16)
Maximum tree depth. Must be greater than 0.
Unlimited depth (i.e, until leaves are pure)
is not supported.\n
.. note:: This default differs from scikit-learn's
random forest, which defaults to unlimited depth.
max_leaves : int (default = -1)
Maximum leaf nodes per tree. Soft constraint. Unlimited,
If ``-1``.
max_features : int, float, or string (default = 'auto')
Ratio of number of features (columns) to consider
per node split.\n
* If type ``int`` then ``max_features`` is the absolute count of
features to be used.
* If type ``float`` then ``max_features`` is used as a fraction.
* If ``'auto'`` then ``max_features=1.0``.
* If ``'sqrt'`` then ``max_features=1/sqrt(n_features)``.
* If ``'log2'`` then ``max_features=log2(n_features)/n_features``.
n_bins : int (default = 128)
Maximum number of bins used by the split algorithm per feature.
For large problems, particularly those with highly-skewed input data,
increasing the number of bins may improve accuracy.
n_streams : int (default = 4 )
Number of parallel streams used for forest building
min_samples_leaf : int or float (default = 1)
The minimum number of samples (rows) in each leaf node.\n
* If type ``int``, then ``min_samples_leaf`` represents the minimum
number.\n
* If ``float``, then ``min_samples_leaf`` represents a fraction and
``ceil(min_samples_leaf * n_rows)`` is the minimum number of
samples for each leaf node.
min_samples_split : int or float (default = 2)
The minimum number of samples required to split an internal
node.\n
* If type ``int``, then min_samples_split represents the minimum
number.
* If type ``float``, then ``min_samples_split`` represents a fraction
and ``max(2, ceil(min_samples_split * n_rows))`` is the minimum
number of samples for each split.
min_impurity_decrease : float (default = 0.0)
The minimum decrease in impurity required for node to be split
accuracy_metric : string (default = 'r2')
Decides the metric used to evaluate the performance of the model.
In the 0.16 release, the default scoring metric was changed
from mean squared error to r-squared.\n
* for r-squared : ``'r2'``
* for median of abs error : ``'median_ae'``
* for mean of abs error : ``'mean_ae'``
* for mean square error' : ``'mse'``
max_batch_size : int (default = 4096)
Maximum number of nodes that can be processed in a given batch.
random_state : int (default = None)
Seed for the random number generator. Unseeded by default. Does not
currently fully guarantee the exact same results.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
Notes
-----
**Known Limitations**\n
This is an early release of the cuML
Random Forest code. It contains a few known limitations:
* GPU-based inference is only supported with 32-bit (float32) data-types.
Alternatives are to use CPU-based inference for 64-bit (float64)
data-types, or let the default automatic datatype conversion occur
during GPU inference.
For additional docs, see `scikitlearn's RandomForestRegressor
<https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html>`_.
"""
def __init__(self, *,
             split_criterion=2,
             accuracy_metric='r2',
             handle=None,
             verbose=False,
             output_type=None,
             **kwargs):
    """Configure a regression forest; remaining kwargs go to the base class."""
    # Set before the base initializer runs. NOTE(review): assumed the
    # base class reads RF_type during __init__ — confirm before reordering.
    self.RF_type = REGRESSION
    super().__init__(split_criterion=split_criterion,
                     accuracy_metric=accuracy_metric,
                     handle=handle,
                     verbose=verbose,
                     output_type=output_type,
                     **kwargs)
# TODO: Add the preprocess and postprocess functions in the cython code to
# normalize the labels
def __getstate__(self):
    """Build a picklable dict of the estimator's state.

    The C++ RandomForestMetaData structs cannot be pickled, so only their
    ``rf_params`` are copied out; the trained forest itself is carried as
    the cached treelite-serialized bytes.
    """
    state = self.__dict__.copy()
    cdef size_t params_t
    cdef RandomForestMetaData[float, float] *rf_forest
    cdef RandomForestMetaData[double, double] *rf_forest64
    cdef size_t params_t64
    if self.n_cols:
        # only if model has been fit previously
        self._get_serialized_model()  # Ensure we have this cached
        if self.rf_forest:
            # Copy the float32 forest's params out of the C++ struct.
            params_t = <uintptr_t> self.rf_forest
            rf_forest = \
                <RandomForestMetaData[float, float]*>params_t
            state["rf_params"] = rf_forest.rf_params
        if self.rf_forest64:
            # Copy the float64 forest's params out of the C++ struct.
            params_t64 = <uintptr_t> self.rf_forest64
            rf_forest64 = \
                <RandomForestMetaData[double, double]*>params_t64
            state["rf_params64"] = rf_forest64.rf_params
    state['n_cols'] = self.n_cols
    state["verbose"] = self.verbose
    state["treelite_serialized_model"] = self.treelite_serialized_model
    state['handle'] = self.handle
    # The raw treelite handle is process-local; drop it from the pickle.
    state["treelite_handle"] = None
    state["split_criterion"] = self.split_criterion
    return state
def __setstate__(self, state):
    """Restore from a pickle: re-run base-class init, then rebuild the
    C++ metadata structs from the pickled ``rf_params``."""
    super(RandomForestRegressor, self).__init__(
        split_criterion=state["split_criterion"],
        handle=state["handle"], verbose=state['verbose'])
    # Fresh C++ metadata structs; their pointers are stored on the
    # instance (as uintptr_t) via __dict__.update(state) below.
    cdef RandomForestMetaData[float, float] *rf_forest = \
        new RandomForestMetaData[float, float]()
    cdef RandomForestMetaData[double, double] *rf_forest64 = \
        new RandomForestMetaData[double, double]()
    self.n_cols = state['n_cols']
    if self.n_cols:
        rf_forest.rf_params = state["rf_params"]
        state["rf_forest"] = <uintptr_t>rf_forest
        rf_forest64.rf_params = state["rf_params64"]
        state["rf_forest64"] = <uintptr_t>rf_forest64
    # NOTE(review): when n_cols is falsy the two structs allocated above
    # are never stored and appear to leak — confirm whether intended.
    self.treelite_serialized_model = state["treelite_serialized_model"]
    self.__dict__.update(state)
def __del__(self):
    # Free the C++ forest and treelite allocations; Python GC alone would
    # leak them, since they live outside the Python heap.
    self._reset_forest_data()
def _reset_forest_data(self):
    """Free memory allocated by this instance and clear instance vars."""
    if self.rf_forest:
        delete_rf_metadata(
            <RandomForestMetaData[float, float]*><uintptr_t>
            self.rf_forest)
        # 0 is the sentinel for "no float32 forest allocated".
        self.rf_forest = 0
    if self.rf_forest64:
        delete_rf_metadata(
            <RandomForestMetaData[double, double]*><uintptr_t>
            self.rf_forest64)
        # 0 is the sentinel for "no float64 forest allocated".
        self.rf_forest64 = 0
    if self.treelite_handle:
        TreeliteModel.free_treelite_model(self.treelite_handle)
    # Clear all cached model artifacts; n_cols=None marks "not fitted".
    self.treelite_handle = None
    self.treelite_serialized_model = None
    self.n_cols = None
def convert_to_treelite_model(self):
    """
    Converts the cuML RF model to a Treelite model.

    Returns
    -------
    tl_to_fil_model : Treelite version of this model
    """
    return TreeliteModel.from_treelite_model_handle(
        self._obtain_treelite_handle())
def convert_to_fil_model(self, output_class=False,
algo='auto',
fil_sparse_format='auto'):
"""
Create a Forest Inference (FIL) model from the trained cuML
Random Forest model.
Parameters
----------
output_class : boolean (default = False)
This is optional and required only while performing the
predict operation on the GPU.
If true, return a 1 or 0 depending on whether the raw
prediction exceeds the threshold. If False, just return
the raw prediction.
algo : string (default = 'auto')
This is optional and required only while performing the
predict operation on the GPU.
* ``'naive'`` - simple inference using shared memory
* ``'tree_reorg'`` - similar to naive but trees rearranged to be
more coalescing-friendly
* ``'batch_tree_reorg'`` - similar to tree_reorg but predicting
multiple rows per thread block
* ``'auto'`` - choose the algorithm automatically. Currently
* ``'batch_tree_reorg'`` is used for dense storage
and 'naive' for sparse storage
fil_sparse_format : boolean or string (default = 'auto')
This variable is used to choose the type of forest that will be
created in the Forest Inference Library. It is not required
while using predict_model='CPU'.
* ``'auto'`` - choose the storage type automatically
(currently True is chosen by auto)
* ``False`` - create a dense forest
* ``True`` - create a sparse forest, requires algo='naive'
or algo='auto'
Returns
-------
fil_model
A Forest Inference model which can be used to perform
inferencing on the random forest model.
"""
treelite_handle = self._obtain_treelite_handle()
return _obtain_fil_model(treelite_handle=treelite_handle,
depth=self.max_depth,
output_class=output_class,
algo=algo,
fil_sparse_format=fil_sparse_format)
    @nvtx_annotate(
        message="fit RF-Regressor @randomforestregressor.pyx",
        domain="cuml_python")
    @generate_docstring()
    @cuml.internals.api_base_return_any_skipall
    def fit(self, X, y, convert_dtype=True):
        """
        Perform Random Forest Regression on the input data
        """
        X_m, y_m, max_feature_val = self._dataset_setup_for_fit(X, y,
                                                                convert_dtype)
        # Reset the old tree data for new fit call
        cdef uintptr_t X_ptr, y_ptr
        X_ptr = X_m.ptr
        y_ptr = y_m.ptr
        cdef handle_t* handle_ =\
            <handle_t*><uintptr_t>self.handle.getHandle()
        # Allocate fresh C++ forest metadata for both precisions; only the
        # one matching self.dtype is populated by the fit() call below.
        cdef RandomForestMetaData[float, float] *rf_forest = \
            new RandomForestMetaData[float, float]()
        self.rf_forest = <uintptr_t> rf_forest
        cdef RandomForestMetaData[double, double] *rf_forest64 = \
            new RandomForestMetaData[double, double]()
        self.rf_forest64 = <uintptr_t> rf_forest64
        # A NULL seed lets the C++ layer choose its own seed.
        if self.random_state is None:
            seed_val = <uintptr_t>NULL
        else:
            seed_val = <uintptr_t>self.random_state
        # Pack the Python-level hyperparameters into the C++ RF_params struct.
        rf_params = set_rf_params(<int> self.max_depth,
                                  <int> self.max_leaves,
                                  <float> max_feature_val,
                                  <int> self.n_bins,
                                  <int> self.min_samples_leaf,
                                  <int> self.min_samples_split,
                                  <float> self.min_impurity_decrease,
                                  <bool> self.bootstrap,
                                  <int> self.n_estimators,
                                  <float> self.max_samples,
                                  <uint64_t> seed_val,
                                  <CRITERION> self.split_criterion,
                                  <int> self.n_streams,
                                  <int> self.max_batch_size)
        # Dispatch to the single- or double-precision C++ fit.
        if self.dtype == np.float32:
            fit(handle_[0],
                rf_forest,
                <float*> X_ptr,
                <int> self.n_rows,
                <int> self.n_cols,
                <float*> y_ptr,
                rf_params,
                <int> self.verbose)
        else:
            rf_params64 = rf_params
            fit(handle_[0],
                rf_forest64,
                <double*> X_ptr,
                <int> self.n_rows,
                <int> self.n_cols,
                <double*> y_ptr,
                rf_params64,
                <int> self.verbose)
        # make sure that the `fit` is complete before the following delete
        # call happens
        self.handle.sync()
        del X_m
        del y_m
        return self
    def _predict_model_on_cpu(self, X, convert_dtype) -> CumlArray:
        """Predict with the C++ forest directly (no FIL/Treelite step)."""
        cdef uintptr_t X_ptr
        # Row-major input is required by the C++ predict API.
        X_m, n_rows, n_cols, dtype = \
            input_to_cuml_array(X, order='C',
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_cols=self.n_cols)
        X_ptr = X_m.ptr
        preds = CumlArray.zeros(n_rows, dtype=dtype)
        cdef uintptr_t preds_ptr = preds.ptr
        cdef handle_t* handle_ =\
            <handle_t*><uintptr_t>self.handle.getHandle()
        cdef RandomForestMetaData[float, float] *rf_forest = \
            <RandomForestMetaData[float, float]*><uintptr_t> self.rf_forest
        cdef RandomForestMetaData[double, double] *rf_forest64 = \
            <RandomForestMetaData[double, double]*><uintptr_t> self.rf_forest64
        # Dispatch on the dtype the model was trained with.
        if self.dtype == np.float32:
            predict(handle_[0],
                    rf_forest,
                    <float*> X_ptr,
                    <int> n_rows,
                    <int> n_cols,
                    <float*> preds_ptr,
                    <int> self.verbose)
        elif self.dtype == np.float64:
            predict(handle_[0],
                    rf_forest64,
                    <double*> X_ptr,
                    <int> n_rows,
                    <int> n_cols,
                    <double*> preds_ptr,
                    <int> self.verbose)
        else:
            raise TypeError("supports only float32 and float64 input,"
                            " but input of type '%s' passed."
                            % (str(self.dtype)))
        self.handle.sync()
        # synchronous w/o a stream
        del X_m
        return preds
@nvtx_annotate(
message="predict RF-Regressor @randomforestclassifier.pyx",
domain="cuml_python")
@insert_into_docstring(parameters=[('dense', '(n_samples, n_features)')],
return_values=[('dense', '(n_samples, 1)')])
def predict(self, X, predict_model="GPU",
algo='auto', convert_dtype=True,
fil_sparse_format='auto') -> CumlArray:
"""
Predicts the labels for X.
Parameters
----------
X : {}
predict_model : String (default = 'GPU')
'GPU' to predict using the GPU, 'CPU' otherwise.
algo : string (default = 'auto')
This is optional and required only while performing the
predict operation on the GPU.
* ``'naive'`` - simple inference using shared memory
* ``'tree_reorg'`` - similar to naive but trees rearranged to be
more coalescing-friendly
* ``'batch_tree_reorg'`` - similar to tree_reorg but predicting
multiple rows per thread block
* ``'auto'`` - choose the algorithm automatically. Currently
* ``'batch_tree_reorg'`` is used for dense storage
and 'naive' for sparse storage
convert_dtype : bool, optional (default = True)
When set to True, the predict method will, when necessary, convert
the input to the data type which was used to train the model. This
will increase memory used for the method.
fil_sparse_format : boolean or string (default = auto)
This variable is used to choose the type of forest that will be
created in the Forest Inference Library. It is not required
while using predict_model='CPU'.
* ``'auto'`` - choose the storage type automatically
(currently True is chosen by auto)
* ``False`` - create a dense forest
* ``True`` - create a sparse forest, requires algo='naive'
or algo='auto'
Returns
-------
y : {}
"""
if predict_model == "CPU":
preds = self._predict_model_on_cpu(X, convert_dtype)
else:
preds = self._predict_model_on_gpu(
X=X,
algo=algo,
convert_dtype=convert_dtype,
fil_sparse_format=fil_sparse_format)
return preds
@nvtx_annotate(
message="score RF-Regressor @randomforestclassifier.pyx",
domain="cuml_python")
@insert_into_docstring(parameters=[('dense', '(n_samples, n_features)'),
('dense', '(n_samples, 1)')])
def score(self, X, y, algo='auto', convert_dtype=True,
fil_sparse_format='auto', predict_model="GPU"):
"""
Calculates the accuracy metric score of the model for X.
In the 0.16 release, the default scoring metric was changed
from mean squared error to r-squared.
Parameters
----------
X : {}
y : {}
algo : string (default = 'auto')
This is optional and required only while performing the
predict operation on the GPU.
* ``'naive'`` - simple inference using shared memory
* ``'tree_reorg'`` - similar to naive but trees rearranged to be
more coalescing-friendly
* ``'batch_tree_reorg'`` - similar to tree_reorg but predicting
multiple rows per thread block
* ``'auto'`` - choose the algorithm automatically. Currently
* ``'batch_tree_reorg'`` is used for dense storage
and 'naive' for sparse storage
convert_dtype : boolean, default=True
whether to convert input data to correct dtype automatically
predict_model : String (default = 'GPU')
'GPU' to predict using the GPU, 'CPU' otherwise. The GPU can only
be used if the model was trained on float32 data and `X` is float32
or convert_dtype is set to True.
fil_sparse_format : boolean or string (default = auto)
This variable is used to choose the type of forest that will be
created in the Forest Inference Library. It is not required
while using predict_model='CPU'.
* ``'auto'`` - choose the storage type automatically
(currently True is chosen by auto)
* ``False`` - create a dense forest
* ``True`` - create a sparse forest, requires algo='naive'
or algo='auto'
Returns
-------
mean_square_error : float or
median_abs_error : float or
mean_abs_error : float
"""
from cuml.metrics.regression import r2_score
cdef uintptr_t y_ptr
_, n_rows, _, dtype = \
input_to_cuml_array(X,
convert_to_dtype=(self.dtype if convert_dtype
else None))
y_m, n_rows, _, _ = \
input_to_cuml_array(y,
convert_to_dtype=(dtype if convert_dtype
else False))
y_ptr = y_m.ptr
preds = self.predict(X, algo=algo,
convert_dtype=convert_dtype,
fil_sparse_format=fil_sparse_format,
predict_model=predict_model)
cdef uintptr_t preds_ptr
preds_m, _, _, _ = \
input_to_cuml_array(preds, convert_to_dtype=dtype)
preds_ptr = preds_m.ptr
# shortcut for default accuracy metric of r^2
if self.accuracy_metric == "r2":
stats = r2_score(y_m, preds, handle=self.handle)
self.handle.sync()
del y_m
del preds_m
return stats
cdef handle_t* handle_ =\
<handle_t*><uintptr_t>self.handle.getHandle()
cdef RandomForestMetaData[float, float] *rf_forest = \
<RandomForestMetaData[float, float]*><uintptr_t> self.rf_forest
cdef RandomForestMetaData[double, double] *rf_forest64 = \
<RandomForestMetaData[double, double]*><uintptr_t> self.rf_forest64
if self.dtype == np.float32:
self.temp_stats = score(handle_[0],
rf_forest,
<float*> y_ptr,
<int> n_rows,
<float*> preds_ptr,
<int> self.verbose)
elif self.dtype == np.float64:
self.temp_stats = score(handle_[0],
rf_forest64,
<double*> y_ptr,
<int> n_rows,
<double*> preds_ptr,
<int> self.verbose)
if self.accuracy_metric == 'median_ae':
stats = self.temp_stats['median_abs_error']
if self.accuracy_metric == 'mean_ae':
stats = self.temp_stats['mean_abs_error']
else:
stats = self.temp_stats['mean_squared_error']
self.handle.sync()
del y_m
del preds_m
return stats
    def get_summary_text(self):
        """
        Obtain the text summary of the random forest model
        """
        cdef RandomForestMetaData[float, float] *rf_forest = \
            <RandomForestMetaData[float, float]*><uintptr_t> self.rf_forest
        cdef RandomForestMetaData[double, double] *rf_forest64 = \
            <RandomForestMetaData[double, double]*><uintptr_t> self.rf_forest64
        # Pick the forest matching the dtype the model was trained with.
        if self.dtype == np.float64:
            return get_rf_summary_text(rf_forest64).decode('utf-8')
        else:
            return get_rf_summary_text(rf_forest).decode('utf-8')
    def get_detailed_text(self):
        """
        Obtain the detailed information for the random forest model, as text
        """
        cdef RandomForestMetaData[float, float] *rf_forest = \
            <RandomForestMetaData[float, float]*><uintptr_t> self.rf_forest
        cdef RandomForestMetaData[double, double] *rf_forest64 = \
            <RandomForestMetaData[double, double]*><uintptr_t> self.rf_forest64
        # Pick the forest matching the dtype the model was trained with.
        if self.dtype == np.float64:
            return get_rf_detailed_text(rf_forest64).decode('utf-8')
        else:
            return get_rf_detailed_text(rf_forest).decode('utf-8')
    def get_json(self):
        """
        Export the Random Forest model as a JSON string
        """
        cdef RandomForestMetaData[float, float] *rf_forest = \
            <RandomForestMetaData[float, float]*><uintptr_t> self.rf_forest
        cdef RandomForestMetaData[double, double] *rf_forest64 = \
            <RandomForestMetaData[double, double]*><uintptr_t> self.rf_forest64
        # Pick the forest matching the dtype the model was trained with.
        if self.dtype == np.float64:
            return get_rf_json(rf_forest64).decode('utf-8')
        return get_rf_json(rf_forest).decode('utf-8')
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/ensemble/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Collect the Cython sources to build; add_module_gpu_default presumably
# appends each .pyx to ${cython_sources} when its algorithm group is
# enabled — TODO confirm against the helper's definition.
set(cython_sources "")
add_module_gpu_default("randomforest_common.pyx" ${randomforestclassifier_algo} ${randomforestregressor_algo} ${ensemble_algo})
add_module_gpu_default("randomforest_shared.pyx" ${randomforestclassifier_algo} ${randomforestregressor_algo} ${ensemble_algo})
add_module_gpu_default("randomforestclassifier.pyx" ${randomforestclassifier_algo} ${ensemble_algo})
add_module_gpu_default("randomforestregressor.pyx" ${randomforestregressor_algo} ${ensemble_algo})
# Link every ensemble extension against the single-GPU cuML libraries and
# Treelite.
set(linked_libraries
    ${cuml_sg_libraries}
    ${TREELITE_LIBS})
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}"
  MODULE_PREFIX ensemble_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/ensemble/randomforest_shared.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from libcpp.vector cimport vector
from cpython.object cimport PyObject
from libc.stdint cimport uintptr_t
from libcpp.memory cimport unique_ptr
from typing import Dict, List, Union
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
cdef extern from "treelite/tree.h" namespace "treelite":
cdef struct PyBufferFrame:
void* buf
char* format
size_t itemsize
size_t nitem
cdef cppclass Model:
vector[PyBufferFrame] GetPyBuffer() except +
@staticmethod
unique_ptr[Model] CreateFromPyBuffer(vector[PyBufferFrame]) except +
cdef extern from "Python.h":
Py_buffer* PyMemoryView_GET_BUFFER(PyObject* mview)
cdef class PyBufferFrameWrapper:
    """Expose a Treelite ``PyBufferFrame`` through the Python buffer
    protocol so it can be wrapped in a ``memoryview``/``np.asarray``
    without copying."""
    cdef PyBufferFrame _handle
    # 1-D shape/strides storage required by the buffer protocol.
    cdef Py_ssize_t shape[1]
    cdef Py_ssize_t strides[1]
    def __cinit__(self):
        pass
    def __dealloc__(self):
        pass
    def __getbuffer__(self, Py_buffer* buffer, int flags):
        """Fill *buffer* with a writable 1-D view over the frame's memory."""
        cdef Py_ssize_t itemsize = self._handle.itemsize
        self.shape[0] = self._handle.nitem
        self.strides[0] = itemsize
        buffer.buf = self._handle.buf
        buffer.format = self._handle.format
        buffer.internal = NULL
        buffer.itemsize = itemsize
        buffer.len = self._handle.nitem * itemsize
        buffer.ndim = 1
        # Exporting object; keeps this wrapper alive while views exist.
        buffer.obj = self
        buffer.readonly = 0
        buffer.shape = self.shape
        buffer.strides = self.strides
        buffer.suboffsets = NULL
    def __releasebuffer__(self, Py_buffer *buffer):
        pass
cdef PyBufferFrameWrapper MakePyBufferFrameWrapper(PyBufferFrame handle):
    # Factory: cdef classes cannot take C struct args in __init__, so the
    # handle is assigned after construction.
    cdef PyBufferFrameWrapper wrapper = PyBufferFrameWrapper()
    wrapper._handle = handle
    return wrapper
cdef list _get_frames(ModelHandle model):
    # One zero-copy memoryview per PyBufferFrame exported by the model.
    return [memoryview(MakePyBufferFrameWrapper(v))
            for v in (<Model*>model).GetPyBuffer()]
cdef ModelHandle _init_from_frames(vector[PyBufferFrame] frames) except *:
    # release() transfers ownership of the new Model to the caller.
    return <ModelHandle>Model.CreateFromPyBuffer(frames).release()
def get_frames(model: uintptr_t) -> List[memoryview]:
    """Return zero-copy memoryviews over the model's serialization frames."""
    return _get_frames(<ModelHandle> model)
def init_from_frames(frames: List[np.ndarray],
                     format_str: List[str], itemsize: List[int]) -> uintptr_t:
    """Rebuild a Treelite model handle from raw frame arrays; the caller
    owns the returned handle."""
    cdef vector[PyBufferFrame] cpp_frames
    cdef Py_buffer* buf
    cdef PyBufferFrame cpp_frame
    # Keep the encoded bytes objects alive for the duration of this call so
    # the char* stored in each frame stays valid.
    format_bytes = [s.encode('utf-8') for s in format_str]
    for i, frame in enumerate(frames):
        x = memoryview(frame)
        buf = PyMemoryView_GET_BUFFER(<PyObject*>x)
        cpp_frame.buf = buf.buf
        cpp_frame.format = format_bytes[i]
        cpp_frame.itemsize = itemsize[i]
        cpp_frame.nitem = buf.len // itemsize[i]
        cpp_frames.push_back(cpp_frame)
    return <uintptr_t> _init_from_frames(cpp_frames)
def treelite_serialize(
    model: uintptr_t
) -> Dict[str, Union[List[str], List[np.ndarray]]]:
    """Dump a Treelite model handle into a picklable dict of frame arrays
    plus the per-frame format/itemsize metadata needed to restore it."""
    views = get_frames(model)
    format_str = [v.format for v in views]
    itemsize = [v.itemsize for v in views]
    arrays = [np.asarray(v) for v in views]
    return {'header': {'format_str': format_str, 'itemsize': itemsize},
            'frames': arrays}
def treelite_deserialize(
    payload: Dict[str, Union[List[str], List[bytes]]]
) -> uintptr_t:
    """Reconstruct a Treelite model handle from treelite_serialize() output."""
    meta = payload['header']
    return init_from_frames(payload['frames'],
                            meta['format_str'], meta['itemsize'])
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/ensemble/randomforest_common.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
import math
import warnings
import typing
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml import ForestInference
from cuml.fil.fil import TreeliteModel
from pylibraft.common.handle import Handle
from cuml.internals.base import Base
from cuml.internals.array import CumlArray
from cuml.common.exceptions import NotFittedError
import cuml.internals
from cython.operator cimport dereference as deref
from cuml.ensemble.randomforest_shared import treelite_serialize, \
treelite_deserialize
from cuml.ensemble.randomforest_shared cimport *
from cuml.common import input_to_cuml_array
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.prims.label.classlabels import make_monotonic, check_labels
class BaseRandomForestModel(Base):
    """Shared base for cuML's RandomForestClassifier/RandomForestRegressor.

    Owns the integer handles to the C++ forest metadata (``rf_forest`` for
    float32, ``rf_forest64`` for float64), the cached Treelite handle and
    its serialized form, and the hyperparameter validation common to both
    estimators.
    """
    # Constructor arguments recognized by cuML. Scikit-learn-only names are
    # included so __init__ can reject them with a helpful error.
    _param_names = ['n_estimators', 'max_depth', 'handle',
                    'max_features', 'n_bins',
                    'split_criterion', 'min_samples_leaf',
                    'min_samples_split',
                    'min_impurity_decrease',
                    'bootstrap',
                    'verbose', 'max_samples',
                    'max_leaves',
                    'accuracy_metric', 'max_batch_size',
                    'n_streams', 'dtype',
                    'output_type', 'min_weight_fraction_leaf', 'n_jobs',
                    'max_leaf_nodes', 'min_impurity_split', 'oob_score',
                    'random_state', 'warm_start', 'class_weight',
                    'criterion']
    # Maps user-facing split-criterion names (or their numeric codes as
    # strings) to the C++ CRITERION enum values.
    criterion_dict = {'0': GINI, 'gini': GINI,
                      '1': ENTROPY, 'entropy': ENTROPY,
                      '2': MSE, 'mse': MSE,
                      '3': MAE, 'mae': MAE,
                      '4': POISSON, 'poisson': POISSON,
                      '5': GAMMA, 'gamma': GAMMA,
                      '6': INVERSE_GAUSSIAN,
                      'inverse_gaussian': INVERSE_GAUSSIAN,
                      '7': CRITERION_END}
    # Descriptor handling output-type conversion of the fitted class labels.
    classes_ = CumlArrayDescriptor()
    def __init__(self, *, split_criterion, n_streams=4, n_estimators=100,
                 max_depth=16, handle=None, max_features='auto', n_bins=128,
                 bootstrap=True,
                 verbose=False, min_samples_leaf=1, min_samples_split=2,
                 max_samples=1.0, max_leaves=-1, accuracy_metric=None,
                 dtype=None, output_type=None, min_weight_fraction_leaf=None,
                 n_jobs=None, max_leaf_nodes=None, min_impurity_decrease=0.0,
                 min_impurity_split=None, oob_score=None, random_state=None,
                 warm_start=None, class_weight=None,
                 criterion=None,
                 max_batch_size=4096, **kwargs):
        """Validate hyperparameters and initialize empty forest state."""
        # Reject sklearn-only parameters explicitly so users get a pointer
        # to the cuML documentation rather than a silent no-op.
        sklearn_params = {"criterion": criterion,
                          "min_weight_fraction_leaf": min_weight_fraction_leaf,
                          "max_leaf_nodes": max_leaf_nodes,
                          "min_impurity_split": min_impurity_split,
                          "oob_score": oob_score, "n_jobs": n_jobs,
                          "warm_start": warm_start,
                          "class_weight": class_weight}
        for key, vals in sklearn_params.items():
            if vals:
                raise TypeError(
                    " The Scikit-learn variable ", key,
                    " is not supported in cuML,"
                    " please read the cuML documentation at "
                    "(https://docs.rapids.ai/api/cuml/nightly/"
                    "api.html#random-forest) for more information")
        for key in kwargs.keys():
            if key not in self._param_names:
                raise TypeError(
                    " The variable ", key,
                    " is not supported in cuML,"
                    " please read the cuML documentation at "
                    "(https://docs.rapids.ai/api/cuml/nightly/"
                    "api.html#random-forest) for more information")
        # With multiple CUDA streams, tree-build ordering is timing
        # dependent, so fixed seeds are only reproducible with n_streams=1.
        if ((random_state is not None) and (n_streams != 1)):
            warnings.warn("For reproducible results in Random Forest"
                          " Classifier or for almost reproducible results"
                          " in Random Forest Regressor, n_streams=1 is "
                          "recommended. If n_streams is > 1, results may vary "
                          "due to stream/thread timing differences, even when "
                          "random_state is set")
        if handle is None:
            handle = Handle(n_streams=n_streams)
        super(BaseRandomForestModel, self).__init__(
            handle=handle,
            verbose=verbose,
            output_type=output_type)
        if max_depth <= 0:
            raise ValueError("Must specify max_depth >0 ")
        if (str(split_criterion) not in
                BaseRandomForestModel.criterion_dict.keys()):
            warnings.warn("The split criterion chosen was not present"
                          " in the list of options accepted by the model"
                          " and so the CRITERION_END option has been chosen.")
            self.split_criterion = CRITERION_END
        else:
            self.split_criterion = \
                BaseRandomForestModel.criterion_dict[str(split_criterion)]
        self.min_samples_leaf = min_samples_leaf
        self.min_samples_split = min_samples_split
        self.min_impurity_decrease = min_impurity_decrease
        self.max_samples = max_samples
        self.max_leaves = max_leaves
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.n_bins = n_bins
        # n_cols is set during fit(); None marks the model as unfitted.
        self.n_cols = None
        self.dtype = dtype
        self.accuracy_metric = accuracy_metric
        self.max_batch_size = max_batch_size
        self.n_streams = n_streams
        self.random_state = random_state
        # Integer handles to the C++ forest metadata (0 == not allocated).
        self.rf_forest = 0
        self.rf_forest64 = 0
        self.model_pbuf_bytes = bytearray()
        # Cached Treelite model handle and its serialized frames.
        self.treelite_handle = None
        self.treelite_serialized_model = None
    def _get_max_feat_val(self) -> float:
        """Translate `max_features` into the fraction of columns sampled
        per split, as expected by the C++ layer."""
        if isinstance(self.max_features, int):
            return self.max_features/self.n_cols
        elif isinstance(self.max_features, float):
            return self.max_features
        elif self.max_features == 'sqrt':
            return 1/np.sqrt(self.n_cols)
        elif self.max_features == 'log2':
            return math.log2(self.n_cols)/self.n_cols
        elif self.max_features == 'auto':
            # 'auto' mirrors sklearn: sqrt for classification, all columns
            # for regression.
            if self.RF_type == CLASSIFICATION:
                return 1/np.sqrt(self.n_cols)
            else:
                return 1.0
        else:
            raise ValueError(
                "Wrong value passed in for max_features"
                " please read the documentation present at "
                "(https://docs.rapids.ai/api/cuml/nightly/api.html"
                "#random-forest)")
    def _get_serialized_model(self):
        """
        Return the serialized form of the model.

        The cuML RF model is first converted to a Treelite model (handle
        cached in `self.treelite_handle`), which is then serialized via
        `treelite_serialize` into picklable frames, cached in
        `self.treelite_serialized_model`. Whichever cached representation
        already exists is reused.
        """
        if self.treelite_serialized_model:
            return self.treelite_serialized_model
        elif self.treelite_handle:
            fit_mod_ptr = self.treelite_handle
        else:
            fit_mod_ptr = self._obtain_treelite_handle()
        cdef uintptr_t model_ptr = <uintptr_t> fit_mod_ptr
        self.treelite_serialized_model = treelite_serialize(model_ptr)
        return self.treelite_serialized_model
    def _obtain_treelite_handle(self):
        """Build (or fetch the cached) Treelite handle for the fitted
        forest, choosing the C++ template instantiation by task and dtype."""
        if (not self.treelite_serialized_model) and (not self.rf_forest):
            raise NotFittedError(
                "Attempting to create treelite from un-fit forest.")
        cdef ModelHandle tl_handle = NULL
        if self.treelite_handle:
            return self.treelite_handle  # Use cached version
        elif self.treelite_serialized_model:  # bytes -> Treelite
            tl_handle = <ModelHandle><uintptr_t>treelite_deserialize(
                self.treelite_serialized_model)
        else:
            if self.dtype not in [np.float32, np.float64]:
                raise ValueError("Unknown dtype.")
            if self.RF_type == CLASSIFICATION:
                if self.dtype==np.float32:
                    build_treelite_forest(
                        &tl_handle,
                        <RandomForestMetaData[float, int]*>
                        <uintptr_t> self.rf_forest,
                        <int> self.n_cols
                        )
                elif self.dtype==np.float64:
                    build_treelite_forest(
                        &tl_handle,
                        <RandomForestMetaData[double, int]*>
                        <uintptr_t> self.rf_forest64,
                        <int> self.n_cols
                        )
            else:
                if self.dtype==np.float32:
                    build_treelite_forest(
                        &tl_handle,
                        <RandomForestMetaData[float, float]*>
                        <uintptr_t> self.rf_forest,
                        <int> self.n_cols
                        )
                elif self.dtype==np.float64:
                    build_treelite_forest(
                        &tl_handle,
                        <RandomForestMetaData[double, double]*>
                        <uintptr_t> self.rf_forest64,
                        <int> self.n_cols
                        )
        self.treelite_handle = <uintptr_t> tl_handle
        return self.treelite_handle
    @cuml.internals.api_base_return_generic(set_output_type=True,
                                            set_n_features_in=True,
                                            get_output_type=False)
    def _dataset_setup_for_fit(
            self, X, y,
            convert_dtype) -> typing.Tuple[CumlArray, CumlArray, float]:
        """Validate/convert X and y for fit() and return the device arrays
        plus the per-split feature fraction."""
        # Reset the old tree data for new fit call
        self._reset_forest_data()
        X_m, self.n_rows, self.n_cols, self.dtype = \
            input_to_cuml_array(X, check_dtype=[np.float32, np.float64],
                                order='F')
        if self.n_bins > self.n_rows:
            warnings.warn("The number of bins, `n_bins` is greater than "
                          "the number of samples used for training. "
                          "Changing `n_bins` to number of training samples.")
            self.n_bins = self.n_rows
        if self.RF_type == CLASSIFICATION:
            y_m, _, _, y_dtype = \
                input_to_cuml_array(
                    y, check_dtype=np.int32,
                    convert_to_dtype=(np.int32 if convert_dtype
                                      else None),
                    check_rows=self.n_rows, check_cols=1)
            if y_dtype != np.int32:
                raise TypeError("The labels `y` need to be of dtype"
                                " `int32`")
            self.classes_ = cp.unique(y_m)
            self.num_classes = len(self.classes_)
            # Labels must be 0..num_classes-1 for the C++ layer; remap them
            # if they are not already monotonic.
            self.use_monotonic = not check_labels(
                y_m, cp.arange(self.num_classes, dtype=np.int32))
            if self.use_monotonic:
                y_m, _ = make_monotonic(y_m)
        else:
            y_m, _, _, y_dtype = \
                input_to_cuml_array(
                    y,
                    convert_to_dtype=(self.dtype if convert_dtype
                                      else None),
                    check_rows=self.n_rows, check_cols=1)
        if self.dtype == np.float64:
            warnings.warn("To use pickling first train using float32 data "
                          "to fit the estimator")
        max_feature_val = self._get_max_feat_val()
        # Fractional values are interpreted as fractions of the row count,
        # mirroring scikit-learn semantics.
        if isinstance(self.min_samples_leaf, float):
            self.min_samples_leaf = \
                math.ceil(self.min_samples_leaf * self.n_rows)
        if isinstance(self.min_samples_split, float):
            self.min_samples_split = \
                max(2, math.ceil(self.min_samples_split * self.n_rows))
        return X_m, y_m, max_feature_val
    def _tl_handle_from_bytes(self, treelite_serialized_model):
        """Deserialize frames produced by `treelite_serialize` back into a
        Treelite handle."""
        if not treelite_serialized_model:
            raise ValueError(
                '_tl_handle_from_bytes() requires non-empty serialized model')
        return treelite_deserialize(treelite_serialized_model)
    def _concatenate_treelite_handle(self, treelite_handle):
        """Merge several Treelite handles (e.g. from distributed workers)
        into a single model owned by this estimator."""
        cdef ModelHandle concat_model_handle = NULL
        cdef vector[ModelHandle] *model_handles \
            = new vector[ModelHandle]()
        cdef uintptr_t mod_ptr
        for i in treelite_handle:
            mod_ptr = <uintptr_t>i
            model_handles.push_back((
                <ModelHandle> mod_ptr))
        self._reset_forest_data()
        concat_model_handle = concatenate_trees(deref(model_handles))
        cdef uintptr_t concat_model_ptr = <uintptr_t> concat_model_handle
        self.treelite_handle = concat_model_ptr
        self.treelite_serialized_model = treelite_serialize(concat_model_ptr)
        # Fix up some instance variables that should match the new TL model
        tl_model = TreeliteModel.from_treelite_model_handle(
            self.treelite_handle,
            take_handle_ownership=False)
        self.n_cols = tl_model.num_features
        self.n_estimators = tl_model.num_trees
        return self
    def _predict_model_on_gpu(self, X, algo, convert_dtype,
                              fil_sparse_format, threshold=0.5,
                              output_class=False,
                              predict_proba=False) -> CumlArray:
        """Run prediction through FIL: convert the forest to Treelite,
        load it into a ForestInference model, then predict on X."""
        treelite_handle = self._obtain_treelite_handle()
        storage_type = \
            _check_fil_parameter_validity(depth=self.max_depth,
                                          fil_sparse_format=fil_sparse_format,
                                          algo=algo)
        fil_model = ForestInference(handle=self.handle, verbose=self.verbose,
                                    output_type=self.output_type)
        tl_to_fil_model = \
            fil_model.load_using_treelite_handle(treelite_handle,
                                                 output_class=output_class,
                                                 threshold=threshold,
                                                 algo=algo,
                                                 storage_type=storage_type)
        if (predict_proba):
            preds = tl_to_fil_model.predict_proba(X)
        else:
            preds = tl_to_fil_model.predict(X)
        return preds
    def get_param_names(self):
        """Return the list of hyperparameter names accepted by set_params."""
        return super().get_param_names() + BaseRandomForestModel._param_names
    def set_params(self, **params):
        """Set hyperparameters; invalidates any cached serialized model."""
        self.treelite_serialized_model = None
        super().set_params(**params)
        return self
def _check_fil_parameter_validity(depth, algo, fil_sparse_format):
    """
    Check if the FIL storage format type passed by the user is right
    for the trained cuml Random Forest model they have.

    Parameters
    ----------
    depth : max depth value used to train model
    algo : string (default = 'auto')
        This is optional and required only while performing the
        predict operation on the GPU.

        * ``'naive'`` - simple inference using shared memory
        * ``'tree_reorg'`` - similar to naive but trees rearranged to be more
          coalescing-friendly
        * ``'batch_tree_reorg'`` - similar to tree_reorg but predicting
          multiple rows per thread block
        * ``'auto'`` - choose the algorithm automatically. Currently
          'batch_tree_reorg' is used for dense storage
          and 'naive' for sparse storage

    fil_sparse_format : boolean or string (default = 'auto')
        This variable is used to choose the type of forest that will be
        created in the Forest Inference Library. It is not required
        while using predict_model='CPU'.

        * ``'auto'`` - choose the storage type automatically
          (currently True is chosen by auto)
        * ``False`` - create a dense forest
        * ``True`` - create a sparse forest, requires algo='naive'
          or algo='auto'

    Returns
    -------
    fil_sparse_format : the validated storage selector, unchanged

    Raises
    ------
    ValueError
        If a deep forest (depth > 16) is combined with dense storage or a
        dense-only algorithm, or if `fil_sparse_format` is not one of
        True / False / 'auto'.
    """
    accepted_fil_sparse_formats = {True, False, 'auto'}
    # Dense storage of a forest deeper than 16 levels would consume an
    # enormous amount of memory, and the *_reorg algorithms require dense
    # storage, so reject any combination that forces dense for deep trees.
    if (depth > 16 and (fil_sparse_format is False or
                        algo == 'tree_reorg' or
                        algo == 'batch_tree_reorg')):
        raise ValueError("While creating a forest with max_depth greater "
                         "than 16, `fil_sparse_format` should be True. "
                         "If `fil_sparse_format=False` then the memory "
                         "consumed while creating the FIL forest is very "
                         "large and the process will be aborted. In "
                         "addition, `algo` must be either set to `naive' "
                         "or `auto` to set 'fil_sparse_format=True`.")
    if fil_sparse_format not in accepted_fil_sparse_formats:
        # Fixed typo: the message previously referred to "spares_forest".
        raise ValueError(
            "The value entered for fil_sparse_format is not "
            "supported. Please refer to the documentation at "
            "(https://docs.rapids.ai/api/cuml/nightly/api.html"
            "#forest-inferencing) to see the accepted values.")
    return fil_sparse_format
def _obtain_fil_model(treelite_handle, depth,
                      output_class=True,
                      threshold=0.5, algo='auto',
                      fil_sparse_format='auto'):
    """
    Creates a Forest Inference (FIL) model using the treelite
    handle obtained from the cuML Random Forest model.

    Returns
    ----------
    fil_model :
        A Forest Inference model which can be used to perform
        inferencing on the random forest model.
    """
    # Validate the requested storage/algorithm combination first; raises
    # ValueError on an unusable combination.
    storage_format = _check_fil_parameter_validity(
        depth=depth, algo=algo, fil_sparse_format=fil_sparse_format)
    # Use output_type="input" to prevent an error
    fil_model = ForestInference(output_type="input")
    return fil_model.load_using_treelite_handle(
        treelite_handle,
        output_class=output_class,
        threshold=threshold,
        algo=algo,
        storage_type=storage_format)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/ensemble/randomforestclassifier.pyx |
#
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import,
gpu_only_import_from,
null_decorator
)
np = cpu_only_import('numpy')
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
rmm = gpu_only_import('rmm')
from cuml.internals.array import CumlArray
from cuml.internals.mixins import ClassifierMixin
import cuml.internals
from cuml.common.doc_utils import generate_docstring
from cuml.common.doc_utils import insert_into_docstring
from cuml.common import input_to_cuml_array
from cuml.ensemble.randomforest_common import BaseRandomForestModel
from cuml.ensemble.randomforest_common import _obtain_fil_model
from cuml.ensemble.randomforest_shared cimport *
from cuml.fil.fil import TreeliteModel
from libcpp cimport bool
from libc.stdint cimport uintptr_t, uint64_t
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
from cuml.prims.label.classlabels import check_labels, invert_labels
from pylibraft.common.handle cimport handle_t
cimport cuml.common.cuda
# C++ entry points from libcuml's random forest implementation. Each routine
# is overloaded for float32 and float64 feature matrices; classification
# labels and predictions are always int32.
cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML":

    # Train a forest. Argument order (see the call in
    # RandomForestClassifier.fit): handle, forest metadata (out), feature
    # matrix, n_rows, n_cols, labels, number of unique classes,
    # hyperparameters, verbosity level.
    cdef void fit(handle_t& handle,
                  RandomForestMetaData[float, int]*,
                  float*,
                  int,
                  int,
                  int*,
                  int,
                  RF_params,
                  int) except +

    # float64 overload of fit.
    cdef void fit(handle_t& handle,
                  RandomForestMetaData[double, int]*,
                  double*,
                  int,
                  int,
                  int*,
                  int,
                  RF_params,
                  int) except +

    # CPU-side prediction: handle, forest, feature matrix, n_rows, n_cols,
    # output predictions buffer, verbosity flag.
    cdef void predict(handle_t& handle,
                      RandomForestMetaData[float, int] *,
                      float*,
                      int,
                      int,
                      int*,
                      bool) except +

    # float64 overload of predict.
    cdef void predict(handle_t& handle,
                      RandomForestMetaData[double, int]*,
                      double*,
                      int,
                      int,
                      int*,
                      bool) except +

    # Compute accuracy metrics from ground-truth labels and predictions:
    # handle, forest, labels, n_rows, predictions, verbosity flag.
    cdef RF_metrics score(handle_t& handle,
                          RandomForestMetaData[float, int]*,
                          int*,
                          int,
                          int*,
                          bool) except +

    # float64 overload of score.
    cdef RF_metrics score(handle_t& handle,
                          RandomForestMetaData[double, int]*,
                          int*,
                          int,
                          int*,
                          bool) except +
class RandomForestClassifier(BaseRandomForestModel,
                             ClassifierMixin):
    """
    Implements a Random Forest classifier model which fits multiple decision
    tree classifiers in an ensemble.

    .. note:: Note that the underlying algorithm for tree node splits differs
       from that used in scikit-learn. By default, the cuML Random Forest uses
       a quantile-based algorithm to determine splits, rather than an exact
       count. You can tune the size of the quantiles with the `n_bins`
       parameter.

    .. note:: You can export cuML Random Forest models and run predictions
       with them on machines without an NVIDIA GPUs. See
       https://docs.rapids.ai/api/cuml/nightly/pickling_cuml_models.html
       for more details.

    Examples
    --------
    .. code-block:: python

        >>> import cupy as cp
        >>> from cuml.ensemble import RandomForestClassifier as cuRFC

        >>> X = cp.random.normal(size=(10,4)).astype(cp.float32)
        >>> y = cp.asarray([0,1]*5, dtype=cp.int32)

        >>> cuml_model = cuRFC(max_features=1.0,
        ...                    n_bins=8,
        ...                    n_estimators=40)
        >>> cuml_model.fit(X,y)
        RandomForestClassifier()
        >>> cuml_predict = cuml_model.predict(X)

        >>> print("Predicted labels : ", cuml_predict)
        Predicted labels :  [0. 1. 0. 1. 0. 1. 0. 1. 0. 1.]

    Parameters
    ----------
    n_estimators : int (default = 100)
        Number of trees in the forest. (Default changed to 100 in cuML 0.11)
    split_criterion : int or string (default = ``0`` (``'gini'``))
        The criterion used to split nodes.\n
         * ``0`` or ``'gini'`` for gini impurity
         * ``1`` or ``'entropy'`` for information gain (entropy)
         * ``2`` or ``'mse'`` for mean squared error
         * ``4`` or ``'poisson'`` for poisson half deviance
         * ``5`` or ``'gamma'`` for gamma half deviance
         * ``6`` or ``'inverse_gaussian'`` for inverse gaussian deviance

        only ``0``/``'gini'`` and ``1``/``'entropy'`` valid for classification
    bootstrap : boolean (default = True)
        Control bootstrapping.\n
            * If ``True``, each tree in the forest is built on a bootstrapped
              sample with replacement.
            * If ``False``, the whole dataset is used to build each tree.
    max_samples : float (default = 1.0)
        Ratio of dataset rows used while fitting each tree.
    max_depth : int (default = 16)
        Maximum tree depth. Must be greater than 0.
        Unlimited depth (i.e, until leaves are pure)
        is not supported.\n
        .. note:: This default differs from scikit-learn's
          random forest, which defaults to unlimited depth.
    max_leaves : int (default = -1)
        Maximum leaf nodes per tree. Soft constraint. Unlimited,
        If ``-1``.
    max_features : int, float, or string (default = 'auto')
        Ratio of number of features (columns) to consider per node
        split.\n
         * If type ``int`` then ``max_features`` is the absolute count of
           features to be used
         * If type ``float`` then ``max_features`` is used as a fraction.
         * If ``'auto'`` then ``max_features=1/sqrt(n_features)``.
         * If ``'sqrt'`` then ``max_features=1/sqrt(n_features)``.
         * If ``'log2'`` then ``max_features=log2(n_features)/n_features``.
    n_bins : int (default = 128)
        Maximum number of bins used by the split algorithm per feature.
        For large problems, particularly those with highly-skewed input data,
        increasing the number of bins may improve accuracy.
    n_streams : int (default = 4)
        Number of parallel streams used for forest building.
    min_samples_leaf : int or float (default = 1)
        The minimum number of samples (rows) in each leaf node.\n
         * If type ``int``, then ``min_samples_leaf`` represents the minimum
           number.
         * If ``float``, then ``min_samples_leaf`` represents a fraction and
           ``ceil(min_samples_leaf * n_rows)`` is the minimum number of
           samples for each leaf node.
    min_samples_split : int or float (default = 2)
        The minimum number of samples required to split an internal node.\n
         * If type ``int``, then min_samples_split represents the minimum
           number.
         * If type ``float``, then ``min_samples_split`` represents a fraction
           and ``max(2, ceil(min_samples_split * n_rows))`` is the minimum
           number of samples for each split.
    min_impurity_decrease : float (default = 0.0)
        Minimum decrease in impurity required for
        node to be split.
    max_batch_size : int (default = 4096)
        Maximum number of nodes that can be processed in a given batch.
    random_state : int (default = None)
        Seed for the random number generator. Unseeded by default. Does not
        currently fully guarantee the exact same results.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.

    Notes
    -----
    **Known Limitations**\n
    This is an early release of the cuML
    Random Forest code. It contains a few known limitations:

      * GPU-based inference is only supported with 32-bit (float32) datatypes.
        Alternatives are to use CPU-based inference for 64-bit (float64)
        datatypes, or let the default automatic datatype conversion occur
        during GPU inference.
      * While training the model for multi class classification problems,
        using deep trees or `max_features=1.0` provides better performance.

    For additional docs, see `scikit-learn's RandomForestClassifier
    <https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>`_.
    """

    def __init__(self, *, split_criterion=0, handle=None, verbose=False,
                 output_type=None,
                 **kwargs):
        # Tag this estimator as a classifier for the shared base-class
        # machinery; num_classes is refreshed from the labels during fit
        # (handled in BaseRandomForestModel — confirm there).
        self.RF_type = CLASSIFICATION
        self.num_classes = 2
        super().__init__(
            split_criterion=split_criterion,
            handle=handle,
            verbose=verbose,
            output_type=output_type,
            **kwargs)

    # TODO: Add the preprocess and postprocess functions in the cython code to
    # normalize the labels
    # Link to the above issue on github:
    # https://github.com/rapidsai/cuml/issues/691
    def __getstate__(self):
        """Pickle support.

        The C++ forest objects live behind raw pointers and cannot be
        pickled; instead we persist the treelite-serialized model bytes
        (cached by ``_get_serialized_model``) plus the ``RF_params`` structs
        needed to rebuild the metadata objects in ``__setstate__``.
        """
        state = self.__dict__.copy()
        cdef size_t params_t
        cdef RandomForestMetaData[float, int] *rf_forest
        cdef RandomForestMetaData[double, int] *rf_forest64
        cdef size_t params_t64
        if self.n_cols:
            # only if model has been fit previously
            self._get_serialized_model()  # Ensure we have this cached
            if self.rf_forest:
                # Recover the typed pointer from the stored integer handle
                # to copy out the hyperparameter struct.
                params_t = <uintptr_t> self.rf_forest
                rf_forest = \
                    <RandomForestMetaData[float, int]*>params_t
                state["rf_params"] = rf_forest.rf_params
            if self.rf_forest64:
                params_t64 = <uintptr_t> self.rf_forest64
                rf_forest64 = \
                    <RandomForestMetaData[double, int]*>params_t64
                state["rf_params64"] = rf_forest64.rf_params

        state["n_cols"] = self.n_cols
        state["verbose"] = self.verbose
        state["treelite_serialized_model"] = self.treelite_serialized_model
        # The treelite handle is process-local; it is rebuilt on demand
        # after unpickling.
        state["treelite_handle"] = None
        state["split_criterion"] = self.split_criterion
        state["handle"] = self.handle

        return state

    def __setstate__(self, state):
        """Rebuild the estimator from a pickled ``state`` dict."""
        super(RandomForestClassifier, self).__init__(
            split_criterion=state["split_criterion"],
            handle=state["handle"],
            verbose=state["verbose"])
        # Allocate fresh C++ metadata objects; ownership is released in
        # _reset_forest_data / __del__.
        cdef  RandomForestMetaData[float, int] *rf_forest = \
            new RandomForestMetaData[float, int]()
        cdef  RandomForestMetaData[double, int] *rf_forest64 = \
            new RandomForestMetaData[double, int]()

        self.n_cols = state['n_cols']
        if self.n_cols:
            rf_forest.rf_params = state["rf_params"]
            state["rf_forest"] = <uintptr_t>rf_forest

            rf_forest64.rf_params = state["rf_params64"]
            state["rf_forest64"] = <uintptr_t>rf_forest64

        self.treelite_serialized_model = state["treelite_serialized_model"]
        self.__dict__.update(state)

    def __del__(self):
        # Release the C++-side forest objects when the Python wrapper dies.
        self._reset_forest_data()

    def _reset_forest_data(self):
        """Free memory allocated by this instance and clear instance vars."""
        if self.rf_forest:
            delete_rf_metadata(
                <RandomForestMetaData[float, int]*><uintptr_t>
                self.rf_forest)
            self.rf_forest = 0
        if self.rf_forest64:
            delete_rf_metadata(
                <RandomForestMetaData[double, int]*><uintptr_t>
                self.rf_forest64)
            self.rf_forest64 = 0

        if self.treelite_handle:
            TreeliteModel.free_treelite_model(self.treelite_handle)

        self.treelite_handle = None
        self.treelite_serialized_model = None
        self.n_cols = None

    def convert_to_treelite_model(self):
        """
        Converts the cuML RF model to a Treelite model

        Returns
        -------
        tl_to_fil_model : Treelite version of this model
        """
        treelite_handle = self._obtain_treelite_handle()
        return TreeliteModel.from_treelite_model_handle(treelite_handle)

    def convert_to_fil_model(self, output_class=True,
                             threshold=0.5, algo='auto',
                             fil_sparse_format='auto'):
        """
        Create a Forest Inference (FIL) model from the trained cuML
        Random Forest model.

        Parameters
        ----------
        output_class : boolean (default = True)
            This is optional and required only while performing the
            predict operation on the GPU.
            If true, return a 1 or 0 depending on whether the raw
            prediction exceeds the threshold. If False, just return
            the raw prediction.
        algo : string (default = 'auto')
            This is optional and required only while performing the
            predict operation on the GPU.

             * ``'naive'`` - simple inference using shared memory
             * ``'tree_reorg'`` - similar to naive but trees rearranged to be
               more coalescing-friendly
             * ``'batch_tree_reorg'`` - similar to tree_reorg but predicting
               multiple rows per thread block
             * ``'auto'`` - choose the algorithm automatically. Currently
             * ``'batch_tree_reorg'`` is used for dense storage
               and 'naive' for sparse storage

        threshold : float (default = 0.5)
            Threshold used for classification. Optional and required only
            while performing the predict operation on the GPU.
            It is applied if output_class == True, else it is ignored
        fil_sparse_format : boolean or string (default = auto)
            This variable is used to choose the type of forest that will be
            created in the Forest Inference Library. It is not required
            while using predict_model='CPU'.

             * ``'auto'`` - choose the storage type automatically
               (currently True is chosen by auto)
             * ``False`` - create a dense forest
             * ``True`` - create a sparse forest, requires algo='naive'
               or algo='auto'

        Returns
        -------
        fil_model
            A Forest Inference model which can be used to perform
            inferencing on the random forest model.
        """
        treelite_handle = self._obtain_treelite_handle()
        return _obtain_fil_model(treelite_handle=treelite_handle,
                                 depth=self.max_depth,
                                 output_class=output_class,
                                 threshold=threshold,
                                 algo=algo,
                                 fil_sparse_format=fil_sparse_format)

    @nvtx_annotate(
        message="fit RF-Classifier @randomforestclassifier.pyx",
        domain="cuml_python")
    @generate_docstring(skip_parameters_heading=True,
                        y='dense_intdtype',
                        convert_dtype_cast='np.float32')
    @cuml.internals.api_base_return_any(set_output_type=False,
                                        set_output_dtype=True,
                                        set_n_features_in=False)
    def fit(self, X, y, convert_dtype=True):
        """
        Perform Random Forest Classification on the input data

        Parameters
        ----------
        convert_dtype : bool, optional (default = True)
            When set to True, the fit method will, when necessary, convert
            y to be of dtype int32. This will increase memory used for
            the method.

        """
        X_m, y_m, max_feature_val = self._dataset_setup_for_fit(X, y,
                                                                convert_dtype)
        # Track the labels to see if update is necessary
        self.update_labels = not check_labels(y_m, self.classes_)
        cdef uintptr_t X_ptr, y_ptr
        X_ptr = X_m.ptr
        y_ptr = y_m.ptr

        cdef handle_t* handle_ =\
            <handle_t*><uintptr_t>self.handle.getHandle()

        # Allocate C++ metadata for both precisions; only the one matching
        # self.dtype is populated by the fit call below.
        cdef RandomForestMetaData[float, int] *rf_forest = \
            new RandomForestMetaData[float, int]()
        self.rf_forest = <uintptr_t> rf_forest
        cdef RandomForestMetaData[double, int] *rf_forest64 = \
            new RandomForestMetaData[double, int]()
        self.rf_forest64 = <uintptr_t> rf_forest64

        if self.random_state is None:
            seed_val = <uintptr_t>NULL
        else:
            seed_val = <uintptr_t>self.random_state

        # Pack Python-level hyperparameters into the C++ RF_params struct.
        rf_params = set_rf_params(<int> self.max_depth,
                                  <int> self.max_leaves,
                                  <float> max_feature_val,
                                  <int> self.n_bins,
                                  <int> self.min_samples_leaf,
                                  <int> self.min_samples_split,
                                  <float> self.min_impurity_decrease,
                                  <bool> self.bootstrap,
                                  <int> self.n_estimators,
                                  <float> self.max_samples,
                                  <uint64_t> seed_val,
                                  <CRITERION> self.split_criterion,
                                  <int> self.n_streams,
                                  <int> self.max_batch_size)

        if self.dtype == np.float32:
            fit(handle_[0],
                rf_forest,
                <float*> X_ptr,
                <int> self.n_rows,
                <int> self.n_cols,
                <int*> y_ptr,
                <int> self.num_classes,
                rf_params,
                <int> self.verbose)

        elif self.dtype == np.float64:
            rf_params64 = rf_params
            fit(handle_[0],
                rf_forest64,
                <double*> X_ptr,
                <int> self.n_rows,
                <int> self.n_cols,
                <int*> y_ptr,
                <int> self.num_classes,
                rf_params64,
                <int> self.verbose)

        else:
            raise TypeError("supports only np.float32 and np.float64 input,"
                            " but input of type '%s' passed."
                            % (str(self.dtype)))

        # make sure that the `fit` is complete before the following delete
        # call happens
        self.handle.sync()
        del X_m
        del y_m
        return self

    @cuml.internals.api_base_return_array(get_output_dtype=True)
    def _predict_model_on_cpu(self, X, convert_dtype) -> CumlArray:
        # CPU (libcuml, non-FIL) prediction path; used when
        # predict_model='CPU'. Returns int32 class labels.
        cdef uintptr_t X_ptr
        X_m, n_rows, n_cols, _dtype = \
            input_to_cuml_array(X, order='C',
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_cols=self.n_cols)
        X_ptr = X_m.ptr

        preds = CumlArray.zeros(n_rows, dtype=np.int32)
        cdef uintptr_t preds_ptr = preds.ptr

        cdef handle_t* handle_ =\
            <handle_t*><uintptr_t>self.handle.getHandle()

        cdef RandomForestMetaData[float, int] *rf_forest = \
            <RandomForestMetaData[float, int]*><uintptr_t> self.rf_forest

        cdef RandomForestMetaData[double, int] *rf_forest64 = \
            <RandomForestMetaData[double, int]*><uintptr_t> self.rf_forest64
        if self.dtype == np.float32:
            predict(handle_[0],
                    rf_forest,
                    <float*> X_ptr,
                    <int> n_rows,
                    <int> n_cols,
                    <int*> preds_ptr,
                    <int> self.verbose)

        elif self.dtype == np.float64:
            predict(handle_[0],
                    rf_forest64,
                    <double*> X_ptr,
                    <int> n_rows,
                    <int> n_cols,
                    <int*> preds_ptr,
                    <int> self.verbose)
        else:
            raise TypeError("supports only np.float32 and np.float64 input,"
                            " but input of type '%s' passed."
                            % (str(self.dtype)))

        self.handle.sync()
        # synchronous w/o a stream
        del X_m
        return preds

    @nvtx_annotate(
        message="predict RF-Classifier @randomforestclassifier.pyx",
        domain="cuml_python")
    @insert_into_docstring(parameters=[('dense', '(n_samples, n_features)')],
                           return_values=[('dense', '(n_samples, 1)')])
    def predict(self, X, predict_model="GPU", threshold=0.5,
                algo='auto', convert_dtype=True,
                fil_sparse_format='auto') -> CumlArray:
        """
        Predicts the labels for X.

        Parameters
        ----------
        X : {}
        predict_model : String (default = 'GPU')
            'GPU' to predict using the GPU, 'CPU' otherwise.
        algo : string (default = ``'auto'``)
            This is optional and required only while performing the
            predict operation on the GPU.

             * ``'naive'`` - simple inference using shared memory
             * ``'tree_reorg'`` - similar to naive but trees rearranged to be
               more coalescing-friendly
             * ``'batch_tree_reorg'`` - similar to tree_reorg but predicting
               multiple rows per thread block
             * ``'auto'`` - choose the algorithm automatically. Currently
             * ``'batch_tree_reorg'`` is used for dense storage
               and 'naive' for sparse storage

        threshold : float (default = 0.5)
            Threshold used for classification. Optional and required only
            while performing the predict operation on the GPU.
        convert_dtype : bool, optional (default = True)
            When set to True, the predict method will, when necessary, convert
            the input to the data type which was used to train the model. This
            will increase memory used for the method.
        fil_sparse_format : boolean or string (default = ``'auto'``)
            This variable is used to choose the type of forest that will be
            created in the Forest Inference Library. It is not required
            while using predict_model='CPU'.

             * ``'auto'`` - choose the storage type automatically
               (currently True is chosen by auto)
             * ``False`` - create a dense forest
             * ``True`` - create a sparse forest, requires algo='naive'
               or algo='auto'

        Returns
        -------
        y : {}
        """
        if predict_model == "CPU":
            preds = self._predict_model_on_cpu(X,
                                               convert_dtype=convert_dtype)
        else:
            # GPU path goes through FIL; implemented in the shared base
            # class.
            preds = \
                self._predict_model_on_gpu(X=X, output_class=True,
                                           threshold=threshold,
                                           algo=algo,
                                           convert_dtype=convert_dtype,
                                           fil_sparse_format=fil_sparse_format,
                                           predict_proba=False)

        if self.update_labels:
            # Map internal 0..k-1 labels back to the user's original labels.
            preds = preds.to_output().astype(self.classes_.dtype)
            preds = invert_labels(preds, self.classes_)
        return preds

    @insert_into_docstring(parameters=[('dense', '(n_samples, n_features)')],
                           return_values=[('dense', '(n_samples, 1)')])
    def predict_proba(self, X, algo='auto',
                      convert_dtype=True,
                      fil_sparse_format='auto') -> CumlArray:
        """
        Predicts class probabilities for X. This function uses the GPU
        implementation of predict.

        Parameters
        ----------
        X : {}
        algo : string (default = 'auto')
            This is optional and required only while performing the
            predict operation on the GPU.

             * ``'naive'`` - simple inference using shared memory
             * ``'tree_reorg'`` - similar to naive but trees rearranged to be
               more coalescing-friendly
             * ``'batch_tree_reorg'`` - similar to tree_reorg but predicting
               multiple rows per thread block
             * ``'auto'`` - choose the algorithm automatically. Currently
             * ``'batch_tree_reorg'`` is used for dense storage
               and 'naive' for sparse storage

        convert_dtype : bool, optional (default = True)
            When set to True, the predict method will, when necessary, convert
            the input to the data type which was used to train the model. This
            will increase memory used for the method.
        fil_sparse_format : boolean or string (default = auto)
            This variable is used to choose the type of forest that will be
            created in the Forest Inference Library. It is not required
            while using predict_model='CPU'.

             * ``'auto'`` - choose the storage type automatically
               (currently True is chosen by auto)
             * ``False`` - create a dense forest
             * ``True`` - create a sparse forest, requires algo='naive'
               or algo='auto'

        Returns
        -------
        y : {}
        """
        preds_proba = \
            self._predict_model_on_gpu(X, output_class=True,
                                       algo=algo,
                                       convert_dtype=convert_dtype,
                                       fil_sparse_format=fil_sparse_format,
                                       predict_proba=True)

        return preds_proba

    @nvtx_annotate(
        message="score RF-Classifier @randomforestclassifier.pyx",
        domain="cuml_python")
    @insert_into_docstring(parameters=[('dense', '(n_samples, n_features)'),
                                       ('dense_intdtype', '(n_samples, 1)')])
    def score(self, X, y, threshold=0.5,
              algo='auto', predict_model="GPU",
              convert_dtype=True, fil_sparse_format='auto'):
        """
        Calculates the accuracy metric score of the model for X.

        Parameters
        ----------
        X : {}
        y : {}
        algo : string (default = 'auto')
            This is optional and required only while performing the
            predict operation on the GPU.

             * ``'naive'`` - simple inference using shared memory
             * ``'tree_reorg'`` - similar to naive but trees rearranged to be
               more coalescing-friendly
             * ``'batch_tree_reorg'`` - similar to tree_reorg but predicting
               multiple rows per thread block
             * ``'auto'`` - choose the algorithm automatically. Currently
             * ``'batch_tree_reorg'`` is used for dense storage
               and 'naive' for sparse storage

        threshold : float
            threshold is used to for classification
            This is optional and required only while performing the
            predict operation on the GPU.
        convert_dtype : boolean, default=True
            whether to convert input data to correct dtype automatically
        predict_model : String (default = 'GPU')
            'GPU' to predict using the GPU, 'CPU' otherwise. The 'GPU' can only
            be used if the model was trained on float32 data and `X` is float32
            or convert_dtype is set to True. Also the 'GPU' should only be
            used for classification problems.
        fil_sparse_format : boolean or string (default = auto)
            This variable is used to choose the type of forest that will be
            created in the Forest Inference Library. It is not required
            while using predict_model='CPU'.

             * ``'auto'`` - choose the storage type automatically
               (currently True is chosen by auto)
             * ``False`` - create a dense forest
             * ``True`` - create a sparse forest, requires algo='naive'
               or algo='auto'

        Returns
        -------
        accuracy : float
           Accuracy of the model [0.0 - 1.0]
        """
        cdef uintptr_t y_ptr
        # Validate X shape/dtype; only the row count is needed here, the
        # predictions themselves come from self.predict below.
        _, n_rows, _, _ = \
            input_to_cuml_array(X, check_dtype=self.dtype,
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_cols=self.n_cols)
        y_m, n_rows, _, _ = \
            input_to_cuml_array(y, check_dtype=np.int32,
                                convert_to_dtype=(np.int32 if convert_dtype
                                                  else False))
        y_ptr = y_m.ptr
        preds = self.predict(X,
                             threshold=threshold, algo=algo,
                             convert_dtype=convert_dtype,
                             predict_model=predict_model,
                             fil_sparse_format=fil_sparse_format)

        cdef uintptr_t preds_ptr
        preds_m, _, _, _ = \
            input_to_cuml_array(preds, convert_to_dtype=np.int32)
        preds_ptr = preds_m.ptr

        cdef handle_t* handle_ =\
            <handle_t*><uintptr_t>self.handle.getHandle()

        cdef RandomForestMetaData[float, int] *rf_forest = \
            <RandomForestMetaData[float, int]*><uintptr_t> self.rf_forest

        cdef RandomForestMetaData[double, int] *rf_forest64 = \
            <RandomForestMetaData[double, int]*><uintptr_t> self.rf_forest64

        # The C++ score() compares labels against the freshly computed
        # predictions and fills an RF_metrics struct.
        if self.dtype == np.float32:
            self.stats = score(handle_[0],
                               rf_forest,
                               <int*> y_ptr,
                               <int> n_rows,
                               <int*> preds_ptr,
                               <int> self.verbose)
        elif self.dtype == np.float64:
            self.stats = score(handle_[0],
                               rf_forest64,
                               <int*> y_ptr,
                               <int> n_rows,
                               <int*> preds_ptr,
                               <int> self.verbose)
        else:
            raise TypeError("supports only np.float32 and np.float64 input,"
                            " but input of type '%s' passed."
                            % (str(self.dtype)))

        self.handle.sync()
        del y_m
        del preds_m
        return self.stats['accuracy']

    def get_summary_text(self):
        """
        Obtain the text summary of the random forest model
        """
        cdef RandomForestMetaData[float, int] *rf_forest = \
            <RandomForestMetaData[float, int]*><uintptr_t> self.rf_forest

        cdef RandomForestMetaData[double, int] *rf_forest64 = \
            <RandomForestMetaData[double, int]*><uintptr_t> self.rf_forest64

        if self.dtype == np.float64:
            return get_rf_summary_text(rf_forest64).decode('utf-8')
        else:
            return get_rf_summary_text(rf_forest).decode('utf-8')

    def get_detailed_text(self):
        """
        Obtain the detailed information for the random forest model, as text
        """
        cdef RandomForestMetaData[float, int] *rf_forest = \
            <RandomForestMetaData[float, int]*><uintptr_t> self.rf_forest

        cdef RandomForestMetaData[double, int] *rf_forest64 = \
            <RandomForestMetaData[double, int]*><uintptr_t> self.rf_forest64

        if self.dtype == np.float64:
            return get_rf_detailed_text(rf_forest64).decode('utf-8')
        else:
            return get_rf_detailed_text(rf_forest).decode('utf-8')

    def get_json(self):
        """
        Export the Random Forest model as a JSON string
        """
        cdef RandomForestMetaData[float, int] *rf_forest = \
            <RandomForestMetaData[float, int]*><uintptr_t> self.rf_forest

        cdef RandomForestMetaData[double, int] *rf_forest64 = \
            <RandomForestMetaData[double, int]*><uintptr_t> self.rf_forest64

        if self.dtype == np.float64:
            return get_rf_json(rf_forest64).decode('utf-8')
        return get_rf_json(rf_forest).decode('utf-8')
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/ensemble/randomforest_shared.pxd | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import math
import numpy as np
import warnings
from libcpp cimport bool
from libc.stdint cimport uintptr_t, uint64_t
from libc.stdlib cimport calloc, malloc, free
from libcpp.vector cimport vector
from libcpp.string cimport string
from pylibraft.common.handle import Handle
from cuml import ForestInference
from cuml.internals.base import Base
from pylibraft.common.handle cimport handle_t
cimport cuml.common.cuda
cdef extern from "treelite/c_api.h":
    # Opaque treelite model handles plus its last-error accessor.
    ctypedef void* ModelHandle
    ctypedef void* ModelBuilderHandle
    cdef const char* TreeliteGetLastError()


cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML":
    # Node-split criteria understood by the C++ layer. The integer values
    # of these enumerators are what the Python-level `split_criterion`
    # parameter maps onto.
    cdef enum CRITERION:
        GINI,
        ENTROPY,
        MSE,
        MAE,
        POISSON,
        GAMMA,
        INVERSE_GAUSSIAN,
        CRITERION_END


cdef extern from "cuml/ensemble/randomforest.hpp" namespace "ML":

    cdef enum RF_type:
        CLASSIFICATION,
        REGRESSION

    cdef enum task_category:
        REGRESSION_MODEL = 1,
        CLASSIFICATION_MODEL = 2

    # Metrics filled in by score(); which fields are meaningful depends on
    # rf_type (accuracy for classification, error terms for regression).
    cdef struct RF_metrics:
        RF_type rf_type
        float accuracy
        double mean_abs_error
        double mean_squared_error
        double median_abs_error

    # Partial mirror of the C++ RF_params struct; only the fields accessed
    # from Cython are declared, `pass` leaves the rest opaque.
    cdef struct RF_params:
        int n_trees
        bool bootstrap
        float max_samples
        int seed
        pass

    # Per-model C++ state: the trained trees (opaque) and the
    # hyperparameters used to train them.
    cdef cppclass RandomForestMetaData[T, L]:
        void* trees
        RF_params rf_params

    #
    # Treelite handling
    #
    cdef void build_treelite_forest[T, L](ModelHandle*,
                                          RandomForestMetaData[T, L]*,
                                          int
                                          ) except +

    cdef void delete_rf_metadata[T, L](RandomForestMetaData[T, L]*) except +

    #
    # Text representation of random forest
    #
    cdef string get_rf_summary_text[T, L](RandomForestMetaData[T, L]*) except +

    cdef string get_rf_detailed_text[T, L](RandomForestMetaData[T, L]*
                                           ) except +

    cdef string get_rf_json[T, L](RandomForestMetaData[T, L]*) except +

    # Builds an RF_params struct. Argument order (see the call site in
    # RandomForestClassifier.fit): max_depth, max_leaves, max_features,
    # n_bins, min_samples_leaf, min_samples_split, min_impurity_decrease,
    # bootstrap, n_estimators, max_samples, seed, split_criterion,
    # n_streams, max_batch_size.
    cdef RF_params set_rf_params(int,
                                 int,
                                 float,
                                 int,
                                 int,
                                 int,
                                 float,
                                 bool,
                                 int,
                                 float,
                                 uint64_t,
                                 CRITERION,
                                 int,
                                 int) except +

    # Serialize a treelite model to bytes / merge several treelite models
    # into one (used for multi-GPU ensembling — confirm against callers).
    cdef vector[unsigned char] save_model(ModelHandle)

    cdef ModelHandle concatenate_trees(
        vector[ModelHandle] &treelite_handles) except +
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/ensemble/__init__.py | #
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.ensemble.randomforestclassifier import RandomForestClassifier
from cuml.ensemble.randomforestregressor import RandomForestRegressor
from cuml.ensemble.randomforest_common import (
_check_fil_parameter_validity,
_obtain_fil_model,
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/solvers/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython translation units for cuml.solvers built in every configuration.
set(cython_sources
    cd.pyx
    qn.pyx
    sgd.pyx
)

# The multi-GPU (OPG) coordinate-descent variant is only compiled when the
# build is not restricted to a single GPU.
if(NOT SINGLEGPU)
  list(APPEND cython_sources
    cd_mg.pyx
  )
endif()

# Compile each .pyx into an extension module prefixed "solvers_", linked
# against the (possibly multi-GPU) cuml libraries.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_mg_libraries}"
  MODULE_PREFIX solvers_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/solvers/cd.pyx | # Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
from libc.stdint cimport uintptr_t
from cuml.common import CumlArray
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.base import Base
from cuml.common.doc_utils import generate_docstring
from cuml.internals.input_utils import input_to_cuml_array
from cuml.internals.mixins import FMajorInputTagMixin
# These declarations are only compiled into GPU-enabled builds; CPU-only
# builds of the package skip the C++ solver bindings entirely.
IF GPUBUILD == 1:
    from libcpp cimport bool
    from pylibraft.common.handle cimport handle_t

    # C++ coordinate-descent solver entry points, overloaded for float32
    # and float64 data.
    cdef extern from "cuml/solvers/solver.hpp" namespace "ML::Solver":

        # Train: handle, input matrix, n_rows, n_cols, labels, coef (out),
        # intercept (out), fit_intercept, normalize, epochs, loss id,
        # alpha, l1_ratio, shuffle, tolerance, optional sample weights.
        cdef void cdFit(handle_t& handle,
                        float *input,
                        int n_rows,
                        int n_cols,
                        float *labels,
                        float *coef,
                        float *intercept,
                        bool fit_intercept,
                        bool normalize,
                        int epochs,
                        int loss,
                        float alpha,
                        float l1_ratio,
                        bool shuffle,
                        float tol,
                        float *sample_weight) except +

        # float64 overload of cdFit.
        cdef void cdFit(handle_t& handle,
                        double *input,
                        int n_rows,
                        int n_cols,
                        double *labels,
                        double *coef,
                        double *intercept,
                        bool fit_intercept,
                        bool normalize,
                        int epochs,
                        int loss,
                        double alpha,
                        double l1_ratio,
                        bool shuffle,
                        double tol,
                        double *sample_weight) except +

        # Predict: handle, input matrix, n_rows, n_cols, trained coef,
        # intercept, predictions (out), loss id.
        cdef void cdPredict(handle_t& handle,
                            const float *input,
                            int n_rows,
                            int n_cols,
                            const float *coef,
                            float intercept,
                            float *preds,
                            int loss) except +

        # float64 overload of cdPredict.
        cdef void cdPredict(handle_t& handle,
                            const double *input,
                            int n_rows,
                            int n_cols,
                            const double *coef,
                            double intercept,
                            double *preds,
                            int loss) except +
class CD(Base,
         FMajorInputTagMixin):
    """
    Coordinate Descent (CD) is a very common optimization algorithm that
    minimizes along coordinate directions to find the minimum of a function.
    cuML's CD algorithm accepts a numpy matrix or a cuDF DataFrame as the
    input dataset. The CD algorithm currently works with linear
    regression and ridge, lasso, and elastic-net penalties.

    Examples
    --------

    .. code-block:: python

        >>> import cupy as cp
        >>> import cudf
        >>> from cuml.solvers import CD as cumlCD
        >>> cd = cumlCD(alpha=0.0)
        >>> X = cudf.DataFrame()
        >>> X['col1'] = cp.array([1,1,2,2], dtype=cp.float32)
        >>> X['col2'] = cp.array([1,2,2,3], dtype=cp.float32)
        >>> y = cudf.Series(cp.array([6.0, 8.0, 9.0, 11.0], dtype=cp.float32))
        >>> cd.fit(X,y)
        CD()
        >>> print(cd.coef_) # doctest: +SKIP
        0 1.001...
        1 1.998...
        dtype: float32
        >>> print(cd.intercept_) # doctest: +SKIP
        3.00...
        >>> X_new = cudf.DataFrame()
        >>> X_new['col1'] = cp.array([3,2], dtype=cp.float32)
        >>> X_new['col2'] = cp.array([5,5], dtype=cp.float32)
        >>> preds = cd.predict(X_new)
        >>> print(preds) # doctest: +SKIP
        0 15.997...
        1 14.995...
        dtype: float32

    Parameters
    ----------
    loss : 'squared_loss'
        Only 'squared_loss' is supported right now.
        'squared_loss' uses linear regression in its predict step.
    alpha: float (default = 0.0001)
        The constant value which decides the degree of regularization.
        'alpha = 0' is equivalent to an ordinary least square, solved by the
        LinearRegression object.
    l1_ratio: float (default = 0.15)
        The ElasticNet mixing parameter, with 0 <= l1_ratio <= 1. For
        l1_ratio = 0 the penalty is an L2 penalty.
        For l1_ratio = 1 it is an L1 penalty. For 0 < l1_ratio < 1,
        the penalty is a combination of L1 and L2.
    fit_intercept : boolean (default = True)
        If True, the model tries to correct for the global mean of y.
        If False, the model expects that you have centered the data.
    normalize : boolean (default = False)
        Whether to normalize the data or not.
    max_iter : int (default = 1000)
        The number of times the model should iterate through the entire
        dataset during training
    tol : float (default = 1e-3)
        The tolerance for the optimization: if the updates are smaller than tol,
        solver stops.
    shuffle : boolean (default = True)
        If set to 'True', a random coefficient is updated every iteration rather
        than looping over features sequentially by default.
        This (setting to 'True') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    """

    # Descriptor so `coef_` honors cuML's configurable output type.
    coef_ = CumlArrayDescriptor()

    def __init__(self, *, loss='squared_loss', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, normalize=False, max_iter=1000, tol=1e-3,
                 shuffle=True, handle=None, output_type=None, verbose=False):
        # Only squared loss (linear regression) is implemented by the
        # underlying C++ solver.
        if loss not in ['squared_loss']:
            msg = "loss {!r} is not supported"
            raise NotImplementedError(msg.format(loss))

        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)

        # Hyper-parameters.
        self.loss = loss
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.shuffle = shuffle
        self.intercept_value = 0.0
        # Fitted state, populated by `fit`.
        self.coef_ = None
        self.intercept_ = None

    def _check_alpha(self, alpha):
        # Validate an iterable of alpha values: each must be > 0.
        # NOTE(review): `msg` has no `{}` placeholder, so `.format(alpha)`
        # is a no-op; a ValueError would arguably fit better than TypeError.
        for el in alpha:
            if el <= 0.0:
                msg = "alpha values have to be positive"
                raise TypeError(msg.format(alpha))

    def _get_loss_int(self):
        # Map the string loss name to the integer enum the C++ solver expects.
        return {
            'squared_loss': 0,
        }[self.loss]

    @generate_docstring()
    def fit(self, X, y, convert_dtype=False, sample_weight=None) -> "CD":
        """
        Fit the model with X and y.

        """
        cdef uintptr_t sample_weight_ptr

        # Coerce X to a device array; its dtype (f32/f64) becomes the model
        # dtype that y and sample_weight must match (or be converted to).
        X_m, n_rows, self.n_cols, self.dtype = \
            input_to_cuml_array(X, check_dtype=[np.float32, np.float64])

        y_m, *_ = \
            input_to_cuml_array(y, check_dtype=self.dtype,
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_rows=n_rows, check_cols=1)

        if sample_weight is not None:
            sample_weight_m, _, _, _ = \
                input_to_cuml_array(sample_weight, check_dtype=self.dtype,
                                    convert_to_dtype=(
                                        self.dtype if convert_dtype else None),
                                    check_rows=n_rows, check_cols=1)
            sample_weight_ptr = sample_weight_m.ptr
        else:
            # Null pointer signals "no sample weights" to the C++ solver.
            sample_weight_ptr = 0

        cdef uintptr_t _X_ptr = X_m.ptr
        cdef uintptr_t _y_ptr = y_m.ptr
        self.n_alpha = 1

        # Output buffer for the coefficients, filled in-place by cdFit.
        self.coef_ = CumlArray.zeros(self.n_cols, dtype=self.dtype)
        cdef uintptr_t _coef_ptr = self.coef_.ptr

        # Stack variables that receive the fitted intercept by pointer.
        cdef float _c_intercept_f32
        cdef double _c_intercept2_f64

        IF GPUBUILD == 1:
            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            if self.dtype == np.float32:
                cdFit(handle_[0],
                      <float*>_X_ptr,
                      <int>n_rows,
                      <int>self.n_cols,
                      <float*>_y_ptr,
                      <float*>_coef_ptr,
                      <float*>&_c_intercept_f32,
                      <bool>self.fit_intercept,
                      <bool>self.normalize,
                      <int>self.max_iter,
                      <int>self._get_loss_int(),
                      <float>self.alpha,
                      <float>self.l1_ratio,
                      <bool>self.shuffle,
                      <float>self.tol,
                      <float*>sample_weight_ptr)

                self.intercept_ = _c_intercept_f32
            else:
                cdFit(handle_[0],
                      <double*>_X_ptr,
                      <int>n_rows,
                      <int>self.n_cols,
                      <double*>_y_ptr,
                      <double*>_coef_ptr,
                      <double*>&_c_intercept2_f64,
                      <bool>self.fit_intercept,
                      <bool>self.normalize,
                      <int>self.max_iter,
                      <int>self._get_loss_int(),
                      <double>self.alpha,
                      <double>self.l1_ratio,
                      <bool>self.shuffle,
                      <double>self.tol,
                      <double*>sample_weight_ptr)

                self.intercept_ = _c_intercept2_f64

        # Block until the async C++ work on the handle's stream completes.
        self.handle.sync()

        del X_m
        del y_m
        if sample_weight is not None:
            del sample_weight_m

        return self

    @generate_docstring(return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Predicted values',
                                       'shape': '(n_samples, 1)'})
    def predict(self, X, convert_dtype=False) -> CumlArray:
        """
        Predicts the y for X.

        """
        # X must match the training dtype and feature count.
        X_m, n_rows, _n_cols, _ = \
            input_to_cuml_array(X, check_dtype=self.dtype,
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_cols=self.n_cols)

        cdef uintptr_t _X_ptr = X_m.ptr
        cdef uintptr_t _coef_ptr = self.coef_.ptr

        # Output buffer, filled in-place by cdPredict; keeps X's index so
        # cuDF outputs align with the input rows.
        preds = CumlArray.zeros(n_rows, dtype=self.dtype,
                                index=X_m.index)
        cdef uintptr_t _preds_ptr = preds.ptr

        IF GPUBUILD == 1:
            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            if self.dtype == np.float32:
                cdPredict(handle_[0],
                          <float*>_X_ptr,
                          <int>n_rows,
                          <int>_n_cols,
                          <float*>_coef_ptr,
                          <float>self.intercept_,
                          <float*>_preds_ptr,
                          <int>self._get_loss_int())
            else:
                cdPredict(handle_[0],
                          <double*>_X_ptr,
                          <int>n_rows,
                          <int>_n_cols,
                          <double*>_coef_ptr,
                          <double>self.intercept_,
                          <double*>_preds_ptr,
                          <int>self._get_loss_int())

        self.handle.sync()

        del X_m

        return preds

    def get_param_names(self):
        # Hyper-parameters exposed for get_params/set_params/cloning.
        return super().get_param_names() + [
            "loss",
            "alpha",
            "l1_ratio",
            "fit_intercept",
            "normalize",
            "max_iter",
            "tol",
            "shuffle",
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/solvers/cd_mg.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
rmm = gpu_only_import('rmm')
from libcpp cimport bool
from libc.stdint cimport uintptr_t
from cython.operator cimport dereference as deref
import cuml.internals
from pylibraft.common.handle cimport handle_t
from cuml.common.opg_data_utils_mg cimport *
from cuml.decomposition.utils cimport *
from cuml.linear_model.base_mg import MGFitMixin
from cuml.solvers import CD
# External multi-node multi-GPU (OPG) coordinate-descent entry points.
# Data arrives as per-partition vectors plus a PartDescriptor describing
# the distributed layout; float32 and float64 overloads are provided.
cdef extern from "cuml/solvers/cd_mg.hpp" namespace "ML::CD::opg":

    # Single-precision distributed fit; writes `coef` and `*intercept`.
    cdef void fit(handle_t& handle,
                  vector[floatData_t *] input_data,
                  PartDescriptor &input_desc,
                  vector[floatData_t *] labels,
                  float *coef,
                  float *intercept,
                  bool fit_intercept,
                  bool normalize,
                  int epochs,
                  float alpha,
                  float l1_ratio,
                  bool shuffle,
                  float tol,
                  bool verbose) except +

    # Double-precision overload.
    cdef void fit(handle_t& handle,
                  vector[doubleData_t *] input_data,
                  PartDescriptor &input_desc,
                  vector[doubleData_t *] labels,
                  double *coef,
                  double *intercept,
                  bool fit_intercept,
                  bool normalize,
                  int epochs,
                  double alpha,
                  double l1_ratio,
                  bool shuffle,
                  double tol,
                  bool verbose) except +
class CDMG(MGFitMixin, CD):
    """
    Cython class for MNMG code usage. Not meant for end user consumption.
    """

    def __init__(self, **kwargs):
        super(CDMG, self).__init__(**kwargs)

    @cuml.internals.api_base_return_any_skipall
    def _fit(self, X, y, coef_ptr, input_desc):
        # X and y arrive as integer addresses of C++ vectors of per-partition
        # data (built by MGFitMixin — TODO confirm against the mixin);
        # coef_ptr points at a preallocated coefficient buffer that the C++
        # routine fills in-place.
        cdef float float_intercept
        cdef double double_intercept
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()

        if self.dtype == np.float32:
            fit(handle_[0],
                deref(<vector[floatData_t*]*><uintptr_t>X),
                deref(<PartDescriptor*><uintptr_t>input_desc),
                deref(<vector[floatData_t*]*><uintptr_t>y),
                <float*><size_t>coef_ptr,
                <float*>&float_intercept,
                <bool>self.fit_intercept,
                <bool>self.normalize,
                <int>self.max_iter,
                <float>self.alpha,
                <float>self.l1_ratio,
                <bool>self.shuffle,
                <float>self.tol,
                False)

            self.intercept_ = float_intercept
        else:
            fit(handle_[0],
                deref(<vector[doubleData_t*]*><uintptr_t>X),
                deref(<PartDescriptor*><uintptr_t>input_desc),
                deref(<vector[doubleData_t*]*><uintptr_t>y),
                <double*><size_t>coef_ptr,
                <double*>&double_intercept,
                <bool>self.fit_intercept,
                <bool>self.normalize,
                <int>self.max_iter,
                <double>self.alpha,
                <double>self.l1_ratio,
                <bool>self.shuffle,
                <double>self.tol,
                False)

            self.intercept_ = double_intercept

        # Wait for the distributed solver to finish on this handle's stream.
        self.handle.sync()
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/solvers/qn.pyx | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from libc.stdint cimport uintptr_t
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.internals.base import Base
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.array_sparse import SparseCumlArray
from cuml.internals.global_settings import GlobalSettings
from cuml.common.doc_utils import generate_docstring
from cuml.common import input_to_cuml_array
from cuml.internals.mixins import FMajorInputTagMixin
from cuml.common.sparse_utils import is_sparse
# Compile-time guard: C++ GLM/QN symbols exist only in GPU-enabled builds.
IF GPUBUILD == 1:
    from libcpp cimport bool
    from cuml.metrics import accuracy_score
    from pylibraft.common.handle cimport handle_t

    cdef extern from "cuml/linear_model/glm.hpp" namespace "ML::GLM" nogil:

        # Loss functions understood by the C++ quasi-Newton solver.
        cdef enum qn_loss_type "ML::GLM::qn_loss_type":
            QN_LOSS_LOGISTIC "ML::GLM::QN_LOSS_LOGISTIC"
            QN_LOSS_SQUARED "ML::GLM::QN_LOSS_SQUARED"
            QN_LOSS_SOFTMAX "ML::GLM::QN_LOSS_SOFTMAX"
            QN_LOSS_SVC_L1 "ML::GLM::QN_LOSS_SVC_L1"
            QN_LOSS_SVC_L2 "ML::GLM::QN_LOSS_SVC_L2"
            QN_LOSS_SVR_L1 "ML::GLM::QN_LOSS_SVR_L1"
            QN_LOSS_SVR_L2 "ML::GLM::QN_LOSS_SVR_L2"
            QN_LOSS_ABS "ML::GLM::QN_LOSS_ABS"
            QN_LOSS_UNKNOWN "ML::GLM::QN_LOSS_UNKNOWN"

        # Solver configuration; mirrored on the Python side by QNParams.
        cdef struct qn_params:
            qn_loss_type loss
            double penalty_l1
            double penalty_l2
            double grad_tol
            double change_tol
            int max_iter
            int linesearch_max_iter
            int lbfgs_memory
            int verbose
            bool fit_intercept
            bool penalty_normalized

        # Dense fit: trains weights `w0` in-place; reports the final
        # objective in `*f` and iteration count in `*num_iters`.
        void qnFit[T, I](
            const handle_t& cuml_handle,
            const qn_params& pams,
            T *X,
            bool X_col_major,
            T *y,
            I N,
            I D,
            I C,
            T *w0,
            T *f,
            int *num_iters,
            T *sample_weight) except +

        # CSR-sparse fit (values / column indices / row offsets + nnz).
        void qnFitSparse[T, I](
            const handle_t& cuml_handle,
            const qn_params& pams,
            T *X_values,
            I *X_cols,
            I *X_row_ids,
            I X_nnz,
            T *y,
            I N,
            I D,
            I C,
            T *w0,
            T *f,
            int *num_iters,
            T *sample_weight) except +

        # Dense decision function: writes raw confidence scores.
        void qnDecisionFunction[T, I](
            const handle_t& cuml_handle,
            const qn_params& pams,
            T *X,
            bool X_col_major,
            I N,
            I D,
            I C,
            T *params,
            T *scores) except +

        # Sparse (CSR) decision function.
        void qnDecisionFunctionSparse[T, I](
            const handle_t& cuml_handle,
            const qn_params& pams,
            T *X_values,
            I *X_cols,
            I *X_row_ids,
            I X_nnz,
            I N,
            I D,
            I C,
            T *params,
            T *scores) except +

        # Dense predict: writes final predictions (e.g. class labels).
        void qnPredict[T, I](
            const handle_t& cuml_handle,
            const qn_params& pams,
            T *X,
            bool X_col_major,
            I N,
            I D,
            I C,
            T *params,
            T *preds) except +

        # Sparse (CSR) predict.
        void qnPredictSparse[T, I](
            const handle_t& cuml_handle,
            const qn_params& pams,
            T *X_values,
            I *X_cols,
            I *X_row_ids,
            I X_nnz,
            I N,
            I D,
            I C,
            T *params,
            T *preds) except +
class StructWrapper(type):
    """Metaclass that auto-generates accessor properties.

    For every key returned by the class's ``get_param_defaults`` that the
    class does not already define, attach a property delegating reads to
    ``self._getparam(key)`` and writes to ``self._setparam(key, value)``.
    Classes without ``get_param_defaults`` are left untouched.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)

        def make_property(key):
            # Bind `key` per property to dodge the late-binding-closure trap.
            def getter(self):
                return self._getparam(key)

            def setter(self, value):
                self._setparam(key, value)

            return property(getter, setter)

        defaults_fn = getattr(new_cls, 'get_param_defaults', None)
        default_keys = defaults_fn() if defaults_fn is not None else {}
        for key in default_keys:
            if not hasattr(new_cls, key):
                setattr(new_cls, key, make_property(key))
        return new_cls
class StructParams(metaclass=StructWrapper):
    """Base class for thin Python wrappers over a C struct.

    The raw field values live in the ``params`` dict; the ``StructWrapper``
    metaclass exposes each default key as a property backed by
    ``_getparam`` / ``_setparam``. Subclasses supply ``get_param_defaults``.
    """

    # Backing storage for the struct fields.
    params: dict

    def __new__(cls, *args, **kwargs):
        instance = object.__new__(cls)
        # Copy so instances never share (and mutate) the defaults mapping.
        instance.params = cls.get_param_defaults().copy()
        return instance

    def __init__(self, **kwargs):
        valid_names = set(self.get_param_names())
        for name, value in kwargs.items():
            if name not in valid_names:
                # Unknown keyword arguments are silently ignored.
                continue
            setattr(self, name, value)

    def _getparam(self, key):
        return self.params[key]

    def _setparam(self, key, val):
        self.params[key] = val

    def get_param_names(self):
        return self.get_param_defaults().keys()

    def __str__(self):
        return type(self).__name__ + str(self.params)
class QNParams(StructParams):
    """Python-side mirror of the C++ ``qn_params`` struct.

    Field defaults come straight from a default-initialized C++ struct;
    ``loss`` is translated between its string name and the C++ enum value.
    """

    @staticmethod
    def get_param_defaults():
        IF GPUBUILD == 1:
            # Cython converts the default-initialized struct to a Python
            # dict on return, picking up the C++ default values.
            cdef qn_params ps
            return ps
        # NOTE(review): in non-GPU builds this implicitly returns None,
        # which would break StructParams.__new__ — presumably this class
        # is only instantiated in GPU builds; confirm.

    @property
    def loss(self) -> str:
        # Translate the stored C++ enum value back to its string name.
        loss = self._getparam('loss')
        IF GPUBUILD == 1:
            if loss == qn_loss_type.QN_LOSS_LOGISTIC:
                return "sigmoid"
            if loss == qn_loss_type.QN_LOSS_SQUARED:
                return "l2"
            if loss == qn_loss_type.QN_LOSS_SOFTMAX:
                return "softmax"
            if loss == qn_loss_type.QN_LOSS_SVC_L1:
                return "svc_l1"
            if loss == qn_loss_type.QN_LOSS_SVC_L2:
                return "svc_l2"
            if loss == qn_loss_type.QN_LOSS_SVR_L1:
                return "svr_l1"
            if loss == qn_loss_type.QN_LOSS_SVR_L2:
                return "svr_l2"
            if loss == qn_loss_type.QN_LOSS_ABS:
                return "l1"
        raise ValueError(f"Unknown loss enum value: {loss}")

    @loss.setter
    def loss(self, loss: str):
        # Translate a user-facing loss name into the C++ enum value.
        # Note the accepted aliases: "logistic" -> sigmoid, "normal" -> l2.
        IF GPUBUILD == 1:
            if loss in {"sigmoid", "logistic"}:
                self._setparam('loss', qn_loss_type.QN_LOSS_LOGISTIC)
            elif loss == "softmax":
                self._setparam('loss', qn_loss_type.QN_LOSS_SOFTMAX)
            elif loss in {"normal", "l2"}:
                self._setparam('loss', qn_loss_type.QN_LOSS_SQUARED)
            elif loss == "l1":
                self._setparam('loss', qn_loss_type.QN_LOSS_ABS)
            elif loss == "svc_l1":
                self._setparam('loss', qn_loss_type.QN_LOSS_SVC_L1)
            elif loss == "svc_l2":
                self._setparam('loss', qn_loss_type.QN_LOSS_SVC_L2)
            elif loss == "svr_l1":
                self._setparam('loss', qn_loss_type.QN_LOSS_SVR_L1)
            elif loss == "svr_l2":
                self._setparam('loss', qn_loss_type.QN_LOSS_SVR_L2)
            else:
                raise ValueError(f"Unknown loss string value: {loss}")
class QN(Base,
FMajorInputTagMixin):
"""
Quasi-Newton methods are used to either find zeroes or local maxima
and minima of functions, and used by this class to optimize a cost
function.
Two algorithms are implemented underneath cuML's QN class, and which one
is executed depends on the following rule:
* Orthant-Wise Limited Memory Quasi-Newton (OWL-QN) if there is l1
regularization
* Limited Memory BFGS (L-BFGS) otherwise.
cuML's QN class can take array-like objects, either in host as
NumPy arrays or in device (as Numba or __cuda_array_interface__ compliant).
Examples
--------
.. code-block:: python
>>> import cudf
>>> import cupy as cp
>>> # Both import methods supported
>>> # from cuml import QN
>>> from cuml.solvers import QN
>>> X = cudf.DataFrame()
>>> X['col1'] = cp.array([1,1,2,2], dtype=cp.float32)
>>> X['col2'] = cp.array([1,2,2,3], dtype=cp.float32)
>>> y = cudf.Series(cp.array([0.0, 0.0, 1.0, 1.0], dtype=cp.float32) )
>>> solver = QN()
>>> solver.fit(X,y)
QN()
>>> # Note: for now, the coefficients also include the intercept in the
>>> # last position if fit_intercept=True
>>> print(solver.coef_) # doctest: +SKIP
0 37.371...
1 0.949...
dtype: float32
>>> print(solver.intercept_) # doctest: +SKIP
0 -57.738...
>>> X_new = cudf.DataFrame()
>>> X_new['col1'] = cp.array([1,5], dtype=cp.float32)
>>> X_new['col2'] = cp.array([2,5], dtype=cp.float32)
>>> preds = solver.predict(X_new)
>>> print(preds)
0 0.0
1 1.0
dtype: float32
Parameters
----------
loss: 'sigmoid', 'softmax', 'l1', 'l2', 'svc_l1', 'svc_l2', 'svr_l1', \
'svr_l2' (default = 'sigmoid').
'sigmoid' loss used for single class logistic regression;
'softmax' loss used for multiclass logistic regression;
'l1'/'l2' loss used for regression.
fit_intercept: boolean (default = True)
If True, the model tries to correct for the global mean of y.
If False, the model expects that you have centered the data.
l1_strength: float (default = 0.0)
l1 regularization strength (if non-zero, will run OWL-QN, else L-BFGS).
Use `penalty_normalized` to control whether the solver divides this
by the sample size.
l2_strength: float (default = 0.0)
l2 regularization strength.
Use `penalty_normalized` to control whether the solver divides this
by the sample size.
max_iter: int (default = 1000)
Maximum number of iterations taken for the solvers to converge.
tol: float (default = 1e-4)
The training process will stop if
`norm(current_loss_grad) <= tol * max(current_loss, tol)`.
This differs slightly from the `gtol`-controlled stopping condition in
`scipy.optimize.minimize(method='L-BFGS-B')
<https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html>`_:
`norm(current_loss_projected_grad) <= gtol`.
Note, `sklearn.LogisticRegression()
<https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html>`_
uses the sum of softmax/logistic loss over the input data, whereas cuML
uses the average. As a result, Scikit-learn's loss is usually
`sample_size` times larger than cuML's.
To account for the differences you may divide the `tol` by the sample
size; this would ensure that the cuML solver does not stop earlier than
the Scikit-learn solver.
delta: Optional[float] (default = None)
The training process will stop if
`abs(current_loss - previous_loss) <= delta * max(current_loss, tol)`.
When `None`, it's set to `tol * 0.01`; when `0`, the check is disabled.
Given the current step `k`, parameter `previous_loss` here is the loss
at the step `k - p`, where `p` is a small positive integer set
internally.
Note, this parameter corresponds to `ftol` in
`scipy.optimize.minimize(method='L-BFGS-B')
<https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html>`_,
which is set by default to a minuscule `2.2e-9` and is not exposed in
`sklearn.LogisticRegression()
<https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html>`_.
This condition is meant to protect the solver against doing vanishingly
small linesearch steps or zigzagging.
You may choose to set `delta = 0` to make sure the cuML solver does
not stop earlier than the Scikit-learn solver.
linesearch_max_iter: int (default = 50)
Max number of linesearch iterations per outer iteration of the
algorithm.
lbfgs_memory: int (default = 5)
Rank of the lbfgs inverse-Hessian approximation. Method will use
O(lbfgs_memory * D) memory.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
penalty_normalized : bool, default=True
When set to True, l1 and l2 parameters are divided by the sample size.
This flag can be used to achieve a behavior compatible with other
implementations, such as sklearn's.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
The estimated coefficients for the linear regression model.
Note: shape is (n_classes, n_features + 1) if fit_intercept = True.
intercept_ : array (n_classes, 1)
The independent term. If `fit_intercept` is False, will be 0.
Notes
-----
This class contains implementations of two popular Quasi-Newton methods:
- Limited-memory Broyden Fletcher Goldfarb Shanno (L-BFGS) [Nocedal,
Wright - Numerical Optimization (1999)]
- `Orthant-wise limited-memory quasi-newton (OWL-QN)
[Andrew, Gao - ICML 2007]
<https://www.microsoft.com/en-us/research/publication/scalable-training-of-l1-regularized-log-linear-models/>`_
"""
    # Descriptors so these array attributes honor the configured output
    # type (cupy/numpy/cudf/...) when read by the user. `_coef_` stores the
    # raw parameter array; the public `coef_` property reshapes it.
    _coef_ = CumlArrayDescriptor()
    intercept_ = CumlArrayDescriptor()
def __init__(self, *, loss='sigmoid', fit_intercept=True,
l1_strength=0.0, l2_strength=0.0, max_iter=1000, tol=1e-4,
delta=None, linesearch_max_iter=50, lbfgs_memory=5,
verbose=False, handle=None, output_type=None,
warm_start=False, penalty_normalized=True):
super().__init__(handle=handle,
verbose=verbose,
output_type=output_type)
self.fit_intercept = fit_intercept
self.l1_strength = l1_strength
self.l2_strength = l2_strength
self.max_iter = max_iter
self.tol = tol
self.delta = delta
self.linesearch_max_iter = linesearch_max_iter
self.lbfgs_memory = lbfgs_memory
self.num_iter = 0
self._coef_ = None
self.intercept_ = None
self.warm_start = warm_start
self.penalty_normalized = penalty_normalized
self.loss = loss
@property
@cuml.internals.api_base_return_array_skipall
def coef_(self):
if self._coef_ is None:
return None
if self.fit_intercept:
val = self._coef_[0:-1]
else:
val = self._coef_
val = val.to_output('array')
val = val.T
return val
    @coef_.setter
    def coef_(self, value):
        # Accept the public (n_classes, n_features) layout and convert it
        # back to the internal parameter-array layout (transposed).
        value = value.to_output('array').T
        if self.fit_intercept:
            # Re-append the intercept row that the getter strips off.
            value = GlobalSettings().xpy.vstack([value, self.intercept_])
        value, _, _, _ = input_to_cuml_array(value)
        self._coef_ = value
    @generate_docstring(X='dense_sparse')
    def fit(self, X, y, sample_weight=None, convert_dtype=False) -> "QN":
        """
        Fit the model with X and y.

        """
        sparse_input = is_sparse(X)

        # Handle sparse inputs (CSR with int32 indices for the C++ API)
        if sparse_input:
            X_m = SparseCumlArray(X, convert_index=np.int32)
            n_rows, self.n_cols = X_m.shape
            self.dtype = X_m.dtype

        # Handle dense inputs; X fixes the model dtype (f32/f64)
        else:
            X_m, n_rows, self.n_cols, self.dtype = input_to_cuml_array(
                X, check_dtype=[np.float32, np.float64], order='K'
            )

        y_m, _, _, _ = input_to_cuml_array(
            y, check_dtype=self.dtype,
            convert_to_dtype=(self.dtype if convert_dtype else None),
            check_rows=n_rows, check_cols=1
        )
        cdef uintptr_t _y_ptr = y_m.ptr

        # Null pointer means "no sample weights" to the C++ solver.
        cdef uintptr_t _sample_weight_ptr = 0
        if sample_weight is not None:
            sample_weight, _, _, _ = \
                input_to_cuml_array(sample_weight,
                                    check_dtype=self.dtype,
                                    check_rows=n_rows, check_cols=1,
                                    convert_to_dtype=(self.dtype
                                                      if convert_dtype
                                                      else None))
            _sample_weight_ptr = sample_weight.ptr

        IF GPUBUILD == 1:
            # Build the qn_params struct from the estimator hyper-parameters.
            # delta defaults to tol * 0.01 when unset (see class docstring).
            self.qnparams = QNParams(
                loss=self.loss,
                penalty_l1=self.l1_strength,
                penalty_l2=self.l2_strength,
                grad_tol=self.tol,
                change_tol=self.delta
                if self.delta is not None else (self.tol * 0.01),
                max_iter=self.max_iter,
                linesearch_max_iter=self.linesearch_max_iter,
                lbfgs_memory=self.lbfgs_memory,
                verbose=self.verbose,
                fit_intercept=self.fit_intercept,
                penalty_normalized=self.penalty_normalized
            )

            cdef qn_params qnpams = self.qnparams.params

            # Classification losses infer the class count from y;
            # only softmax supports more than two classes.
            solves_classification = qnpams.loss in {
                qn_loss_type.QN_LOSS_LOGISTIC,
                qn_loss_type.QN_LOSS_SOFTMAX,
                qn_loss_type.QN_LOSS_SVC_L1,
                qn_loss_type.QN_LOSS_SVC_L2
            }
            solves_multiclass = qnpams.loss in {
                qn_loss_type.QN_LOSS_SOFTMAX
            }

            if solves_classification:
                self._num_classes = len(cp.unique(y_m))
            else:
                self._num_classes = 1

            if not solves_multiclass and self._num_classes > 2:
                raise ValueError(
                    f"The selected solver ({self.loss}) does not support"
                    f" more than 2 classes ({self._num_classes} discovered).")

            if qnpams.loss == qn_loss_type.QN_LOSS_SOFTMAX \
                    and self._num_classes <= 2:
                raise ValueError("Two classes or less cannot be trained"
                                 "with softmax (multinomial).")

            # Binary classifiers need a single weight column; multiclass
            # softmax needs one per class.
            if solves_classification and not solves_multiclass:
                self._num_classes_dim = self._num_classes - 1
            else:
                self._num_classes_dim = self._num_classes

            # One extra row holds the intercept when fit_intercept=True.
            if self.fit_intercept:
                coef_size = (self.n_cols + 1, self._num_classes_dim)
            else:
                coef_size = (self.n_cols, self._num_classes_dim)

            # warm_start reuses the previous solution as initialization.
            if self._coef_ is None or not self.warm_start:
                self._coef_ = CumlArray.zeros(
                    coef_size, dtype=self.dtype, order='C')

            cdef uintptr_t _coef_ptr = self._coef_.ptr

            # Out-parameters filled by the C++ solver.
            cdef float objective32
            cdef double objective64
            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            cdef int num_iters

            if self.dtype == np.float32:
                if sparse_input:
                    qnFitSparse[float, int](
                        handle_[0],
                        qnpams,
                        <float*><uintptr_t> X_m.data.ptr,
                        <int*><uintptr_t> X_m.indices.ptr,
                        <int*><uintptr_t> X_m.indptr.ptr,
                        <int> X_m.nnz,
                        <float*> _y_ptr,
                        <int> n_rows,
                        <int> self.n_cols,
                        <int> self._num_classes,
                        <float*> _coef_ptr,
                        <float*> &objective32,
                        <int*> &num_iters,
                        <float*> _sample_weight_ptr)
                else:
                    qnFit[float, int](
                        handle_[0],
                        qnpams,
                        <float*><uintptr_t> X_m.ptr,
                        <bool> _is_col_major(X_m),
                        <float*> _y_ptr,
                        <int> n_rows,
                        <int> self.n_cols,
                        <int> self._num_classes,
                        <float*> _coef_ptr,
                        <float*> &objective32,
                        <int*> &num_iters,
                        <float*> _sample_weight_ptr)

                self.objective = objective32
            else:
                if sparse_input:
                    qnFitSparse[double, int](
                        handle_[0],
                        qnpams,
                        <double*><uintptr_t> X_m.data.ptr,
                        <int*><uintptr_t> X_m.indices.ptr,
                        <int*><uintptr_t> X_m.indptr.ptr,
                        <int> X_m.nnz,
                        <double*> _y_ptr,
                        <int> n_rows,
                        <int> self.n_cols,
                        <int> self._num_classes,
                        <double*> _coef_ptr,
                        <double*> &objective64,
                        <int*> &num_iters,
                        <double*> _sample_weight_ptr)
                else:
                    qnFit[double, int](
                        handle_[0],
                        qnpams,
                        <double*><uintptr_t> X_m.ptr,
                        <bool> _is_col_major(X_m),
                        <double*> _y_ptr,
                        <int> n_rows,
                        <int> self.n_cols,
                        <int> self._num_classes,
                        <double*> _coef_ptr,
                        <double*> &objective64,
                        <int*> &num_iters,
                        <double*> _sample_weight_ptr)

                self.objective = objective64

            self.num_iters = num_iters

        # Derive `intercept_` from the packed parameter array
        # (helper defined elsewhere in this file).
        self._calc_intercept()

        self.handle.sync()

        del X_m
        del y_m

        return self
    @cuml.internals.api_base_return_array_skipall
    def _decision_function(self, X, convert_dtype=False) -> CumlArray:
        """
        Gives confidence score for X

        Parameters
        ----------
        X : array-like (device or host) shape = (n_samples, n_features)
            Dense matrix (floats or doubles) of shape (n_samples, n_features).
            Acceptable formats: cuDF DataFrame, NumPy ndarray, Numba device
            ndarray, cuda array interface compliant array like CuPy
        convert_dtype : bool, optional (default = False)
            When set to True, the predict method will, when necessary, convert
            the input to the data type which was used to train the model. This
            will increase memory used for the method.

        Returns
        ----------
        y: array-like (device)
            Dense matrix (floats or doubles) of shape (n_samples, n_classes)
        """
        # The public coef_ view fixes the expected dtype and feature count.
        coefs = self.coef_
        dtype = coefs.dtype
        _num_classes_dim, n_cols = coefs.shape

        sparse_input = is_sparse(X)

        # Handle sparse inputs
        if sparse_input:
            X_m = SparseCumlArray(
                X,
                convert_to_dtype=(dtype if convert_dtype else None),
                convert_index=np.int32
            )
            n_rows, n_cols = X_m.shape
            dtype = X_m.dtype

        # Handle dense inputs
        else:
            X_m, n_rows, n_cols, dtype = input_to_cuml_array(
                X, check_dtype=dtype,
                convert_to_dtype=(dtype if convert_dtype else None),
                check_cols=n_cols,
                order='K'
            )

        # One score column per class dimension; 1-D output for binary.
        if _num_classes_dim > 1:
            shape = (_num_classes_dim, n_rows)
        else:
            shape = (n_rows,)
        scores = CumlArray.zeros(shape=shape, dtype=dtype, order='F')

        cdef uintptr_t _coef_ptr = self._coef_.ptr
        cdef uintptr_t _scores_ptr = scores.ptr

        IF GPUBUILD == 1:
            # A model loaded from pickle may lack the solver struct; rebuild
            # it from the stored hyper-parameters.
            if not hasattr(self, 'qnparams'):
                self.qnparams = QNParams(
                    loss=self.loss,
                    penalty_l1=self.l1_strength,
                    penalty_l2=self.l2_strength,
                    grad_tol=self.tol,
                    change_tol=self.delta
                    if self.delta is not None else (self.tol * 0.01),
                    max_iter=self.max_iter,
                    linesearch_max_iter=self.linesearch_max_iter,
                    lbfgs_memory=self.lbfgs_memory,
                    verbose=self.verbose,
                    fit_intercept=self.fit_intercept,
                    penalty_normalized=self.penalty_normalized
                )

            # Helper defined elsewhere in this class/file.
            _num_classes = self.get_num_classes(_num_classes_dim)

            cdef qn_params qnpams = self.qnparams.params
            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            if dtype == np.float32:
                if sparse_input:
                    qnDecisionFunctionSparse[float, int](
                        handle_[0],
                        qnpams,
                        <float*><uintptr_t> X_m.data.ptr,
                        <int*><uintptr_t> X_m.indices.ptr,
                        <int*><uintptr_t> X_m.indptr.ptr,
                        <int> X_m.nnz,
                        <int> n_rows,
                        <int> n_cols,
                        <int> _num_classes,
                        <float*> _coef_ptr,
                        <float*> _scores_ptr)
                else:
                    qnDecisionFunction[float, int](
                        handle_[0],
                        qnpams,
                        <float*><uintptr_t> X_m.ptr,
                        <bool> _is_col_major(X_m),
                        <int> n_rows,
                        <int> n_cols,
                        <int> _num_classes,
                        <float*> _coef_ptr,
                        <float*> _scores_ptr)
            else:
                if sparse_input:
                    qnDecisionFunctionSparse[double, int](
                        handle_[0],
                        qnpams,
                        <double*><uintptr_t> X_m.data.ptr,
                        <int*><uintptr_t> X_m.indices.ptr,
                        <int*><uintptr_t> X_m.indptr.ptr,
                        <int> X_m.nnz,
                        <int> n_rows,
                        <int> n_cols,
                        <int> _num_classes,
                        <double*> _coef_ptr,
                        <double*> _scores_ptr)
                else:
                    qnDecisionFunction[double, int](
                        handle_[0],
                        qnpams,
                        <double*><uintptr_t> X_m.ptr,
                        <bool> _is_col_major(X_m),
                        <int> n_rows,
                        <int> n_cols,
                        <int> _num_classes,
                        <double*> _coef_ptr,
                        <double*> _scores_ptr)

        self._calc_intercept()

        self.handle.sync()

        del X_m

        return scores
    @generate_docstring(
        X='dense_sparse',
        return_values={
            'name': 'preds',
            'type': 'dense',
            'description': 'Predicted values',
            'shape': '(n_samples, 1)'
        })
    @cuml.internals.api_base_return_array(get_output_dtype=True)
    def predict(self, X, convert_dtype=False) -> CumlArray:
        """
        Predicts the y for X.
        """
        # The fitted coefficient matrix fixes the expected dtype and the
        # number of features; the input is validated (and optionally
        # converted) against both.
        coefs = self.coef_
        dtype = coefs.dtype
        _num_classes_dim, n_cols = coefs.shape
        sparse_input = is_sparse(X)

        # Handle sparse inputs
        if sparse_input:
            X_m = SparseCumlArray(
                X,
                convert_to_dtype=(dtype if convert_dtype else None),
                convert_index=np.int32
            )
            n_rows, n_cols = X_m.shape

        # Handle dense inputs
        else:
            X_m, n_rows, n_cols, dtype = input_to_cuml_array(
                X, check_dtype=dtype,
                convert_to_dtype=(dtype if convert_dtype else None),
                check_cols=n_cols,
                order='K'
            )

        preds = CumlArray.zeros(shape=n_rows, dtype=dtype,
                                index=X_m.index)
        cdef uintptr_t _coef_ptr = self._coef_.ptr
        cdef uintptr_t _pred_ptr = preds.ptr

        # temporary fix for dask-sql empty partitions
        if(n_rows == 0):
            return preds

        IF GPUBUILD == 1:
            # Lazily rebuild the C-level solver parameters when missing,
            # e.g. on a model restored from pickle that never refit.
            if not hasattr(self, 'qnparams'):
                self.qnparams = QNParams(
                    loss=self.loss,
                    penalty_l1=self.l1_strength,
                    penalty_l2=self.l2_strength,
                    grad_tol=self.tol,
                    change_tol=self.delta
                    if self.delta is not None else (self.tol * 0.01),
                    max_iter=self.max_iter,
                    linesearch_max_iter=self.linesearch_max_iter,
                    lbfgs_memory=self.lbfgs_memory,
                    verbose=self.verbose,
                    fit_intercept=self.fit_intercept,
                    penalty_normalized=self.penalty_normalized
                )

            _num_classes = self.get_num_classes(_num_classes_dim)

            cdef qn_params qnpams = self.qnparams.params
            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            # Dispatch on dtype (float32/float64) and sparse vs dense layout;
            # each branch calls the matching libcuml QN predict kernel.
            if dtype == np.float32:
                if sparse_input:
                    qnPredictSparse[float, int](
                        handle_[0],
                        qnpams,
                        <float*><uintptr_t> X_m.data.ptr,
                        <int*><uintptr_t> X_m.indices.ptr,
                        <int*><uintptr_t> X_m.indptr.ptr,
                        <int> X_m.nnz,
                        <int> n_rows,
                        <int> n_cols,
                        <int> _num_classes,
                        <float*> _coef_ptr,
                        <float*> _pred_ptr)
                else:
                    qnPredict[float, int](
                        handle_[0],
                        qnpams,
                        <float*><uintptr_t> X_m.ptr,
                        <bool> _is_col_major(X_m),
                        <int> n_rows,
                        <int> n_cols,
                        <int> _num_classes,
                        <float*> _coef_ptr,
                        <float*> _pred_ptr)
            else:
                if sparse_input:
                    qnPredictSparse[double, int](
                        handle_[0],
                        qnpams,
                        <double*><uintptr_t> X_m.data.ptr,
                        <int*><uintptr_t> X_m.indices.ptr,
                        <int*><uintptr_t> X_m.indptr.ptr,
                        <int> X_m.nnz,
                        <int> n_rows,
                        <int> n_cols,
                        <int> _num_classes,
                        <double*> _coef_ptr,
                        <double*> _pred_ptr)
                else:
                    qnPredict[double, int](
                        handle_[0],
                        qnpams,
                        <double*><uintptr_t> X_m.ptr,
                        <bool> _is_col_major(X_m),
                        <int> n_rows,
                        <int> n_cols,
                        <int> _num_classes,
                        <double*> _coef_ptr,
                        <double*> _pred_ptr)

        # Refresh intercept_, which is derived from the coefficient buffer.
        self._calc_intercept()

        self.handle.sync()

        del X_m

        return preds
    def score(self, X, y):
        """Return the mean accuracy of ``self.predict(X)`` with respect
        to ``y``."""
        # GPUBUILD is a compile-time constant; in a CPU-only build this
        # body compiles away and the method returns None.
        if GPUBUILD == 1:
            return accuracy_score(y, self.predict(X))
    def get_num_classes(self, _num_classes_dim):
        """
        Retrieves the number of classes from the classes dimension
        in the coefficients.
        """
        IF GPUBUILD == 1:
            cdef qn_params qnpams = self.qnparams.params
            # Loss functions that solve a classification problem.
            solves_classification = qnpams.loss in {
                qn_loss_type.QN_LOSS_LOGISTIC,
                qn_loss_type.QN_LOSS_SOFTMAX,
                qn_loss_type.QN_LOSS_SVC_L1,
                qn_loss_type.QN_LOSS_SVC_L2
            }
            # Of those, only softmax stores one coefficient row per class;
            # binary classifiers store a single row for two classes.
            solves_multiclass = qnpams.loss in {
                qn_loss_type.QN_LOSS_SOFTMAX
            }
            if solves_classification and not solves_multiclass:
                _num_classes = _num_classes_dim + 1
            else:
                _num_classes = _num_classes_dim
            return _num_classes
    def _calc_intercept(self):
        """
        If `fit_intercept == True`, then the last row of `coef_` contains
        `intercept_`. This should be called after every function that sets
        `coef_`
        """
        if self.fit_intercept:
            self.intercept_ = self._coef_[-1]
            return

        # Without a fitted intercept, expose an explicit zero intercept:
        # one value for the binary case, one per class otherwise.
        _num_classes_dim, _ = self.coef_.shape
        _num_classes = self.get_num_classes(_num_classes_dim)

        if _num_classes == 2:
            self.intercept_ = CumlArray.zeros(shape=1)
        else:
            self.intercept_ = CumlArray.zeros(shape=_num_classes)
def get_param_names(self):
return super().get_param_names() + \
['loss', 'fit_intercept', 'l1_strength', 'l2_strength',
'max_iter', 'tol', 'linesearch_max_iter', 'lbfgs_memory',
'warm_start', 'delta', 'penalty_normalized']
def _is_col_major(X):
return getattr(X, "order", "F").upper() == "F"
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/solvers/__init__.py | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.solvers.cd import CD
from cuml.solvers.sgd import SGD
from cuml.solvers.qn import QN
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/solvers/sgd.pyx | # Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import gpu_only_import_from
cuda = gpu_only_import_from('numba', 'cuda')
from libc.stdint cimport uintptr_t
import cuml.internals
from cuml.internals.base import Base
from cuml.internals.array import CumlArray
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.common.doc_utils import generate_docstring
from cuml.common import input_to_cuml_array
from cuml.internals.mixins import FMajorInputTagMixin
IF GPUBUILD == 1:
    from libcpp cimport bool
    from pylibraft.common.handle cimport handle_t

    # C++ entry points of the SGD solver in libcuml: float and double
    # overloads for fitting, regression prediction, and binary-class
    # prediction. `except +` translates C++ exceptions to Python ones.
    cdef extern from "cuml/solvers/solver.hpp" namespace "ML::Solver":

        cdef void sgdFit(handle_t& handle,
                         float *input,
                         int n_rows,
                         int n_cols,
                         float *labels,
                         float *coef,
                         float *intercept,
                         bool fit_intercept,
                         int batch_size,
                         int epochs,
                         int lr_type,
                         float eta0,
                         float power_t,
                         int loss,
                         int penalty,
                         float alpha,
                         float l1_ratio,
                         bool shuffle,
                         float tol,
                         int n_iter_no_change) except +

        cdef void sgdFit(handle_t& handle,
                         double *input,
                         int n_rows,
                         int n_cols,
                         double *labels,
                         double *coef,
                         double *intercept,
                         bool fit_intercept,
                         int batch_size,
                         int epochs,
                         int lr_type,
                         double eta0,
                         double power_t,
                         int loss,
                         int penalty,
                         double alpha,
                         double l1_ratio,
                         bool shuffle,
                         double tol,
                         int n_iter_no_change) except +

        cdef void sgdPredict(handle_t& handle,
                             const float *input,
                             int n_rows,
                             int n_cols,
                             const float *coef,
                             float intercept,
                             float *preds,
                             int loss) except +

        cdef void sgdPredict(handle_t& handle,
                             const double *input,
                             int n_rows,
                             int n_cols,
                             const double *coef,
                             double intercept,
                             double *preds,
                             int loss) except +

        cdef void sgdPredictBinaryClass(handle_t& handle,
                                        const float *input,
                                        int n_rows,
                                        int n_cols,
                                        const float *coef,
                                        float intercept,
                                        float *preds,
                                        int loss) except +

        cdef void sgdPredictBinaryClass(handle_t& handle,
                                        const double *input,
                                        int n_rows,
                                        int n_cols,
                                        const double *coef,
                                        double intercept,
                                        double *preds,
                                        int loss) except +
class SGD(Base,
          FMajorInputTagMixin):
    """
    Stochastic Gradient Descent is a very common machine learning algorithm
    where one optimizes some cost function via gradient steps. This makes SGD
    very attractive for large problems when the exact solution is hard or even
    impossible to find.

    cuML's SGD algorithm accepts a numpy matrix or a cuDF DataFrame as the
    input dataset. The SGD algorithm currently works with linear regression,
    ridge regression and SVM models.

    Examples
    --------

    .. code-block:: python

        >>> import numpy as np
        >>> import cudf
        >>> from cuml.solvers import SGD as cumlSGD
        >>> X = cudf.DataFrame()
        >>> X['col1'] = np.array([1,1,2,2], dtype=np.float32)
        >>> X['col2'] = np.array([1,2,2,3], dtype=np.float32)
        >>> y = cudf.Series(np.array([1, 1, 2, 2], dtype=np.float32))
        >>> pred_data = cudf.DataFrame()
        >>> pred_data['col1'] = np.asarray([3, 2], dtype=np.float32)
        >>> pred_data['col2'] = np.asarray([5, 5], dtype=np.float32)
        >>> cu_sgd = cumlSGD(learning_rate='constant', eta0=0.005, epochs=2000,
        ...                  fit_intercept=True, batch_size=2,
        ...                  tol=0.0, penalty='none', loss='squared_loss')
        >>> cu_sgd.fit(X, y)
        SGD()
        >>> cu_pred = cu_sgd.predict(pred_data).to_numpy()
        >>> print(" cuML intercept : ", cu_sgd.intercept_) # doctest: +SKIP
        cuML intercept :  0.00418...
        >>> print(" cuML coef : ", cu_sgd.coef_) # doctest: +SKIP
        cuML coef :  0    0.9841...
        1    0.0097...
        dtype: float32
        >>> print("cuML predictions : ", cu_pred) # doctest: +SKIP
        cuML predictions :  [3.0055... 2.0214...]

    Parameters
    ----------
    loss : 'hinge', 'log', 'squared_loss' (default = 'squared_loss')
        'hinge' uses linear SVM
        'log' uses logistic regression
        'squared_loss' uses linear regression
    penalty : 'none', 'l1', 'l2', 'elasticnet' (default = 'none')
        'none' does not perform any regularization
        'l1' performs L1 norm (Lasso) which minimizes the sum of the abs value
        of coefficients
        'l2' performs L2 norm (Ridge) which minimizes the sum of the square of
        the coefficients
        'elasticnet' performs Elastic Net regularization which is a weighted
        average of L1 and L2 norms
    alpha : float (default = 0.0001)
        The constant value which decides the degree of regularization
    fit_intercept : boolean (default = True)
        If True, the model tries to correct for the global mean of y.
        If False, the model expects that you have centered the data.
    epochs : int (default = 1000)
        The number of times the model should iterate through the entire dataset
        during training (default = 1000)
    tol : float (default = 1e-3)
        The training process will stop if current_loss > previous_loss - tol
    shuffle : boolean (default = True)
        True, shuffles the training data after each epoch
        False, does not shuffle the training data after each epoch
    eta0 : float (default = 0.001)
        Initial learning rate
    power_t : float (default = 0.5)
        The exponent used for calculating the invscaling learning rate
    batch_size : int (default=32)
        The number of samples to use for each batch.
    learning_rate : 'optimal', 'constant', 'invscaling', \
            'adaptive' (default = 'constant')
        Optimal option supported in the next version
        constant keeps the learning rate constant
        adaptive changes the learning rate if the training loss or the
        validation accuracy does not improve for n_iter_no_change epochs.
        The old learning rate is generally divided by 5
    n_iter_no_change : int (default = 5)
        The number of epochs to train without any improvement in the model
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    """

    # Device-array attributes exposed through cuML's output-type machinery.
    coef_ = CumlArrayDescriptor()
    classes_ = CumlArrayDescriptor()

    def __init__(self, *, loss='squared_loss', penalty='none', alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, epochs=1000, tol=1e-3,
                 shuffle=True, learning_rate='constant', eta0=0.001,
                 power_t=0.5, batch_size=32, n_iter_no_change=5, handle=None,
                 output_type=None, verbose=False):
        # Validate the loss and penalty choices before any state is set.
        if loss in ['hinge', 'log', 'squared_loss']:
            self.loss = loss
        else:
            msg = "loss {!r} is not supported"
            raise TypeError(msg.format(loss))

        # Treat penalty=None as the string 'none' for convenience.
        if penalty is None:
            penalty = 'none'
        if penalty in ['none', 'l1', 'l2', 'elasticnet']:
            self.penalty = penalty
        else:
            msg = "penalty {!r} is not supported"
            raise TypeError(msg.format(penalty))

        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)

        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.epochs = epochs
        self.tol = tol
        self.shuffle = shuffle
        self.eta0 = eta0
        self.power_t = power_t

        # Map the learning-rate schedule name to the integer code the C++
        # solver expects (0=optimal, 1=constant, 2=invscaling, 3=adaptive).
        if learning_rate in ['optimal', 'constant', 'invscaling', 'adaptive']:
            self.learning_rate = learning_rate

            if learning_rate in ["constant", "invscaling", "adaptive"]:
                if self.eta0 <= 0.0:
                    raise ValueError("eta0 must be > 0")

            if learning_rate == 'optimal':
                self.lr_type = 0

                raise TypeError("This option will be supported in the future")

                # TODO: uncomment this when optimal learning rate is supported
                # if self.alpha == 0:
                #     raise ValueError("alpha must be > 0 since "
                #                      "learning_rate is 'optimal'. alpha is "
                #                      "used to compute the optimal learning "
                #                      " rate.")

            elif learning_rate == 'constant':
                self.lr_type = 1
                self.lr = eta0
            elif learning_rate == 'invscaling':
                self.lr_type = 2
            elif learning_rate == 'adaptive':
                self.lr_type = 3
        else:
            msg = "learning rate {!r} is not supported"
            raise TypeError(msg.format(learning_rate))

        self.batch_size = batch_size
        self.n_iter_no_change = n_iter_no_change
        self.intercept_value = 0.0
        self.coef_ = None
        self.intercept_ = None

    def _check_alpha(self, alpha):
        # Reject non-positive regularization strengths.
        for el in alpha:
            if el <= 0.0:
                msg = "alpha values have to be positive"
                raise TypeError(msg.format(alpha))

    def _get_loss_int(self):
        # Integer loss code consumed by the C++ solver.
        return {
            'squared_loss': 0,
            'log': 1,
            'hinge': 2,
        }[self.loss]

    def _get_penalty_int(self):
        # Integer penalty code consumed by the C++ solver.
        return {
            'none': 0,
            'l1': 1,
            'l2': 2,
            'elasticnet': 3
        }[self.penalty]

    @generate_docstring()
    @cuml.internals.api_base_return_any(set_output_dtype=True)
    def fit(self, X, y, convert_dtype=False) -> "SGD":
        """
        Fit the model with X and y.

        """
        # Validate inputs; y must match X's dtype and row count.
        X_m, n_rows, self.n_cols, self.dtype = \
            input_to_cuml_array(X, check_dtype=[np.float32, np.float64])

        y_m, _, _, _ = \
            input_to_cuml_array(y, check_dtype=self.dtype,
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_rows=n_rows, check_cols=1)

        # Classifier subclasses record the distinct labels for later use.
        _estimator_type = getattr(self, '_estimator_type', None)
        if _estimator_type == "classifier":
            self.classes_ = cp.unique(y_m)

        cdef uintptr_t _X_ptr = X_m.ptr
        cdef uintptr_t _y_ptr = y_m.ptr

        self.n_alpha = 1

        self.coef_ = CumlArray.zeros(self.n_cols,
                                     dtype=self.dtype)
        cdef uintptr_t _coef_ptr = self.coef_.ptr

        # The C++ solver writes the intercept into these stack variables.
        cdef float _c_intercept_f32
        cdef double _c_intercept_f64

        IF GPUBUILD == 1:
            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            if self.dtype == np.float32:
                sgdFit(handle_[0],
                       <float*>_X_ptr,
                       <int>n_rows,
                       <int>self.n_cols,
                       <float*>_y_ptr,
                       <float*>_coef_ptr,
                       <float*>&_c_intercept_f32,
                       <bool>self.fit_intercept,
                       <int>self.batch_size,
                       <int>self.epochs,
                       <int>self.lr_type,
                       <float>self.eta0,
                       <float>self.power_t,
                       <int>self._get_loss_int(),
                       <int>self._get_penalty_int(),
                       <float>self.alpha,
                       <float>self.l1_ratio,
                       <bool>self.shuffle,
                       <float>self.tol,
                       <int>self.n_iter_no_change)

                self.intercept_ = _c_intercept_f32
            else:
                sgdFit(handle_[0],
                       <double*>_X_ptr,
                       <int>n_rows,
                       <int>self.n_cols,
                       <double*>_y_ptr,
                       <double*>_coef_ptr,
                       <double*>&_c_intercept_f64,
                       <bool>self.fit_intercept,
                       <int>self.batch_size,
                       <int>self.epochs,
                       <int>self.lr_type,
                       <double>self.eta0,
                       <double>self.power_t,
                       <int>self._get_loss_int(),
                       <int>self._get_penalty_int(),
                       <double>self.alpha,
                       <double>self.l1_ratio,
                       <bool>self.shuffle,
                       <double>self.tol,
                       <int>self.n_iter_no_change)

                self.intercept_ = _c_intercept_f64

        self.handle.sync()

        del X_m
        del y_m

        return self

    @generate_docstring(return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Predicted values',
                                       'shape': '(n_samples, 1)'})
    def predict(self, X, convert_dtype=False) -> CumlArray:
        """
        Predicts the y for X.

        """
        X_m, _n_rows, _n_cols, self.dtype = \
            input_to_cuml_array(X, check_dtype=self.dtype,
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_cols=self.n_cols)

        cdef uintptr_t _X_ptr = X_m.ptr
        cdef uintptr_t _coef_ptr = self.coef_.ptr
        preds = CumlArray.zeros(_n_rows, dtype=self.dtype, index=X_m.index)
        cdef uintptr_t _preds_ptr = preds.ptr

        IF GPUBUILD == 1:
            # Dispatch to the float32 or float64 overload of the C++ kernel.
            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            if self.dtype == np.float32:
                sgdPredict(handle_[0],
                           <float*>_X_ptr,
                           <int>_n_rows,
                           <int>_n_cols,
                           <float*>_coef_ptr,
                           <float>self.intercept_,
                           <float*>_preds_ptr,
                           <int>self._get_loss_int())
            else:
                sgdPredict(handle_[0],
                           <double*>_X_ptr,
                           <int>_n_rows,
                           <int>_n_cols,
                           <double*>_coef_ptr,
                           <double>self.intercept_,
                           <double*>_preds_ptr,
                           <int>self._get_loss_int())

        self.handle.sync()

        del X_m

        return preds

    @generate_docstring(return_values={'name': 'preds',
                                       'type': 'dense',
                                       'description': 'Predicted values',
                                       'shape': '(n_samples, 1)'})
    @cuml.internals.api_base_return_array(get_output_dtype=True)
    def predictClass(self, X, convert_dtype=False) -> CumlArray:
        """
        Predicts the y for X.

        """
        X_m, _n_rows, _n_cols, dtype = \
            input_to_cuml_array(X, check_dtype=self.dtype,
                                convert_to_dtype=(self.dtype if convert_dtype
                                                  else None),
                                check_cols=self.n_cols)

        cdef uintptr_t _X_ptr = X_m.ptr
        cdef uintptr_t _coef_ptr = self.coef_.ptr
        preds = CumlArray.zeros(_n_rows, dtype=dtype, index=X_m.index)
        cdef uintptr_t _preds_ptr = preds.ptr

        IF GPUBUILD == 1:
            # Binary-class prediction: thresholds the decision value on GPU.
            cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
            if dtype.type == np.float32:
                sgdPredictBinaryClass(handle_[0],
                                      <float*>_X_ptr,
                                      <int>_n_rows,
                                      <int>_n_cols,
                                      <float*>_coef_ptr,
                                      <float>self.intercept_,
                                      <float*>_preds_ptr,
                                      <int>self._get_loss_int())
            else:
                sgdPredictBinaryClass(handle_[0],
                                      <double*>_X_ptr,
                                      <int>_n_rows,
                                      <int>_n_cols,
                                      <double*>_coef_ptr,
                                      <double>self.intercept_,
                                      <double*>_preds_ptr,
                                      <int>self._get_loss_int())

        self.handle.sync()

        del X_m

        return preds

    def get_param_names(self):
        # Hyperparameters exposed through get_params/set_params.
        return super().get_param_names() + [
            "loss",
            "penalty",
            "alpha",
            "l1_ratio",
            "fit_intercept",
            "epochs",
            "tol",
            "shuffle",
            "learning_rate",
            "eta0",
            "power_t",
            "batch_size",
            "n_iter_no_change",
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tsa/arima.pxd | #
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
cdef extern from "cuml/tsa/arima_common.h" namespace "ML":
    # Mirrors the C++ ARIMAOrder POD describing a (p, d, q)(P, D, Q)s model
    # plus intercept flag and exogenous-regressor count.
    ctypedef struct ARIMAOrder:
        int p  # Basic order
        int d
        int q
        int P  # Seasonal order
        int D
        int Q
        int s  # Seasonal period
        int k  # Fit intercept?
        int n_exog  # Number of exogenous regressors
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tsa/batched_lbfgs.py | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.common import has_scipy
import cuml.internals.logger as logger
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import_from,
null_decorator,
)
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
np = cpu_only_import("numpy")
def _fd_fprime(x, f, h):
"""(internal) Computes finite difference."""
g = np.zeros(len(x))
for i in range(len(x)):
xph = np.copy(x)
xmh = np.copy(x)
xph[i] += h
xmh[i] -= h
fph = f(xph)
fmh = f(xmh)
g[i] = (fph - fmh) / (2 * h)
return g
@nvtx_annotate(message="LBFGS", domain="cuml_python")
def batched_fmin_lbfgs_b(
    func,
    x0,
    num_batches,
    fprime=None,
    args=(),
    bounds=None,
    m=10,
    factr=1e7,
    pgtol=1e-5,
    epsilon=1e-8,
    iprint=-1,
    maxiter=15000,
    maxls=20,
):
    """A batch-aware L-BFGS-B implementation to minimize a loss function `f` given
    an initial set of parameters `x0`.

    Parameters
    ----------
    func : function (x: array) -> array[M] (M = n_batches)
        The function to minimize. The function should return an array of
        size = `num_batches`
    x0 : array
        Starting parameters
    fprime : function (x: array) -> array[M*n_params] (optional)
        The gradient. Should return an array of derivatives for each
        parameter over batches.
        When omitted, uses Finite-differencing to estimate the gradient.
    args : Tuple
        Additional arguments to func and fprime
    bounds : List[Tuple[float, float]]
        Box-constrains on the parameters
    m : int
        L-BFGS parameter: number of previous arrays to store when
        estimating inverse Hessian.
    factr : float
        Stopping criterion when function evaluation not progressing.
        Stop when `|f(xk+1) - f(xk)| < factor*eps_mach`
        where `eps_mach` is the machine precision
    pgtol : float
        Stopping criterion when gradient is sufficiently "flat".
        Stop when |grad| < pgtol.
    epsilon : float
        Finite differencing step size when approximating `fprime`
    iprint : int
        -1 for no diagnostic info
        n=1-100 for diagnostic info every n steps.
        >100 for detailed diagnostic info
    maxiter : int
        Maximum number of L-BFGS iterations
    maxls : int
        Maximum number of line-search iterations.

    Returns
    -------
    xk : array
        Concatenated optimized parameters for all batch members.
    n_iterations : array[int32]
        Per-series iteration counts.
    warn_flag : array
        Per-series status (0 = converged, 2 = stopped for another reason).
    """
    # This drives SciPy's low-level Fortran routine `setulb` directly so
    # that one line-search step can be advanced for every series in the
    # batch before `func`/`fprime` are evaluated once for all of them.
    if has_scipy():
        from scipy.optimize import _lbfgsb
    else:
        raise RuntimeError("Scipy is needed to run batched_fmin_lbfgs_b")

    # Number of parameters per series.
    n = len(x0) // num_batches

    if fprime is None:

        def fprime_f(x):
            return _fd_fprime(x, func, epsilon)

        fprime = fprime_f

    if bounds is None:
        bounds = [(None, None)] * n

    # Encode the box constraints in the integer format setulb expects:
    # 0 = unbounded, 1 = lower only, 2 = both, 3 = upper only.
    nbd = np.zeros(n, np.int32)
    low_bnd = np.zeros(n, np.float64)
    upper_bnd = np.zeros(n, np.float64)
    bounds_map = {(None, None): 0, (1, None): 1, (1, 1): 2, (None, 1): 3}
    for i in range(0, n):
        lb, ub = bounds[i]
        if lb is not None:
            low_bnd[i] = lb
            lb = 1
        if ub is not None:
            upper_bnd[i] = ub
            ub = 1
        nbd[i] = bounds_map[lb, ub]

    # working arrays needed by L-BFGS-B implementation in SciPy.
    # One for each series
    x = [
        np.copy(np.array(x0[ib * n : (ib + 1) * n], np.float64))
        for ib in range(num_batches)
    ]
    f = [np.copy(np.array(0.0, np.float64)) for ib in range(num_batches)]
    g = [np.copy(np.zeros((n,), np.float64)) for ib in range(num_batches)]
    wa = [
        np.copy(np.zeros(2 * m * n + 5 * n + 11 * m * m + 8 * m, np.float64))
        for ib in range(num_batches)
    ]
    iwa = [np.copy(np.zeros(3 * n, np.int32)) for ib in range(num_batches)]
    task = [np.copy(np.zeros(1, "S60")) for ib in range(num_batches)]
    csave = [np.copy(np.zeros(1, "S60")) for ib in range(num_batches)]
    lsave = [np.copy(np.zeros(4, np.int32)) for ib in range(num_batches)]
    isave = [np.copy(np.zeros(44, np.int32)) for ib in range(num_batches)]
    dsave = [np.copy(np.zeros(29, np.float64)) for ib in range(num_batches)]

    for ib in range(num_batches):
        task[ib][:] = "START"

    n_iterations = np.zeros(num_batches, dtype=np.int32)

    converged = num_batches * [False]
    warn_flag = np.zeros(num_batches)

    while not all(converged):
        with nvtx_annotate("LBFGS-ITERATION", domain="cuml_python"):
            # Advance each still-running series by one setulb step.
            for ib in range(num_batches):
                if converged[ib]:
                    continue
                _lbfgsb.setulb(
                    m,
                    x[ib],
                    low_bnd,
                    upper_bnd,
                    nbd,
                    f[ib],
                    g[ib],
                    factr,
                    pgtol,
                    wa[ib],
                    iwa[ib],
                    task[ib],
                    iprint,
                    csave[ib],
                    lsave[ib],
                    isave[ib],
                    dsave[ib],
                    maxls,
                )

            # Evaluate the batched loss and gradient once for every series.
            xk = np.concatenate(x)
            fk = func(xk)
            gk = fprime(xk)

            # Dispatch on the status string setulb wrote back for each
            # series: FG = needs f/g, NEW_X = step accepted, CONV = done.
            for ib in range(num_batches):
                if converged[ib]:
                    continue
                task_str = task[ib].tobytes()
                task_str_strip = task[ib].tobytes().strip(b"\x00").strip()
                if task_str.startswith(b"FG"):
                    # needs function evaluation
                    f[ib] = fk[ib]
                    g[ib] = gk[ib * n : (ib + 1) * n]
                elif task_str.startswith(b"NEW_X"):
                    n_iterations[ib] += 1
                    if n_iterations[ib] >= maxiter:
                        task[ib][
                            :
                        ] = "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT"
                elif task_str_strip.startswith(b"CONV"):
                    converged[ib] = True
                    warn_flag[ib] = 0
                else:
                    converged[ib] = True
                    warn_flag[ib] = 2
                    continue

    xk = np.concatenate(x)

    if iprint > 0:
        logger.info(
            "CONVERGED in ({}-{}) iterations (|\\/f|={})".format(
                np.min(n_iterations),
                np.max(n_iterations),
                np.linalg.norm(fprime(xk), np.inf),
            )
        )

    if (warn_flag > 0).any():
        for ib in range(num_batches):
            if warn_flag[ib] > 0:
                logger.info(
                    "WARNING: id={} convergence issue: {}".format(
                        ib, task[ib].tobytes()
                    )
                )

    return xk, n_iterations, warn_flag
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tsa/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Start from an empty list; each add_module_gpu_default call appends its
# .pyx source to cython_sources when the corresponding algorithm flag is
# enabled in the build.
set(cython_sources "")
add_module_gpu_default("arima.pyx" ${arima_algo} ${tsa_algo})
add_module_gpu_default("auto_arima.pyx" ${auto_arima_algo} ${tsa_algo})
add_module_gpu_default("holtwinters.pyx" ${holtwinters_algo} ${tsa_algo})
add_module_gpu_default("seasonality.pyx" ${seasonality_algo} ${tsa_algo})
add_module_gpu_default("stationarity.pyx" ${stationarity_algo} ${tsa_algo})

# Compile the collected sources into tsa_-prefixed extension modules linked
# against the single-GPU cuML libraries.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${cuml_sg_libraries}"
  MODULE_PREFIX tsa_
  ASSOCIATED_TARGETS cuml
)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tsa/seasonality.pyx | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.internals.input_utils import input_to_host_array, input_to_cuml_array
np = cpu_only_import('numpy')
# TODO: #2234 and #2235
def python_seas_test(y, batch_size, n_obs, s, threshold=0.64):
    """Python prototype to be ported later in CUDA"""
    # TODO: our own implementation of STL
    from statsmodels.tsa.seasonal import STL

    decisions = []
    for ib in range(batch_size):
        stl_fit = STL(y[:, ib], s).fit()
        seas_component = stl_fit.seasonal
        resid_component = stl_fit.resid
        # Seasonal-strength heuristic of Wang, Smith & Hyndman:
        # 1 - Var(resid) / Var(resid + seasonal), clamped to [0, 1].
        strength = 1 - np.var(resid_component) / np.var(
            resid_component + seas_component)
        strength = max(0, min(1, strength))
        decisions.append(strength > threshold)
    return decisions
@cuml.internals.api_return_array(input_arg="y", get_output_type=True)
def seas_test(y, s, handle=None) -> CumlArray:
    """
    Perform Wang, Smith & Hyndman's test to decide whether seasonal
    differencing is needed

    Parameters
    ----------
    y : dataframe or array-like (device or host)
        The time series data, assumed to have each time series in columns.
        Acceptable formats: cuDF DataFrame, cuDF Series, NumPy ndarray,
        Numba device ndarray, cuda array interface compliant array like CuPy.
    s: integer
        Seasonal period (s > 1)
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.

    Returns
    -------
    stationarity : List[bool]
        For each series in the batch, whether it needs seasonal differencing
    """
    if s <= 1:
        raise ValueError(
            "ERROR: Invalid period for the seasonal differencing test: {}"
            .format(s))

    # At the moment we use a host array
    h_y, n_obs, batch_size, _ = \
        input_to_host_array(y, check_dtype=[np.float32, np.float64])

    # Temporary: Python implementation
    # The per-series results are copied back to a device array so the
    # api_return_array decorator can honor the requested output type.
    python_res = python_seas_test(h_y, batch_size, n_obs, s)

    d_res, *_ = input_to_cuml_array(np.array(python_res), check_dtype=bool)
    return d_res
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tsa/holtwinters.pyx | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import('cudf')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from libc.stdint cimport uintptr_t
import cuml.internals
from cuml.internals.input_utils import input_to_cupy_array
from cuml.internals import _deprecate_pos_args
from cuml.common import using_output_type
from cuml.internals.base import Base
from cuml.internals.array import CumlArray
from cuml.common.array_descriptor import CumlArrayDescriptor
from pylibraft.common.handle cimport handle_t
cdef extern from "cuml/tsa/holtwinters_params.h" namespace "ML":
    # Seasonal model flavors supported by the C++ HoltWinters solver.
    enum SeasonalType:
        ADDITIVE
        MULTIPLICATIVE


cdef extern from "cuml/tsa/holtwinters.h" namespace "ML::HoltWinters":
    # Workspace-size query plus float/double overloads of fit and forecast.
    cdef void buffer_size(
        int n, int batch_size, int frequency,
        int *start_leveltrend_len, int *start_season_len,
        int *components_len, int *error_len,
        int *leveltrend_coef_shift, int *season_coef_shift) except +

    cdef void fit(
        handle_t &handle, int n, int batch_size,
        int frequency, int start_periods, SeasonalType seasonal,
        float epsilon,
        float *data, float *level_ptr, float *trend_ptr,
        float *season_ptr, float *SSE_error_ptr) except +
    cdef void fit(
        handle_t &handle, int n, int batch_size,
        int frequency, int start_periods, SeasonalType seasonal,
        double epsilon,
        double *data, double *level_ptr, double *trend_ptr,
        double *season_ptr, double *SSE_error_ptr) except +

    cdef void forecast(
        handle_t &handle, int n, int batch_size, int frequency,
        int h, SeasonalType seasonal, float *level_ptr,
        float *trend_ptr, float *season_ptr, float *forecast_ptr) except +
    cdef void forecast(
        handle_t &handle, int n, int batch_size, int frequency,
        int h, SeasonalType seasonal, double *level_ptr,
        double *trend_ptr, double *season_ptr, double *forecast_ptr) except +
class ExponentialSmoothing(Base):
    """
    Implements a HoltWinters time series analysis model which is used in
    both forecasting future entries in a time series as well as in providing
    exponential smoothing, where weights are assigned against historical
    data with exponentially decreasing impact. This is done by analyzing
    three components of the data: level, trend, and seasonality.

    Notes
    -----
    *Known Limitations:* This version of ExponentialSmoothing currently
    provides only a limited number of features when compared to the
    `statsmodels.holtwinters.ExponentialSmoothing` model. Noticeably, it lacks:

    * predict : no support for in-sample prediction.
      https://github.com/rapidsai/cuml/issues/875
    * hessian : no support for returning Hessian matrix.
      https://github.com/rapidsai/cuml/issues/880
    * information : no support for returning Fisher matrix.
      https://github.com/rapidsai/cuml/issues/880
    * loglike : no support for returning Log-likelihood.
      https://github.com/rapidsai/cuml/issues/880

    Additionally, be warned that there may exist floating point instability
    issues in this model. Small values in endog may lead to faulty results.
    See https://github.com/rapidsai/cuml/issues/888 for more information.

    *Known Differences:* This version of ExponentialSmoothing differs from
    statsmodels in some other minor ways:

    * Cannot pass trend component or damped trend component
    * this version can take additional parameters `eps`,
      `start_periods`, `ts_num`, and `handle`
    * Score returns SSE rather than gradient logL
      https://github.com/rapidsai/cuml/issues/876
    * This version provides get_level(), get_trend(), get_season()

    Examples
    --------
    .. code-block:: python

        >>> from cuml import ExponentialSmoothing
        >>> import cudf
        >>> import cupy as cp
        >>> data = cudf.Series([1, 2, 3, 4, 5, 6,
        ...                     7, 8, 9, 10, 11, 12,
        ...                     2, 3, 4, 5, 6, 7,
        ...                     8, 9, 10, 11, 12, 13,
        ...                     3, 4, 5, 6, 7, 8, 9,
        ...                     10, 11, 12, 13, 14],
        ...                    dtype=cp.float64)
        >>> cu_hw = ExponentialSmoothing(data, seasonal_periods=12)
        >>> cu_hw.fit()
        ExponentialSmoothing()
        >>> cu_pred = cu_hw.forecast(4)
        >>> print('Forecasted points:', cu_pred) # doctest: +SKIP
        Forecasted points :
        0 4.000143766093652
        1 5.000000163513641
        2 6.000000000174092
        3 7.000000000000178

    Parameters
    ----------
    endog : array-like (device or host)
        Acceptable formats: cuDF DataFrame, cuDF Series,
        NumPy ndarray, Numba device ndarray, cuda array interface
        compliant array like CuPy.
        Note: cuDF.DataFrame types assumes data is in columns,
        while all other datatypes assume data is in rows.
        The endogenous dataset to be operated on.
    seasonal : 'additive', 'add', 'multiplicative', 'mul' \
        (default = 'additive')
        Whether the seasonal trend should be calculated
        additively or multiplicatively.
    seasonal_periods : int (default=2)
        The seasonality of the data (how often it
        repeats). For monthly data this should be 12,
        for weekly data, this should be 7.
    start_periods : int (default=2)
        Number of seasons to be used for seasonal seed values
    ts_num : int (default=1)
        The number of different time series that were passed
        in the endog param.
    eps : np.number > 0 (default=2.24e-3)
        The accuracy to which gradient descent should achieve.
        Note that changing this value may affect the forecasted results.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.
    verbose : int or boolean, default=False
        Sets logging level. It must be one of `cuml.common.logger.level_*`.
        See :ref:`verbosity-levels` for more info.
    output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
        'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
        Return results and set estimator attributes to the indicated output
        type. If None, the output type set at the module level
        (`cuml.global_settings.output_type`) will be used. See
        :ref:`output-data-type-configuration` for more info.
    """

    forecasted_points = CumlArrayDescriptor()
    level = CumlArrayDescriptor()
    trend = CumlArrayDescriptor()
    season = CumlArrayDescriptor()
    SSE = CumlArrayDescriptor()

    @_deprecate_pos_args(version="21.06")
    def __init__(self, endog, *, seasonal="additive",
                 seasonal_periods=2, start_periods=2,
                 ts_num=1, eps=2.24e-3, handle=None,
                 verbose=False, output_type=None):
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)

        # Total number of time series for forecasting.
        if not isinstance(ts_num, int):
            # BUGFIX: wrap non-str operands in str() — concatenating a type
            # object (or number) to a str raises an unrelated TypeError
            # instead of the intended diagnostic.
            raise TypeError("Type of ts_num must be int. Given: " +
                            str(type(ts_num)))
        if ts_num <= 0:
            raise ValueError("Must state at least 1 series. Given: " +
                             str(ts_num))
        self.ts_num = ts_num

        # Season length in the time series.
        if not isinstance(seasonal_periods, int):
            raise TypeError("Type of seasonal_periods must be int."
                            " Given: " + str(type(seasonal_periods)))
        if seasonal_periods < 2:
            raise ValueError("Frequency must be >= 2. Given: " +
                             str(seasonal_periods))
        self.seasonal_periods = seasonal_periods

        # Whether to perform additive or multiplicative STL decomposition.
        if seasonal in ["additive", "add"]:
            self.seasonal = "add"
            self._cpp_stype = ADDITIVE
        elif seasonal in ["multiplicative", "mul"]:
            self.seasonal = "mul"
            self._cpp_stype = MULTIPLICATIVE
        else:
            raise ValueError("Seasonal must be either "
                             "\"additive\" or \"multiplicative\".")

        # Number of seasons to be used for seasonal seed values.
        if not isinstance(start_periods, int):
            raise TypeError("Type of start_periods must be int. Given: " +
                            str(type(start_periods)))
        if start_periods < 2:
            raise ValueError("Start Periods must be >= 2. Given: " +
                             str(start_periods))
        if seasonal_periods < start_periods:
            raise ValueError("Seasonal_Periods (" + str(seasonal_periods) +
                             ") cannot be less than start_periods (" +
                             str(start_periods) + ").")
        self.start_periods = start_periods

        if not np.issubdtype(type(eps), np.number):
            raise TypeError("Epsilon provided is of type " + str(type(eps)) +
                            " and thus cannot be cast to float() or double()")
        if eps <= 0:
            # BUGFIX: eps is numeric; str() avoids a concatenation TypeError.
            raise ValueError("Epsilon must be positive. Given: " + str(eps))

        # Set up attributes:
        self.eps = eps
        self.endog = endog
        self.forecasted_points = []  # list for final forecast output
        self.level = []  # list for level values for each time series in batch
        self.trend = []  # list for trend values for each time series in batch
        self.season = []  # list for season values for each series in batch
        self.SSE = []  # SSE for all time series in batch
        self.fit_executed_flag = False
        self.h = 0  # largest forecast horizon computed so far

    def _check_dims(self, ts_input, is_cudf=False) -> CumlArray:
        """
        Validate the input's dimensionality against ts_num and flatten it
        to the 1-D device layout expected by the C++ solver. Sets self.n.

        NOTE(review): the `is_cudf` parameter is kept for interface
        compatibility but is always recomputed from the input type below,
        so any caller-supplied value is ignored.
        """
        err_mess = ("ExponentialSmoothing initialized with "
                    + str(self.ts_num) +
                    " time series, but data has dimension ")
        is_cudf = isinstance(ts_input, cudf.DataFrame)
        mod_ts_input = input_to_cupy_array(ts_input, order="C").array
        if len(mod_ts_input.shape) == 1:
            self.n = mod_ts_input.shape[0]
            if self.ts_num != 1:
                raise ValueError(err_mess + "1.")
        elif len(ts_input.shape) == 2:
            # cuDF DataFrames hold one series per column while the other
            # accepted formats hold one series per row, hence the two
            # flattening paths.
            if is_cudf:
                d1 = mod_ts_input.shape[0]
                d2 = mod_ts_input.shape[1]
                mod_ts_input = mod_ts_input.reshape((d1*d2,))
            else:
                d1 = mod_ts_input.shape[1]
                d2 = mod_ts_input.shape[0]
                mod_ts_input = mod_ts_input.ravel()
            self.n = d1
            if self.ts_num != d2:
                raise ValueError(err_mess + str(d2))
        else:
            raise ValueError("Data input must have 1 or 2 dimensions.")
        return mod_ts_input

    @cuml.internals.api_base_return_any_skipall
    def fit(self) -> "ExponentialSmoothing":
        """
        Perform fitting on the given `endog` dataset.
        Calculates the level, trend, season, and SSE components.
        """
        X_m = self._check_dims(self.endog)
        self.dtype = X_m.dtype
        if self.n < self.start_periods*self.seasonal_periods:
            raise ValueError("Length of time series (" + str(self.n) +
                             ") must be at least freq*start_periods (" +
                             str(self.start_periods*self.seasonal_periods) +
                             ").")
        if self.n <= 0:
            raise ValueError("Time series must contain at least 1 value."
                             " Given: " + str(self.n))

        cdef uintptr_t input_ptr
        cdef int leveltrend_seed_len, season_seed_len, components_len
        cdef int leveltrend_coef_offset, season_coef_offset
        cdef int error_len

        input_ptr = X_m.ptr

        # Query the component buffer sizes from the C++ side; only
        # components_len is used afterwards.
        # NOTE(review): the local names of the last three out-params do not
        # match the extern declaration's order — harmless while unused, but
        # confirm against the C++ header before relying on them.
        buffer_size(<int> self.n, <int> self.ts_num,
                    <int> self.seasonal_periods,
                    <int*> &leveltrend_seed_len,
                    <int*> &season_seed_len,
                    <int*> &components_len,
                    <int*> &leveltrend_coef_offset,
                    <int*> &season_coef_offset,
                    <int*> &error_len)

        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef uintptr_t level_ptr, trend_ptr, season_ptr, SSE_ptr

        self.level = CumlArray.zeros(components_len, dtype=self.dtype)
        self.trend = CumlArray.zeros(components_len, dtype=self.dtype)
        self.season = CumlArray.zeros(components_len, dtype=self.dtype)
        self.SSE = CumlArray.zeros(self.ts_num, dtype=self.dtype)
        level_ptr = self.level.ptr
        trend_ptr = self.trend.ptr
        season_ptr = self.season.ptr
        SSE_ptr = self.SSE.ptr

        cdef float eps_f = np.float32(self.eps)
        cdef double eps_d = np.float64(self.eps)

        if self.dtype == np.float32:
            fit(handle_[0], <int> self.n, <int> self.ts_num,
                <int> self.seasonal_periods, <int> self.start_periods,
                <SeasonalType> self._cpp_stype,
                <float> eps_f,
                <float*> input_ptr, <float*> level_ptr,
                <float*> trend_ptr, <float*> season_ptr,
                <float*> SSE_ptr)
        elif self.dtype == np.float64:
            fit(handle_[0], <int> self.n, <int> self.ts_num,
                <int> self.seasonal_periods, <int> self.start_periods,
                <SeasonalType> self._cpp_stype,
                <double> eps_d,
                <double*> input_ptr, <double*> level_ptr,
                <double*> trend_ptr, <double*> season_ptr,
                <double*> SSE_ptr)
        else:
            raise TypeError("ExponentialSmoothing supports only float32"
                            " and float64 input, but input type "
                            + str(self.dtype) + " passed.")

        # Components come back flattened; reshape to one row per series.
        num_rows = components_len // self.ts_num
        with using_output_type("cupy"):
            self.level = self.level.reshape((self.ts_num, num_rows), order='F')
            self.trend = self.trend.reshape((self.ts_num, num_rows), order='F')
            self.season = self.season.reshape((self.ts_num, num_rows),
                                              order='F')
        self.handle.sync()
        self.fit_executed_flag = True
        del X_m
        return self

    def forecast(self, h=1, index=None):
        """
        Forecasts future points based on the fitted model.

        Parameters
        ----------
        h : int (default=1)
            The number of points for each series to be forecasted.
        index : int (default=None)
            The index of the time series from which you want
            forecasted points. if None, then a cudf.DataFrame of
            the forecasted points from all time series is returned.

        Returns
        -------
        preds : cudf.DataFrame or cudf.Series
            Series of forecasted points if index is provided.
            DataFrame of all forecasted points if index=None.
        """
        cdef uintptr_t forecast_ptr, level_ptr, trend_ptr, season_ptr
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        if not isinstance(h, int) or \
                (not isinstance(index, int) and index is not None):
            raise TypeError("Input arguments must be of type int."
                            "Index has type: " + str(type(index))
                            + "\nh has type: " + str(type(h)))
        if self.fit_executed_flag:
            if h <= 0:
                raise ValueError("h must be > 0. Currently: " + str(h))
            if h > self.h:
                # Only (re)compute when a longer horizon than previously
                # cached is requested; shorter requests reuse the cache.
                self.h = h
                self.forecasted_points = CumlArray.zeros(self.ts_num*h,
                                                         dtype=self.dtype)
                with using_output_type("cuml"):
                    forecast_ptr = self.forecasted_points.ptr
                    level_ptr = self.level.ptr
                    trend_ptr = self.trend.ptr
                    season_ptr = self.season.ptr
                if self.dtype == np.float32:
                    forecast(handle_[0], <int> self.n,
                             <int> self.ts_num,
                             <int> self.seasonal_periods,
                             <int> h,
                             <SeasonalType> self._cpp_stype,
                             <float*> level_ptr,
                             <float*> trend_ptr,
                             <float*> season_ptr,
                             <float*> forecast_ptr)
                elif self.dtype == np.float64:
                    forecast(handle_[0], <int> self.n,
                             <int> self.ts_num,
                             <int> self.seasonal_periods, <int> h,
                             <SeasonalType> self._cpp_stype,
                             <double*> level_ptr,
                             <double*> trend_ptr,
                             <double*> season_ptr,
                             <double*> forecast_ptr)
                with using_output_type("cupy"):
                    self.forecasted_points =\
                        self.forecasted_points.reshape((self.ts_num, h),
                                                       order='F')
                self.handle.sync()
            if index is None:
                if self.ts_num == 1:
                    return cudf.Series(
                        self.forecasted_points.ravel(order='F')[:h])
                else:
                    return cudf.DataFrame(
                        self.forecasted_points[:, :h].T)
            else:
                if index < 0 or index >= self.ts_num:
                    raise IndexError("Index input: " + str(index) +
                                     " outside of range [0, " +
                                     str(self.ts_num) + "]")
                return cudf.Series(cp.asarray(
                    self.forecasted_points[index, :h]))
        else:
            raise ValueError("Fit() the model before forecast()")

    def score(self, index=None):
        """
        Returns the score of the model.

        .. note:: Currently returns the SSE, rather than the gradient of the
            LogLikelihood. https://github.com/rapidsai/cuml/issues/876

        Parameters
        ----------
        index : int (default=None)
            The index of the time series from which the SSE will be
            returned. if None, then all SSEs are returned in a cudf
            Series.

        Returns
        -------
        score : np.float32, np.float64, or cudf.Series
            The SSE of the fitted model.
        """
        if self.fit_executed_flag:
            if index is None:
                return cudf.Series(self.SSE)
            elif index < 0 or index >= self.ts_num:
                raise IndexError("Index input: " + str(index) + " outside of "
                                 "range [0, " + str(self.ts_num) + "]")
            else:
                return self.SSE[index]
        else:
            raise ValueError("Fit() the model before score()")

    def get_level(self, index=None):
        """
        Returns the level component of the model.

        Parameters
        ----------
        index : int (default=None)
            The index of the time series from which the level will be
            returned. if None, then all level components are returned
            in a cudf.Series.

        Returns
        -------
        level : cudf.Series or cudf.DataFrame
            The level component of the fitted model
        """
        if self.fit_executed_flag:
            if index is None:
                if self.ts_num == 1:
                    return cudf.Series(self.level.ravel(order='F'))
                else:
                    return cudf.DataFrame(self.level.T)
            else:
                if index < 0 or index >= self.ts_num:
                    raise IndexError("Index input: " + str(index) + " outside "
                                     "of range [0, " + str(self.ts_num) + "]")
                else:
                    return cudf.Series(cp.asarray(self.level[index]))
        else:
            raise ValueError("Fit() the model to get level values")

    def get_trend(self, index=None):
        """
        Returns the trend component of the model.

        Parameters
        ----------
        index : int (default=None)
            The index of the time series from which the trend will be
            returned. if None, then all trend components are returned
            in a cudf.Series.

        Returns
        -------
        trend : cudf.Series or cudf.DataFrame
            The trend component of the fitted model.
        """
        if self.fit_executed_flag:
            if index is None:
                if self.ts_num == 1:
                    return cudf.Series(self.trend.ravel(order='F'))
                else:
                    return cudf.DataFrame(self.trend.T)
            else:
                if index < 0 or index >= self.ts_num:
                    raise IndexError("Index input: " + str(index) + " outside "
                                     "of range [0, " + str(self.ts_num) + "]")
                else:
                    return cudf.Series(cp.asarray(self.trend[index]))
        else:
            raise ValueError("Fit() the model to get trend values")

    def get_season(self, index=None):
        """
        Returns the season component of the model.

        Parameters
        ----------
        index : int (default=None)
            The index of the time series from which the season will be
            returned. if None, then all season components are returned
            in a cudf.Series.

        Returns
        -------
        season: cudf.Series or cudf.DataFrame
            The season component of the fitted model
        """
        if self.fit_executed_flag:
            if index is None:
                if self.ts_num == 1:
                    return cudf.Series(self.season.ravel(order='F'))
                else:
                    return cudf.DataFrame(self.season.T)
            else:
                if index < 0 or index >= self.ts_num:
                    raise IndexError("Index input: " + str(index) + " outside "
                                     "of range [0, " + str(self.ts_num) + "]")
                else:
                    return cudf.Series(cp.asarray(self.season[index]))
        else:
            raise ValueError("Fit() the model to get season values")

    def get_param_names(self):
        return super().get_param_names() + [
            "endog",
            "seasonal",
            "seasonal_periods",
            "start_periods",
            "ts_num",
            "eps",
        ]
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tsa/arima.pyx | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import (
cpu_only_import,
gpu_only_import_from,
null_decorator
)
np = cpu_only_import('numpy')
nvtx_annotate = gpu_only_import_from("nvtx", "annotate", alt=null_decorator)
from libc.stdint cimport uintptr_t
from libcpp cimport bool
from libcpp.vector cimport vector
from typing import Tuple, Dict, Mapping, Optional, Union
import cuml.internals
from cuml.internals.array import CumlArray
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.base import Base
from pylibraft.common.handle cimport handle_t
from cuml.tsa.batched_lbfgs import batched_fmin_lbfgs_b
import cuml.internals.logger as logger
from cuml.common import has_scipy
from cuml.internals.input_utils import input_to_cuml_array
from cuml.internals import _deprecate_pos_args
cdef extern from "cuml/tsa/arima_common.h" namespace "ML":
    # Device pointers to the per-parameter arrays of a batched ARIMA model.
    # A pointer is NULL when the corresponding term is absent from the order
    # (see ARIMAParamsWrapper.__cinit__ below).
    cdef cppclass ARIMAParams[DataT]:
        DataT* mu
        DataT* beta
        DataT* ar
        DataT* ma
        DataT* sar
        DataT* sma
        DataT* sigma2
    # Pre-allocated scratch memory for the ARIMA routines; compute_size
    # reports how many bytes the caller must provide in in_buf.
    cdef cppclass ARIMAMemory[DataT]:
        ARIMAMemory(const ARIMAOrder& order, int batch_size, int n_obs,
                    char* in_buf)
        @staticmethod
        size_t compute_size(const ARIMAOrder& order, int batch_size, int n_obs)
cdef extern from "cuml/tsa/batched_arima.hpp" namespace "ML":
    # Log-likelihood estimation method: conditional sum of squares (CSS)
    # or maximum likelihood (MLE).
    ctypedef enum LoglikeMethod: CSS, MLE
    # pack/unpack convert between the ARIMAParams struct-of-arrays layout
    # and a single contiguous parameter vector.
    void cpp_pack "pack" (
        handle_t& handle, const ARIMAParams[double]& params,
        const ARIMAOrder& order, int batch_size, double* param_vec)
    void cpp_unpack "unpack" (
        handle_t& handle, ARIMAParams[double]& params,
        const ARIMAOrder& order, int batch_size, const double* param_vec)
    # True if any of the n_elem values in d_y is missing (NaN).
    bool detect_missing(
        handle_t& handle, const double* d_y, int n_elem)
    # Applies the (seasonal) differencing implied by `order` to d_y.
    void batched_diff(
        handle_t& handle, double* d_y_diff, const double* d_y, int batch_size,
        int n_obs, const ARIMAOrder& order)
    # Overload taking the packed parameter vector.
    void batched_loglike(
        handle_t& handle, const ARIMAMemory[double]& arima_mem,
        const double* y, const double* d_exog, int batch_size, int nobs,
        const ARIMAOrder& order, const double* params, double* loglike,
        bool trans, bool host_loglike, LoglikeMethod method, int truncate)
    # Overload taking the ARIMAParams struct.
    void batched_loglike(
        handle_t& handle, const ARIMAMemory[double]& arima_mem,
        const double* y, const double* d_exog, int batch_size, int n_obs,
        const ARIMAOrder& order, const ARIMAParams[double]& params,
        double* loglike, bool trans, bool host_loglike, LoglikeMethod method,
        int truncate)
    # Numerical gradient of the log-likelihood (step size h).
    void batched_loglike_grad(
        handle_t& handle, const ARIMAMemory[double]& arima_mem,
        const double* d_y, const double* d_exog, int batch_size, int nobs,
        const ARIMAOrder& order, const double* d_x, double* d_grad, double h,
        bool trans, LoglikeMethod method, int truncate)
    # In- and out-of-sample prediction with optional confidence intervals.
    void cpp_predict "predict" (
        handle_t& handle, const ARIMAMemory[double]& arima_mem,
        const double* d_y, const double* d_exog, const double* d_exog_fut,
        int batch_size, int nobs, int start, int end, const ARIMAOrder& order,
        const ARIMAParams[double]& params, double* d_y_p, bool pre_diff,
        double level, double* d_lower, double* d_upper)
    # Information criterion per batch member (ic_type selects AIC/AICc/BIC,
    # see the mapping in ARIMA._ic).
    void information_criterion(
        handle_t& handle, const ARIMAMemory[double]& arima_mem,
        const double* d_y, const double* d_exog, int batch_size, int nobs,
        const ARIMAOrder& order, const ARIMAParams[double]& params,
        double* ic, int ic_type)
    # Initial parameter estimates used to seed the optimization.
    void estimate_x0(
        handle_t& handle, ARIMAParams[double]& params, const double* d_y,
        const double* d_exog, int batch_size, int nobs,
        const ARIMAOrder& order, bool missing)
cdef extern from "cuml/tsa/batched_kalman.hpp" namespace "ML":
    # Jones transformation (or its inverse when isInv) of the parameter
    # vector, used to enforce stationarity/invertibility constraints.
    void batched_jones_transform(
        handle_t& handle, ARIMAMemory[double]& arima_mem,
        const ARIMAOrder& order, int batchSize, bool isInv,
        const double* h_params, double* h_Tparams)
cdef class ARIMAParamsWrapper:
    """A wrapper class for ARIMAParams

    Collects the device pointers of a model's fit-parameter arrays
    (mu_, beta_, ar_, ...) into a single C++ ARIMAParams[double] struct
    so it can be passed to the extern ARIMA routines.
    """
    cdef ARIMAParams[double] params

    def __cinit__(self, model):
        cdef ARIMAOrder order = model.order
        # Each pointer is taken only when the corresponding term exists in
        # the order; otherwise NULL is passed (the C++ side receives no
        # array for absent terms). sigma2 is always present.
        cdef uintptr_t d_mu_ptr = \
            model.mu_.ptr if order.k else <uintptr_t> NULL
        cdef uintptr_t d_beta_ptr = \
            model.beta_.ptr if order.n_exog else <uintptr_t> NULL
        cdef uintptr_t d_ar_ptr = \
            model.ar_.ptr if order.p else <uintptr_t> NULL
        cdef uintptr_t d_ma_ptr = \
            model.ma_.ptr if order.q else <uintptr_t> NULL
        cdef uintptr_t d_sar_ptr = \
            model.sar_.ptr if order.P else <uintptr_t> NULL
        cdef uintptr_t d_sma_ptr = \
            model.sma_.ptr if order.Q else <uintptr_t> NULL
        cdef uintptr_t d_sigma2_ptr = <uintptr_t> model.sigma2_.ptr
        self.params.mu = <double*> d_mu_ptr
        self.params.beta = <double*> d_beta_ptr
        self.params.ar = <double*> d_ar_ptr
        self.params.ma = <double*> d_ma_ptr
        self.params.sar = <double*> d_sar_ptr
        self.params.sma = <double*> d_sma_ptr
        self.params.sigma2 = <double*> d_sigma2_ptr
class ARIMA(Base):
"""
Implements a batched ARIMA model for in- and out-of-sample
time-series prediction, with support for seasonality (SARIMA)
ARIMA stands for Auto-Regressive Integrated Moving Average.
See https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average
This class can fit an ARIMA(p,d,q) or ARIMA(p,d,q)(P,D,Q)_s model to a
batch of time series of the same length (or various lengths, using missing
values at the start for padding).
The implementation is designed to give the best performance when using
large batches of time series.
Parameters
----------
endog : dataframe or array-like (device or host)
Endogenous variable, assumed to have each time series in columns.
Acceptable formats: cuDF DataFrame, cuDF Series, NumPy ndarray,
Numba device ndarray, cuda array interface compliant array like CuPy.
Missing values are accepted, represented by NaN.
order : Tuple[int, int, int] (default=(1,1,1))
The ARIMA order (p, d, q) of the model
seasonal_order : Tuple[int, int, int, int] (default=(0,0,0,0))
The seasonal ARIMA order (P, D, Q, s) of the model
exog : dataframe or array-like (device or host) (default=None)
Exogenous variables, assumed to have each time series in columns,
such that variables associated with a same batch member are adjacent
(number of columns: n_exog * batch_size)
Acceptable formats: cuDF DataFrame, cuDF Series, NumPy ndarray,
Numba device ndarray, cuda array interface compliant array like CuPy.
Missing values are not supported.
fit_intercept : bool or int (default = True)
Whether to include a constant trend mu in the model
simple_differencing : bool or int (default = True)
If True, the data is differenced before being passed to the Kalman
filter. If False, differencing is part of the state-space model.
In some cases this setting can be ignored: computing forecasts with
confidence intervals will force it to False ; fitting with the CSS
method will force it to True.
Note: that forecasts are always for the original series, whereas
statsmodels computes forecasts for the differenced series when
simple_differencing is True.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
convert_dtype : boolean
When set to True, the model will automatically convert the inputs to
np.float64.
Attributes
----------
order : ARIMAOrder
The ARIMA order of the model (p, d, q, P, D, Q, s, k, n_exog)
d_y : device array
Time series data on device
n_obs : int
Number of observations
batch_size : int
Number of time series in the batch
dtype : numpy.dtype
Floating-point type of the data and parameters
niter : numpy.ndarray
After fitting, contains the number of iterations before convergence
for each time series.
Notes
-----
*Performance:* Let :math:`r=max(p+s*P, q+s*Q+1)`. The device memory used
for most operations is
:math:\
`O(\\mathtt{batch\\_size}*\\mathtt{n\\_obs} + \\mathtt{batch\\_size}*r^2)`.
The execution time is a linear function of `n_obs` and `batch_size`
(if `batch_size` is large), but grows very fast with `r`.
The performance is optimized for very large batch sizes (e.g thousands of
series).
References
----------
This class is heavily influenced by the Python library `statsmodels`,
particularly `statsmodels.tsa.statespace.sarimax.SARIMAX`.
See https://www.statsmodels.org/stable/statespace.html.
Additionally the following book is a useful reference:
"Time Series Analysis by State Space Methods",
J. Durbin, S.J. Koopman, 2nd Edition (2012).
Examples
--------
.. code-block:: python
>>> import cupy as cp
>>> from cuml.tsa.arima import ARIMA
>>> # Create seasonal data with a trend, a seasonal pattern and noise
>>> n_obs = 100
>>> cp.random.seed(12)
>>> x = cp.linspace(0, 1, n_obs)
>>> pattern = cp.array([[0.05, 0.0], [0.07, 0.03],
... [-0.03, 0.05], [0.02, 0.025]])
>>> noise = cp.random.normal(scale=0.01, size=(n_obs, 2))
>>> y = (cp.column_stack((0.5*x, -0.25*x)) + noise
... + cp.tile(pattern, (25, 1)))
>>> # Fit a seasonal ARIMA model
>>> model = ARIMA(y,
... order=(0,1,1),
... seasonal_order=(0,1,1,4),
... fit_intercept=False)
>>> model.fit()
ARIMA(...)
>>> # Forecast
>>> fc = model.forecast(10)
>>> print(fc) # doctest: +SKIP
[[ 0.55204599 -0.25681163]
[ 0.57430705 -0.2262438 ]
[ 0.48120315 -0.20583011]
[ 0.535594 -0.24060046]
[ 0.57207541 -0.26695497]
[ 0.59433647 -0.23638713]
[ 0.50123257 -0.21597344]
[ 0.55562342 -0.25074379]
[ 0.59210483 -0.27709831]
[ 0.61436589 -0.24653047]]
"""
d_y = CumlArrayDescriptor()
# TODO: (MDD) Should this be public? Its not listed in the attributes doc
_d_y_diff = CumlArrayDescriptor()
_temp_mem = CumlArrayDescriptor()
mu_ = CumlArrayDescriptor()
beta_ = CumlArrayDescriptor()
ar_ = CumlArrayDescriptor()
ma_ = CumlArrayDescriptor()
sar_ = CumlArrayDescriptor()
sma_ = CumlArrayDescriptor()
sigma2_ = CumlArrayDescriptor()
    @_deprecate_pos_args(version="21.06")
    def __init__(self,
                 endog,
                 *,
                 order: Tuple[int, int, int] = (1, 1, 1),
                 seasonal_order: Tuple[int, int, int, int] = (0, 0, 0, 0),
                 exog=None,
                 fit_intercept=True,
                 simple_differencing=True,
                 handle=None,
                 verbose=False,
                 output_type=None,
                 convert_dtype=True):
        # scipy is required by the L-BFGS-B optimization used in fitting.
        if not has_scipy():
            raise RuntimeError("Scipy is needed to run cuML's ARIMA estimator."
                               " Please install it to enable ARIMA "
                               "estimation.")

        # Initialize base class
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)
        self._set_base_attributes(output_type=endog)

        # Check validity of the ARIMA order and seasonal order
        p, d, q = order
        P, D, Q, s = seasonal_order
        if P + D + Q > 0 and s < 2:
            raise ValueError("ERROR: Invalid period for seasonal ARIMA: {}"
                             .format(s))
        if d + D > 2:
            raise ValueError("ERROR: Invalid order. Required: d+D <= 2")
        if s != 0 and (p >= s or q >= s):
            raise ValueError("ERROR: Invalid order. Required: s > p, s > q")
        if p + q + P + Q + int(fit_intercept) == 0:
            raise ValueError("ERROR: Invalid order. At least one parameter"
                             " among p, q, P, Q and fit_intercept must be"
                             " non-zero")
        if p > 8 or P > 8 or q > 8 or Q > 8:
            raise ValueError("ERROR: Invalid order. Required: p,q,P,Q <= 8")
        if max(p + s * P, q + s * Q) > 1024:
            raise ValueError("ERROR: Invalid order. "
                             "Required: max(p+s*P, q+s*Q) <= 1024")

        # Endogenous variable. Float64 only for now.
        self.d_y, self.n_obs, self.batch_size, self.dtype \
            = input_to_cuml_array(
                endog, check_dtype=np.float64,
                convert_to_dtype=(np.float64 if convert_dtype else None))

        # Differencing consumes d + s*D observations; at least one must remain.
        if self.n_obs < d + s * D + 1:
            raise ValueError("ERROR: Number of observations too small for the"
                             " given order")

        # Exogenous variables: expected layout is n_exog columns per batch
        # member, i.e. n_cols_exog == n_exog * batch_size (see class doc).
        if exog is not None:
            self.d_exog, n_obs_exog, n_cols_exog, _ \
                = input_to_cuml_array(exog, check_dtype=np.float64)
            if n_cols_exog % self.batch_size != 0:
                raise ValueError("Number of columns in exog is not a multiple"
                                 " of batch_size")
            if n_obs_exog != self.n_obs:
                raise ValueError("Number of observations mismatch between"
                                 " endog and exog")
            n_exog = n_cols_exog // self.batch_size
        else:
            n_exog = 0

        # Set the ARIMA order
        cdef ARIMAOrder cpp_order
        cpp_order.p, cpp_order.d, cpp_order.q = order
        cpp_order.P, cpp_order.D, cpp_order.Q, cpp_order.s = seasonal_order
        cpp_order.k = int(fit_intercept)
        cpp_order.n_exog = n_exog
        self.order = cpp_order
        self.simple_differencing = simple_differencing

        # Buffers for the differenced series (filled in _initial_calc when
        # simple differencing is in effect).
        self._d_y_diff = CumlArray.empty(
            (self.n_obs - d - s * D, self.batch_size), self.dtype)
        if n_exog > 0:
            self._d_exog_diff = CumlArray.empty(
                (self.n_obs - d - s * D, self.batch_size * n_exog),
                self.dtype)
        self.n_obs_diff = self.n_obs - d - D * s

        # Allocate temporary storage
        temp_mem_size = ARIMAMemory[double].compute_size(
            cpp_order, <int> self.batch_size, <int> self.n_obs)
        self._temp_mem = CumlArray.empty(temp_mem_size, np.byte)

        self._initial_calc()
    @cuml.internals.api_base_return_any_skipall
    def _initial_calc(self):
        """
        This separates the initial calculation from the initialization to make
        the CumlArrayDescriptors work.

        Detects missing observations and, when simple differencing is in
        effect, precomputes the differenced endog/exog series and the
        corresponding differenced order.
        """
        cdef uintptr_t d_y_ptr = self.d_y.ptr
        cdef uintptr_t d_y_diff_ptr = self._d_y_diff.ptr
        cdef uintptr_t d_exog_ptr
        cdef uintptr_t d_exog_diff_ptr
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef ARIMAOrder cpp_order_diff = self.order

        # Detect missing observations
        self.missing = detect_missing(handle_[0], <double*> d_y_ptr,
                                      <int> self.batch_size * self.n_obs)
        # Missing observations are not supported together with simple
        # differencing, so fall back to in-model differencing.
        if self.missing and self.simple_differencing:
            logger.warn("Missing observations detected."
                        " Forcing simple_differencing=False")
            self.simple_differencing = False

        if self.simple_differencing:
            # Compute the differenced series
            batched_diff(handle_[0], <double*> d_y_diff_ptr, <double*> d_y_ptr,
                         <int> self.batch_size, <int> self.n_obs, self.order)
            # Create a version of the order for the differenced series
            # (d and D are zero since differencing was already applied).
            cpp_order_diff.d = 0
            cpp_order_diff.D = 0
            self.order_diff = cpp_order_diff
            if cpp_order_diff.n_exog > 0:
                d_exog_ptr = self.d_exog.ptr
                d_exog_diff_ptr = self._d_exog_diff.ptr
                batched_diff(handle_[0], <double*> d_exog_diff_ptr,
                             <double*> d_exog_ptr,
                             <int> self.batch_size * cpp_order_diff.n_exog,
                             <int> self.n_obs, self.order)
        else:
            self.order_diff = None
    def __str__(self):
        """Short human-readable summary, e.g. ``ARIMA(1,1,1) (c) - 8 series``
        ('c' = with intercept, 'n' = no intercept)."""
        cdef ARIMAOrder order = self.order
        intercept_str = 'c' if order.k else 'n'
        if order.s:
            # Seasonal form: ARIMA(p,d,q)(P,D,Q)_s
            return "ARIMA({},{},{})({},{},{})_{} ({}) - {} series".format(
                order.p, order.d, order.q, order.P, order.D, order.Q, order.s,
                intercept_str, self.batch_size)
        else:
            return "ARIMA({},{},{}) ({}) - {} series".format(
                order.p, order.d, order.q, intercept_str, self.batch_size)
    @nvtx_annotate(message="tsa.arima.ARIMA._ic", domain="cuml_python")
    @cuml.internals.api_base_return_any_skipall
    def _ic(self, ic_type: str):
        """Wrapper around C++ information_criterion

        ic_type is one of "aic", "aicc", "bic" (case-insensitive); raises
        NotImplementedError otherwise. Returns a CumlArray with one value
        per batch member.
        """
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef ARIMAOrder order = self.order
        # Use the differenced series and order when simple differencing
        # is active, otherwise the original ones.
        cdef ARIMAOrder order_kf = \
            self.order_diff if self.simple_differencing else self.order
        cdef ARIMAParams[double] cpp_params = ARIMAParamsWrapper(self).params

        ic = CumlArray.empty(self.batch_size, self.dtype)
        cdef uintptr_t d_ic_ptr = ic.ptr
        cdef uintptr_t d_y_kf_ptr = \
            self._d_y_diff.ptr if self.simple_differencing else self.d_y.ptr
        cdef uintptr_t d_exog_kf_ptr = <uintptr_t> NULL
        if order.n_exog:
            d_exog_kf_ptr = (self._d_exog_diff.ptr if self.simple_differencing
                             else self.d_exog.ptr)
        n_obs_kf = (self.n_obs_diff if self.simple_differencing
                    else self.n_obs)

        # Map the criterion name to the id expected by the C++ side.
        ic_name_to_number = {"aic": 0, "aicc": 1, "bic": 2}
        cdef int ic_type_id
        try:
            ic_type_id = ic_name_to_number[ic_type.lower()]
        except KeyError as e:
            raise NotImplementedError("IC type '{}' unknown".format(ic_type))

        cdef uintptr_t d_temp_mem = self._temp_mem.ptr
        arima_mem_ptr = new ARIMAMemory[double](
            order, <int> self.batch_size, <int> self.n_obs,
            <char*> d_temp_mem)

        information_criterion(handle_[0], arima_mem_ptr[0],
                              <double*> d_y_kf_ptr, <double*> d_exog_kf_ptr,
                              <int> self.batch_size, <int> n_obs_kf, order_kf,
                              cpp_params, <double*> d_ic_ptr,
                              <int> ic_type_id)

        # Heap-allocated wrapper must be freed explicitly.
        del arima_mem_ptr
        return ic
    @property
    def aic(self) -> CumlArray:
        """Akaike Information Criterion, per series. Shape: (batch_size,)"""
        return self._ic("aic")
    @property
    def aicc(self) -> CumlArray:
        """Corrected Akaike Information Criterion, per series.
        Shape: (batch_size,)
        """
        return self._ic("aicc")
    @property
    def bic(self) -> CumlArray:
        """Bayesian Information Criterion, per series. Shape: (batch_size,)"""
        return self._ic("bic")
@property
def complexity(self):
"""Model complexity (number of parameters)"""
cdef ARIMAOrder order = self.order
return (order.p + order.P + order.q + order.Q + order.k + order.n_exog
+ 1)
@cuml.internals.api_base_return_generic(input_arg=None)
def get_fit_params(self) -> Dict[str, CumlArray]:
"""Get all the fit parameters. Not to be confused with get_params
Note: pack() can be used to get a compact vector of the parameters
Returns
-------
params: Dict[str, array-like]
A dictionary of parameter names and associated arrays
The key names are in {"mu", "ar", "ma", "sar", "sma", "sigma2"}
The shape of the arrays are (batch_size,) for mu and sigma2 and
(n, batch_size) for any other type, where n is the corresponding
number of parameters of this type.
"""
cdef ARIMAOrder order = self.order
params = dict()
names = ["mu", "beta", "ar", "ma", "sar", "sma", "sigma2"]
criteria = [order.k, order.n_exog, order.p, order.q, order.P, order.Q,
True]
for i in range(len(names)):
if criteria[i] > 0:
params[names[i]] = getattr(self, "{}_".format(names[i]))
return params
def set_fit_params(self, params: Mapping[str, object]):
"""Set all the fit parameters. Not to be confused with ``set_params``
Note: `unpack()` can be used to load a compact vector of the
parameters
Parameters
----------
params: Mapping[str, array-like]
A dictionary of parameter names and associated arrays
The key names are in {"mu", "ar", "ma", "sar", "sma", "sigma2"}
The shape of the arrays are (batch_size,) for mu and sigma2 and
(n, batch_size) for any other type, where n is the corresponding
number of parameters of this type.
"""
for param_name in ["mu", "beta", "ar", "ma", "sar", "sma", "sigma2"]:
if param_name in params:
array, *_ = input_to_cuml_array(params[param_name],
check_dtype=np.float64)
setattr(self, "{}_".format(param_name), array)
def get_param_names(self):
raise NotImplementedError
def get_param_names(self):
"""
.. warning:: ARIMA is unable to be cloned at this time.
The methods: `get_param_names()`, `get_params` and
`set_params` will raise ``NotImplementedError``
"""
raise NotImplementedError("ARIMA is unable to be cloned via "
"`get_params` and `set_params`.")
    def get_params(self, deep=True):
        """
        .. warning:: ARIMA is unable to be cloned at this time.
            The methods: `get_param_names()`, `get_params` and
            `set_params` will raise ``NotImplementedError``
        """
        # Raising (rather than returning an incomplete dict) prevents
        # scikit-learn-style cloning from silently producing a broken model.
        raise NotImplementedError("ARIMA is unable to be cloned via "
                                  "`get_params` and `set_params`.")
    def set_params(self, **params):
        """
        .. warning:: ARIMA is unable to be cloned at this time.
            The methods: `get_param_names()`, `get_params` and
            `set_params` will raise ``NotImplementedError``
        """
        # Raising (rather than ignoring the parameters) prevents
        # scikit-learn-style cloning from silently producing a broken model.
        raise NotImplementedError("ARIMA is unable to be cloned via "
                                  "`get_params` and `set_params`.")
@cuml.internals.api_base_return_generic(input_arg=None)
def predict(
self,
start=0,
end=None,
level=None,
exog=None,
) -> Union[CumlArray, Tuple[CumlArray, CumlArray, CumlArray]]:
"""Compute in-sample and/or out-of-sample prediction for each series
Parameters
----------
start : int (default = 0)
Index where to start the predictions (0 <= start <= num_samples)
end : int (default = None)
Index where to end the predictions, excluded (end > start), or
``None`` to predict until the last observation
level : float or None (default = None)
Confidence level for prediction intervals, or None to return only
the point forecasts. ``0 < level < 1``
exog : dataframe or array-like (device or host)
Future values for exogenous variables. Assumed to have each time
series in columns, such that variables associated with a same
batch member are adjacent.
Shape = (end - n_obs, n_exog * batch_size)
Returns
-------
y_p : array-like (device)
Predictions. Shape = (end - start, batch_size)
lower: array-like (device) (optional)
Lower limit of the prediction interval if ``level != None``
Shape = (end - start, batch_size)
upper: array-like (device) (optional)
Upper limit of the prediction interval if ``level != None``
Shape = (end - start, batch_size)
Examples
--------
.. code-block:: python
from cuml.tsa.arima import ARIMA
model = ARIMA(ys, order=(1,1,1))
model.fit()
y_pred = model.predict()
"""
cdef ARIMAOrder order = self.order
cdef ARIMAParams[double] cpp_params = ARIMAParamsWrapper(self).params
if start < 0:
raise ValueError("ERROR(`predict`): start < 0")
elif start > self.n_obs:
raise ValueError("ERROR(`predict`): There can't be a gap between"
" the data and the prediction")
elif end <= start:
raise ValueError("ERROR(`predict`): end <= start")
elif self.simple_differencing and start < order.d + order.D * order.s:
logger.warn("Predictions before {} are undefined when using"
" simple_differencing=True, will be set to NaN"
.format(order.d + order.D * order.s))
if level is not None:
if level <= 0 or level >= 1:
raise ValueError("ERROR: Invalid confidence level: {}"
.format(level))
elif level > 0 and start < self.n_obs:
raise ValueError("ERROR: Prediction intervals can only be"
" computed for out-of-sample predictions")
if end is None:
end = self.n_obs
if order.n_exog > 0 and end > self.n_obs and exog is None:
raise ValueError("The model was fit with a regression component,"
" so future values must be provided via `exog`")
elif order.n_exog == 0 and exog is not None:
raise ValueError("A value was given for `exog` but the model was"
" fit without any regression component")
elif end <= self.n_obs and exog is not None:
raise ValueError("A value was given for `exog` but only in-sample"
" predictions were requested")
cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
predict_size = end - start
# Future values of the exogenous variables
cdef uintptr_t d_exog_fut_ptr = <uintptr_t> NULL
if order.n_exog and end > self.n_obs:
d_exog_fut, n_obs_fut, n_cols_fut, _ \
= input_to_cuml_array(exog, check_dtype=np.float64)
if n_obs_fut != end - self.n_obs:
raise ValueError(
"Dimensions mismatch: `exog` should contain {}"
" observations per column".format(end - self.n_obs))
elif n_cols_fut != self.batch_size * order.n_exog:
raise ValueError(
"Dimensions mismatch: `exog` should have {} columns"
.format(self.batch_size * order.n_exog))
d_exog_fut_ptr = d_exog_fut.ptr
# allocate predictions and intervals device memory
cdef uintptr_t d_y_p_ptr = <uintptr_t> NULL
cdef uintptr_t d_lower_ptr = <uintptr_t> NULL
cdef uintptr_t d_upper_ptr = <uintptr_t> NULL
d_y_p = CumlArray.empty((predict_size, self.batch_size),
dtype=np.float64, order="F")
d_y_p_ptr = d_y_p.ptr
if level is not None:
d_lower = CumlArray.empty((predict_size, self.batch_size),
dtype=np.float64, order="F")
d_upper = CumlArray.empty((predict_size, self.batch_size),
dtype=np.float64, order="F")
d_lower_ptr = d_lower.ptr
d_upper_ptr = d_upper.ptr
cdef uintptr_t d_y_ptr = self.d_y.ptr
cdef uintptr_t d_exog_ptr = <uintptr_t> NULL
if order.n_exog:
d_exog_ptr = self.d_exog.ptr
cdef uintptr_t d_temp_mem = self._temp_mem.ptr
arima_mem_ptr = new ARIMAMemory[double](
order, <int> self.batch_size, <int> self.n_obs,
<char*> d_temp_mem)
cpp_predict(handle_[0], arima_mem_ptr[0], <double*>d_y_ptr,
<double*>d_exog_ptr, <double*>d_exog_fut_ptr,
<int> self.batch_size, <int> self.n_obs, <int> start,
<int> end, order, cpp_params, <double*>d_y_p_ptr,
<bool> self.simple_differencing,
<double> (0 if level is None else level),
<double*> d_lower_ptr, <double*> d_upper_ptr)
del arima_mem_ptr
if level is None:
return d_y_p
else:
return (d_y_p,
d_lower,
d_upper)
@nvtx_annotate(message="tsa.arima.ARIMA.forecast", domain="cuml_python")
@cuml.internals.api_base_return_generic_skipall
def forecast(
self,
nsteps: int,
level=None,
exog=None
) -> Union[CumlArray, Tuple[CumlArray, CumlArray, CumlArray]]:
"""Forecast the given model `nsteps` into the future.
Parameters
----------
nsteps : int
The number of steps to forecast beyond end of the given series
level : float or None (default = None)
Confidence level for prediction intervals, or None to return only
the point forecasts. 0 < level < 1
exog : dataframe or array-like (device or host) (default=None)
Future values for exogenous variables. Assumed to have each time
series in columns, such that variables associated with a same
batch member are adjacent.
Shape = (nsteps, n_exog * batch_size)
Returns
-------
y_fc : array-like
Forecasts. Shape = (nsteps, batch_size)
lower : array-like (device) (optional)
Lower limit of the prediction interval if level != None
Shape = (end - start, batch_size)
upper : array-like (device) (optional)
Upper limit of the prediction interval if level != None
Shape = (end - start, batch_size)
Examples
--------
.. code-block:: python
from cuml.tsa.arima import ARIMA
...
model = ARIMA(ys, order=(1,1,1))
model.fit()
y_fc = model.forecast(10)
"""
return self.predict(self.n_obs, self.n_obs + nsteps, level, exog)
@cuml.internals.api_base_return_any_skipall
def _create_arrays(self):
"""Create the parameter arrays if non-existing"""
cdef ARIMAOrder order = self.order
if order.k and not hasattr(self, "mu_"):
self.mu_ = CumlArray.empty(self.batch_size, np.float64)
if order.n_exog and not hasattr(self, "beta_"):
self.beta_ = CumlArray.empty((order.n_exog, self.batch_size),
np.float64)
if order.p and not hasattr(self, "ar_"):
self.ar_ = CumlArray.empty((order.p, self.batch_size),
np.float64)
if order.q and not hasattr(self, "ma_"):
self.ma_ = CumlArray.empty((order.q, self.batch_size),
np.float64)
if order.P and not hasattr(self, "sar_"):
self.sar_ = CumlArray.empty((order.P, self.batch_size),
np.float64)
if order.Q and not hasattr(self, "sma_"):
self.sma_ = CumlArray.empty((order.Q, self.batch_size),
np.float64)
if not hasattr(self, "sigma2_"):
self.sigma2_ = CumlArray.empty(self.batch_size, np.float64)
    @nvtx_annotate(message="tsa.arima.ARIMA._estimate_x0",
                   domain="cuml_python")
    @cuml.internals.api_base_return_any_skipall
    def _estimate_x0(self):
        """Internal method. Estimate initial parameters of the model.

        Writes the estimates directly into the device parameter arrays
        (mu_, ar_, ma_, ... created by `_create_arrays`).
        """
        # Make sure the destination arrays exist before the C++ call
        self._create_arrays()
        cdef ARIMAOrder order = self.order
        cdef ARIMAParams[double] cpp_params = ARIMAParamsWrapper(self).params
        cdef uintptr_t d_y_ptr = self.d_y.ptr
        cdef uintptr_t d_exog_ptr = <uintptr_t> NULL
        if order.n_exog:
            d_exog_ptr = self.d_exog.ptr
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        # Call C++ function
        estimate_x0(handle_[0], cpp_params, <double*> d_y_ptr,
                    <double*> d_exog_ptr, <int> self.batch_size,
                    <int> self.n_obs, order, <bool> self.missing)
    @cuml.internals.api_base_return_any_skipall
    def fit(self,
            start_params: Optional[Mapping[str, object]] = None,
            opt_disp: int = -1,
            h: float = 1e-8,
            maxiter: int = 1000,
            method="ml",
            truncate: int = 0) -> "ARIMA":
        r"""Fit the ARIMA model to each time series.

        Parameters
        ----------
        start_params : Mapping[str, array-like] (optional)
            A mapping (e.g dictionary) of parameter names and associated arrays
            The key names are in {"mu", "ar", "ma", "sar", "sma", "sigma2"}
            The shape of the arrays are (batch_size,) for mu and sigma2
            parameters and (n, batch_size) for any other type, where n is the
            corresponding number of parameters of this type.
            Pass None for automatic estimation (recommended)
        opt_disp : int
            Fit diagnostic level (for L-BFGS solver):

            * `-1` for no output (default)
            * `0<n<100` for output every `n` steps
            * `n>100` for more detailed output

        h : float (default=1e-8)
            Finite-differencing step size. The gradient is computed using
            forward finite differencing:
            :math:`g = \frac{f(x + \mathtt{h}) - f(x)}{\mathtt{h}} + O(\mathtt{h})`
        maxiter : int (default=1000)
            Maximum number of iterations of L-BFGS-B
        method : str (default="ml")
            Estimation method - "css", "css-ml" or "ml".
            CSS uses a sum-of-squares approximation.
            ML estimates the log-likelihood with statespace methods.
            CSS-ML starts with CSS and refines with ML.
        truncate : int (default=0)
            When using CSS, start the sum of squares after a given number of
            observations

        Returns
        -------
        self : ARIMA
            The fitted model (fluent interface)
        """  # noqa
        # Runs one full L-BFGS-B optimization pass with the given estimation
        # method ("css" or "ml"); returns the optimized parameter vector and
        # the per-series iteration counts.
        def fit_helper(x_in, fit_method):

            def f(x: np.ndarray) -> np.ndarray:
                """The (batched) energy functional returning the negative
                log-likelihood (foreach series)."""
                # Recall: We maximize LL by minimizing -LL
                n_llf = -self._loglike(x, True, fit_method, truncate)
                # Normalization keeps the objective well-scaled for L-BFGS-B
                return n_llf / (self.n_obs - 1)

            # Optimized finite differencing gradient for batches
            def gf(x) -> np.ndarray:
                """The gradient of the (batched) energy functional."""
                # Recall: We maximize LL by minimizing -LL
                n_gllf = -self._loglike_grad(x, h, True, fit_method, truncate)
                return n_gllf / (self.n_obs - 1)

            # Check initial parameter sanity
            if ((np.isnan(x_in).any()) or (np.isinf(x_in).any())):
                raise FloatingPointError(
                    "Initial parameter vector x has NaN or Inf.")

            # Optimize parameters by minimizing log likelihood.
            x_out, niter, flags = batched_fmin_lbfgs_b(
                f, x_in, self.batch_size, gf, iprint=opt_disp, factr=1000,
                maxiter=maxiter)

            # Handle non-zero flags with Warning
            if (flags != 0).any():
                logger.warn("fit: Some batch members had optimizer problems")

            return x_out, niter

        if start_params is None:
            self._estimate_x0()
        else:
            self.set_fit_params(start_params)

        # Work in the (unconstrained) transformed parameter space
        x0 = self._batched_transform(self.pack(), True)

        method = method.lower()
        if method not in {"css", "css-ml", "ml"}:
            raise ValueError("Unknown method: {}".format(method))
        # CSS cannot handle missing observations; fall back to ML
        if self.missing and (method == "css" or method == "css-ml"):
            logger.warn("Missing observations detected."
                        " Forcing method=\"ml\"")
            method = "ml"

        # "css-ml" chains both passes, feeding the CSS result into ML and
        # accumulating the iteration counts
        if method == "css" or method == "css-ml":
            x, self.niter = fit_helper(x0, "css")
        if method == "css-ml" or method == "ml":
            x, niter = fit_helper(x if method == "css-ml" else x0, "ml")
            self.niter = (self.niter + niter) if method == "css-ml" else niter

        # Transform back to the constrained space and store the parameters
        self.unpack(self._batched_transform(x))
        return self
    @nvtx_annotate(message="tsa.arima.ARIMA._loglike", domain="cuml_python")
    @cuml.internals.api_base_return_any_skipall
    def _loglike(self, x, trans=True, method="ml", truncate=0):
        """Compute the batched log-likelihood for the given parameters.

        Parameters
        ----------
        x : array-like
            Packed parameter array, grouped by series
        trans : bool (default=True)
            Should the Jones' transform be applied?
            Note: The parameters from a fit model are already transformed.
        method : str (default="ml")
            Estimation method: "css" for sum-of-squares, "ml" for
            an estimation with statespace methods
        truncate : int (default=0)
            When using CSS, start the sum of squares after a given number of
            observations

        Returns
        -------
        loglike : numpy.ndarray
            Batched log-likelihood. Shape: (batch_size,)
        """
        # Host-side output buffer filled by the C++ call
        cdef vector[double] vec_loglike
        vec_loglike.resize(self.batch_size)
        cdef LoglikeMethod ll_method = CSS if method == "css" else MLE
        # CSS always works on differenced data; MLE only when
        # simple_differencing is enabled
        diff = ll_method != MLE or self.simple_differencing
        cdef ARIMAOrder order = self.order
        cdef ARIMAOrder order_kf = self.order_diff if diff else self.order
        d_x_array, *_ = \
            input_to_cuml_array(x, check_dtype=np.float64, order='C')
        cdef uintptr_t d_x_ptr = d_x_array.ptr
        cdef uintptr_t d_y_kf_ptr = \
            self._d_y_diff.ptr if diff else self.d_y.ptr
        cdef uintptr_t d_exog_kf_ptr = <uintptr_t> NULL
        if order.n_exog:
            d_exog_kf_ptr = self._d_exog_diff.ptr if diff else self.d_exog.ptr
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        n_obs_kf = (self.n_obs_diff if diff else self.n_obs)
        # Scratch workspace for the C++ routines
        cdef uintptr_t d_temp_mem = self._temp_mem.ptr
        arima_mem_ptr = new ARIMAMemory[double](
            order, <int> self.batch_size, <int> self.n_obs,
            <char*> d_temp_mem)
        batched_loglike(handle_[0], arima_mem_ptr[0], <double*> d_y_kf_ptr,
                        <double*> d_exog_kf_ptr, <int> self.batch_size,
                        <int> n_obs_kf, order_kf, <double*> d_x_ptr,
                        <double*> vec_loglike.data(), <bool> trans,
                        <bool> True, ll_method, <int> truncate)
        del arima_mem_ptr
        return np.array(vec_loglike, dtype=np.float64)
    @nvtx_annotate(message="tsa.arima.ARIMA._loglike_grad",
                   domain="cuml_python")
    @cuml.internals.api_base_return_any_skipall
    def _loglike_grad(self, x, h=1e-8, trans=True, method="ml", truncate=0):
        """Compute the gradient (via finite differencing) of the batched
        log-likelihood.

        Parameters
        ----------
        x : array-like
            Packed parameter array, grouped by series.
            Shape: (n_params * batch_size,)
        h : float
            The finite-difference stepsize
        trans : bool (default=True)
            Should the Jones' transform be applied?
            Note: The parameters from a fit model are already transformed.
        method : str (default="ml")
            Estimation method: "css" for sum-of-squares, "ml" for
            an estimation with statespace methods
        truncate : int (default=0)
            When using CSS, start the sum of squares after a given number of
            observations

        Returns
        -------
        grad : numpy.ndarray
            Batched log-likelihood gradient. Shape: (n_params * batch_size,)
            where n_params is the complexity of the model
        """
        N = self.complexity
        assert len(x) == N * self.batch_size
        cdef LoglikeMethod ll_method = CSS if method == "css" else MLE
        # CSS always works on differenced data; MLE only when
        # simple_differencing is enabled
        diff = ll_method != MLE or self.simple_differencing
        # Device output buffer for the gradient
        grad = CumlArray.empty(N * self.batch_size, np.float64)
        cdef uintptr_t d_grad = <uintptr_t> grad.ptr
        cdef ARIMAOrder order = self.order
        cdef ARIMAOrder order_kf = self.order_diff if diff else self.order
        d_x_array, *_ = \
            input_to_cuml_array(x, check_dtype=np.float64, order='C')
        cdef uintptr_t d_x_ptr = d_x_array.ptr
        cdef uintptr_t d_y_kf_ptr = \
            self._d_y_diff.ptr if diff else self.d_y.ptr
        cdef uintptr_t d_exog_kf_ptr = <uintptr_t> NULL
        if order.n_exog:
            d_exog_kf_ptr = self._d_exog_diff.ptr if diff else self.d_exog.ptr
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        # Scratch workspace for the C++ routines
        cdef uintptr_t d_temp_mem = self._temp_mem.ptr
        arima_mem_ptr = new ARIMAMemory[double](
            order, <int> self.batch_size, <int> self.n_obs,
            <char*> d_temp_mem)
        batched_loglike_grad(handle_[0], arima_mem_ptr[0],
                             <double*> d_y_kf_ptr, <double*> d_exog_kf_ptr,
                             <int> self.batch_size,
                             <int> (self.n_obs_diff if diff else self.n_obs),
                             order_kf, <double*> d_x_ptr, <double*> d_grad,
                             <double> h, <bool> trans, ll_method,
                             <int> truncate)
        del arima_mem_ptr
        return grad.to_output("numpy")
    @property
    def llf(self):
        """Log-likelihood of a fit model. Shape: (batch_size,)
        """
        # Implementation note: this is slightly different from batched_loglike
        # as it uses the device parameter arrays and not a host vector.
        # Also, it always uses the MLE method, trans=False and truncate=0
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        # Host-side output buffer filled by the C++ call
        cdef vector[double] vec_loglike
        vec_loglike.resize(self.batch_size)
        cdef ARIMAOrder order = self.order
        # With simple_differencing the Kalman filter runs on pre-differenced
        # data, so the reduced order / series length must be used instead
        cdef ARIMAOrder order_kf = \
            self.order_diff if self.simple_differencing else self.order
        cdef ARIMAParams[double] cpp_params = ARIMAParamsWrapper(self).params
        cdef uintptr_t d_y_kf_ptr = \
            self._d_y_diff.ptr if self.simple_differencing else self.d_y.ptr
        cdef uintptr_t d_exog_kf_ptr = <uintptr_t> NULL
        if order.n_exog:
            d_exog_kf_ptr = (self._d_exog_diff.ptr if self.simple_differencing
                             else self.d_exog.ptr)
        n_obs_kf = (self.n_obs_diff if self.simple_differencing
                    else self.n_obs)
        cdef LoglikeMethod ll_method = MLE
        # Scratch workspace for the C++ routines
        cdef uintptr_t d_temp_mem = self._temp_mem.ptr
        arima_mem_ptr = new ARIMAMemory[double](
            order, <int> self.batch_size, <int> self.n_obs,
            <char*> d_temp_mem)
        batched_loglike(handle_[0], arima_mem_ptr[0], <double*> d_y_kf_ptr,
                        <double*> d_exog_kf_ptr, <int> self.batch_size,
                        <int> n_obs_kf, order_kf, cpp_params,
                        <double*> vec_loglike.data(), <bool> False,
                        <bool> True, ll_method, <int> 0)
        del arima_mem_ptr
        return np.array(vec_loglike, dtype=np.float64)
    @nvtx_annotate(message="tsa.arima.ARIMA.unpack", domain="cuml_python")
    def unpack(self, x: Union[list, np.ndarray]):
        """Unpack linearized parameter vector `x` into the separate
        parameter arrays of the model

        Parameters
        ----------
        x : array-like
            Packed parameter array, grouped by series.
            Shape: (n_params * batch_size,)
        """
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        # Ensure the destination device arrays exist before scattering
        self._create_arrays()
        cdef ARIMAOrder order = self.order
        cdef ARIMAParams[double] cpp_params = ARIMAParamsWrapper(self).params
        # Copy the packed vector to device memory for the C++ call
        d_x_array, *_ = \
            input_to_cuml_array(x, check_dtype=np.float64, order='C')
        cdef uintptr_t d_x_ptr = d_x_array.ptr
        cpp_unpack(handle_[0], cpp_params, order, <int> self.batch_size,
                   <double*>d_x_ptr)
    @nvtx_annotate(message="tsa.arima.ARIMA.pack", domain="cuml_python")
    def pack(self) -> np.ndarray:
        """Pack parameters of the model into a linearized vector `x`

        Returns
        -------
        x : numpy ndarray
            Packed parameter array, grouped by series.
            Shape: (n_params * batch_size,)
        """
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        cdef ARIMAOrder order = self.order
        cdef ARIMAParams[double] cpp_params = ARIMAParamsWrapper(self).params
        # Device buffer gathered into by the C++ call, then copied to host
        d_x_array = CumlArray.empty(self.complexity * self.batch_size,
                                    np.float64)
        cdef uintptr_t d_x_ptr = d_x_array.ptr
        cpp_pack(handle_[0], cpp_params, order, <int> self.batch_size,
                 <double*>d_x_ptr)
        return d_x_array.to_output("numpy")
    @nvtx_annotate(message="tsa.arima.ARIMA._batched_transform",
                   domain="cuml_python")
    @cuml.internals.api_base_return_any_skipall
    def _batched_transform(self, x, isInv=False):
        """Applies Jones transform or inverse transform to a parameter vector

        Parameters
        ----------
        x : numpy.ndarray
            Packed parameter array, grouped by series.
            Shape: (n_params * batch_size,)
            Must be a host (numpy) array: its buffer is accessed via
            `x.ctypes.data` below.
        isInv : bool (default=False)
            If True, apply the inverse transform instead

        Returns
        -------
        Tx : numpy.ndarray
            Packed transformed parameter array, grouped by series.
            Shape: (n_params * batch_size,)
        """
        cdef ARIMAOrder order = self.order
        N = self.complexity
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        # Host output buffer (float64 by default)
        Tx = np.zeros(self.batch_size * N)
        # Scratch workspace for the C++ routines
        cdef uintptr_t d_temp_mem = self._temp_mem.ptr
        arima_mem_ptr = new ARIMAMemory[double](
            order, <int> self.batch_size, <int> self.n_obs,
            <char*> d_temp_mem)
        # Raw host pointers to the numpy buffers
        cdef uintptr_t x_ptr = x.ctypes.data
        cdef uintptr_t Tx_ptr = Tx.ctypes.data
        batched_jones_transform(
            handle_[0], arima_mem_ptr[0], order, <int> self.batch_size,
            <bool> isInv, <double*>x_ptr, <double*>Tx_ptr)
        del arima_mem_ptr
        return (Tx)
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tsa/stationarity.pyx | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from libc.stdint cimport uintptr_t
from libcpp cimport bool as boolcpp
import cuml.internals
from cuml.internals.array import CumlArray
from pylibraft.common.handle cimport handle_t
from pylibraft.common.handle import Handle
from cuml.internals.input_utils import input_to_cuml_array
cdef extern from "cuml/tsa/stationarity.h" namespace "ML":
    # Batched KPSS stationarity test on data differenced at order (d, D, s).
    # Fills `results` with one boolean per series.
    # NOTE(review): presumably true = "considered stationary" — confirm
    # against the C++ header documentation.
    # float32 overload
    int cpp_kpss "ML::Stationarity::kpss_test" (
        const handle_t& handle,
        const float* d_y,
        boolcpp* results,
        int batch_size,
        int n_obs,
        int d, int D, int s,
        float pval_threshold)

    # float64 overload
    int cpp_kpss "ML::Stationarity::kpss_test" (
        const handle_t& handle,
        const double* d_y,
        boolcpp* results,
        int batch_size,
        int n_obs,
        int d, int D, int s,
        double pval_threshold)
@cuml.internals.api_return_array(input_arg="y", get_output_type=True)
def kpss_test(y, d=0, D=0, s=0, pval_threshold=0.05,
              handle=None) -> CumlArray:
    """
    Perform the KPSS stationarity test on the data differenced according
    to the given order

    Parameters
    ----------
    y : dataframe or array-like (device or host)
        The time series data, assumed to have each time series in columns.
        Acceptable formats: cuDF DataFrame, cuDF Series, NumPy ndarray,
        Numba device ndarray, cuda array interface compliant array like CuPy.
    d: integer
        Order of simple differencing
    D: integer
        Order of seasonal differencing
    s: integer
        Seasonal period if D > 0
    pval_threshold : float
        The p-value threshold above which a series is considered stationary.
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.

    Returns
    -------
    stationarity : array-like (boolean)
        The stationarity test result for each series in the batch,
        converted to the configured output type
    """
    # Only float32/float64 inputs are supported; the overload is chosen below
    d_y, n_obs, batch_size, dtype = \
        input_to_cuml_array(y, check_dtype=[np.float32, np.float64])
    cdef uintptr_t d_y_ptr = d_y.ptr

    if handle is None:
        handle = Handle()
    cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()

    # One boolean result per series, written by the C++ kernel
    results = CumlArray.empty(batch_size, dtype=bool)
    cdef uintptr_t d_results = results.ptr

    # Call C++ function
    if dtype == np.float32:
        cpp_kpss(handle_[0],
                 <float*> d_y_ptr,
                 <boolcpp*> d_results,
                 <int> batch_size,
                 <int> n_obs,
                 <int> d, <int> D, <int> s,
                 <float> pval_threshold)
    elif dtype == np.float64:
        cpp_kpss(handle_[0],
                 <double*> d_y_ptr,
                 <boolcpp*> d_results,
                 <int> batch_size,
                 <int> n_obs,
                 <int> d, <int> D, <int> s,
                 <double> pval_threshold)

    return results
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tsa/auto_arima.pyx | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import typing
import itertools
from libc.stdint cimport uintptr_t
from libcpp cimport bool
from libcpp.vector cimport vector
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import('numpy')
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import('cupy')
import cuml.internals
from cuml.internals import logger
from cuml.common.array_descriptor import CumlArrayDescriptor
from cuml.internals.array import CumlArray
from cuml.internals.base import Base
from cuml.internals import _deprecate_pos_args
from pylibraft.common.handle cimport handle_t
from pylibraft.common.handle import Handle
from cuml.common import input_to_cuml_array
from cuml.common import using_output_type
from cuml.tsa.arima import ARIMA
from cuml.tsa.seasonality import seas_test
from cuml.tsa.stationarity import kpss_test
# TODO:
# - Box-Cox transformations? (parameter lambda)
# - Would a "one-fits-all" method be useful?
cdef extern from "cuml/tsa/auto_arima.h" namespace "ML":
    # --- Helpers to split a batch in two according to a boolean mask ---
    # Builds the scatter index for the mask; returns the size of the
    # "true" sub-batch.
    int divide_by_mask_build_index(const handle_t& handle, const bool* mask,
                                   int* index, int batch_size)

    # Scatters `d_in` into `d_out0` (mask false) and `d_out1` (mask true);
    # overloads for float, double and int data.
    void divide_by_mask_execute(const handle_t& handle, const float* d_in,
                                const bool* mask, const int* index,
                                float* d_out0, float* d_out1, int batch_size,
                                int n_obs)

    void divide_by_mask_execute(const handle_t& handle, const double* d_in,
                                const bool* mask, const int* index,
                                double* d_out0, double* d_out1,
                                int batch_size, int n_obs)

    void divide_by_mask_execute(const handle_t& handle, const int* d_in,
                                const bool* mask, const int* index,
                                int* d_out0, int* d_out1, int batch_size,
                                int n_obs)

    # --- Helpers to split a batch into n_sub groups by row-wise argmin ---
    # (used to assign each series to the model with the best criterion)
    void divide_by_min_build_index(const handle_t& handle,
                                   const float* d_matrix, int* d_batch,
                                   int* d_index, int* h_size,
                                   int batch_size, int n_sub)

    void divide_by_min_build_index(const handle_t& handle,
                                   const double* d_matrix, int* d_batch,
                                   int* d_index, int* h_size,
                                   int batch_size, int n_sub)

    void divide_by_min_execute(const handle_t& handle, const float* d_in,
                               const int* d_batch, const int* d_index,
                               float** hd_out, int batch_size, int n_sub,
                               int n_obs)

    void divide_by_min_execute(const handle_t& handle, const double* d_in,
                               const int* d_batch, const int* d_index,
                               double** hd_out, int batch_size, int n_sub,
                               int n_obs)

    void divide_by_min_execute(const handle_t& handle, const int* d_in,
                               const int* d_batch, const int* d_index,
                               int** hd_out, int batch_size, int n_sub,
                               int n_obs)

    # Maps each original batch id to (position, sub-batch) so results from
    # the sub-models can be reassembled in the original order
    void cpp_build_division_map "ML::build_division_map" (
        const handle_t& handle, const int* const* hd_id, const int* h_size,
        int* d_id_to_pos, int* d_id_to_model, int batch_size, int n_sub)

    # Merges the per-sub-batch series back into a single batched array;
    # float and double overloads
    void cpp_merge_series "ML::merge_series" (
        const handle_t& handle, const float* const* hd_in,
        const int* d_id_to_pos, const int* d_id_to_sub, float* d_out,
        int batch_size, int n_sub, int n_obs)

    void cpp_merge_series "ML::merge_series" (
        const handle_t& handle, const double* const* hd_in,
        const int* d_id_to_pos, const int* d_id_to_sub, double* d_out,
        int batch_size, int n_sub, int n_obs)

cdef extern from "cuml/tsa/batched_arima.hpp" namespace "ML":
    # Returns true if any of the n_elem values in d_y is missing (NaN)
    bool detect_missing(
        handle_t& handle, const double* d_y, int n_elem)

# Registry of the supported stationarity/seasonality tests, keyed by the
# names accepted by AutoARIMA.search (`test` and `seasonal_test` arguments)
tests_map = {
    "kpss": kpss_test,
    "seas": seas_test,
}
class AutoARIMA(Base):
"""
Implements a batched auto-ARIMA model for in- and out-of-sample
times-series prediction.
This interface offers a highly customizable search, with functionality
similar to the `forecast` and `fable` packages in R. It provides an
abstraction around the underlying ARIMA models to predict and forecast as
if using a single model.
Parameters
----------
endog : dataframe or array-like (device or host)
The time series data, assumed to have each time series in columns.
Acceptable formats: cuDF DataFrame, cuDF Series, NumPy ndarray,
Numba device ndarray, cuda array interface compliant array like CuPy.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
stream that will be used for the model's computations, so users can
run different models concurrently in different streams by creating
handles in several streams.
If it is None, a new one is created.
simple_differencing: bool or int, default=True
If True, the data is differenced before being passed to the Kalman
filter. If False, differencing is part of the state-space model.
See additional notes in the ARIMA docs
verbose : int or boolean, default=False
Sets logging level. It must be one of `cuml.common.logger.level_*`.
See :ref:`verbosity-levels` for more info.
output_type : {'input', 'array', 'dataframe', 'series', 'df_obj', \
'numba', 'cupy', 'numpy', 'cudf', 'pandas'}, default=None
Return results and set estimator attributes to the indicated output
type. If None, the output type set at the module level
(`cuml.global_settings.output_type`) will be used. See
:ref:`output-data-type-configuration` for more info.
convert_dtype : boolean
When set to True, the model will automatically convert the inputs to
np.float64.
Notes
-----
The interface was influenced by the R `fable` package:
See https://fable.tidyverts.org/reference/ARIMA.html
References
----------
A useful (though outdated) reference is the paper:
.. [1] Rob J. Hyndman, Yeasmin Khandakar, 2008. "Automatic Time Series
Forecasting: The 'forecast' Package for R", Journal of Statistical
Software 27
Examples
--------
.. code-block:: python
from cuml.tsa.auto_arima import AutoARIMA
model = AutoARIMA(y)
model.search(s=12, d=(0, 1), D=(0, 1), p=(0, 2, 4), q=(0, 2, 4),
P=range(2), Q=range(2), method="css", truncate=100)
model.fit(method="css-ml")
fc = model.forecast(20)
"""
d_y = CumlArrayDescriptor()
    @_deprecate_pos_args(version="21.06")
    def __init__(self,
                 endog,
                 *,
                 handle=None,
                 simple_differencing=True,
                 verbose=False,
                 output_type=None,
                 convert_dtype=True):
        """Store the input batch on device and validate it.

        See the class docstring for the parameter descriptions.
        """
        # Initialize base class
        super().__init__(handle=handle,
                         verbose=verbose,
                         output_type=output_type)
        self._set_base_attributes(output_type=endog)

        # Get device array. Float64 only for now.
        self.d_y, self.n_obs, self.batch_size, self.dtype \
            = input_to_cuml_array(
                endog, check_dtype=np.float64,
                convert_to_dtype=(np.float64 if convert_dtype else None))

        self.simple_differencing = simple_differencing

        # Reject inputs with missing observations (unsupported here)
        self._initial_calc()
    @cuml.internals.api_base_return_any_skipall
    def _initial_calc(self):
        """Validate the device data at construction time.

        AutoARIMA does not support missing observations yet, so raise
        early rather than failing later inside `search`.
        """
        cdef uintptr_t d_y_ptr = self.d_y.ptr
        cdef handle_t* handle_ = <handle_t*><size_t>self.handle.getHandle()
        # Detect missing observations
        missing = detect_missing(handle_[0], <double*> d_y_ptr,
                                 <int> self.batch_size * self.n_obs)
        if missing:
            raise ValueError(
                "Missing observations are not supported in AutoARIMA yet")
    @cuml.internals.api_return_any()
    def search(self,
               s=None,
               d=range(3),
               D=range(2),
               p=range(1, 4),
               q=range(1, 4),
               P=range(3),
               Q=range(3),
               fit_intercept="auto",
               ic="aicc",
               test="kpss",
               seasonal_test="seas",
               h: float = 1e-8,
               maxiter: int = 1000,
               method="auto",
               truncate: int = 0):
        """Searches through the specified model space and associates each
        series to the most appropriate model.

        Populates ``self.models`` (one ARIMA instance per selected order)
        as well as ``self.id_to_model`` / ``self.id_to_pos``, which are
        later used to reassemble per-model results in the original batch
        order.

        Parameters
        ----------
        s : int
            Seasonal period. None or 0 for non-seasonal time series
        d : int, sequence or generator
            Possible values for d (simple difference)
        D : int, sequence or generator
            Possible values for D (seasonal difference)
        p : int, sequence or generator
            Possible values for p (AR order)
        q : int, sequence or generator
            Possible values for q (MA order)
        P : int, sequence or generator
            Possible values for P (seasonal AR order)
        Q : int, sequence or generator
            Possible values for Q (seasonal MA order)
        fit_intercept : int, sequence, generator or "auto"
            Whether to fit an intercept. "auto" chooses based on the model
            parameters: it uses an intercept iff d + D <= 1
        ic : str
            Which information criterion to use for the model selection.
            Currently supported: AIC, AICc, BIC
        test : str
            Which stationarity test to use to choose d.
            Currently supported: KPSS
        seasonal_test : str
            Which seasonality test to use to choose D.
            Currently supported: seas
        h : float
            Finite-differencing step size used to compute gradients in ARIMA
        maxiter : int
            Maximum number of iterations of L-BFGS-B
        method : str
            Estimation method - "auto", "css", "css-ml" or "ml".
            CSS uses a fast sum-of-squares approximation.
            ML estimates the log-likelihood with statespace methods.
            CSS-ML starts with CSS and refines with ML.
            "auto" will use CSS for long seasonal time series, ML otherwise.
        truncate : int
            When using CSS, start the sum of squares after a given number of
            observations for better performance. Recommended for long time
            series when truncating doesn't lose too much information.
        """
        # Notes:
        # - We iteratively divide the dataset as we decide parameters, so
        # it's important to make sure that we don't keep the unused arrays
        # alive, so they can get garbage-collected.
        # - As we divide the dataset, we also keep track of the original
        # index of each series in the batch, to construct the final map at
        # the end.
        # Parse input parameters
        ic = ic.lower()
        test = test.lower()
        seasonal_test = seasonal_test.lower()
        if s is None or s == 1:  # R users might use s=1 for non-seasonal data
            s = 0
        if method == "auto":
            method = "css" if self.n_obs >= 100 and s >= 4 else "ml"
        # Original index
        d_index, *_ = input_to_cuml_array(np.r_[:self.batch_size],
                                          convert_to_dtype=np.int32)
        #
        # Choose the hyper-parameter D
        #
        # data_D maps D -> (sub-batch, original indices of its members)
        logger.info("Deciding D...")
        D_options = _parse_sequence("D", D, 0, 1)
        if not s:
            # Non-seasonal -> D=0
            data_D = {0: (self.d_y, d_index)}
        elif len(D_options) == 1:
            # D is specified by the user
            data_D = {D_options[0]: (self.d_y, d_index)}
        else:
            # D is chosen with a seasonal differencing test
            if seasonal_test not in tests_map:
                raise ValueError("Unknown seasonal diff test: {}"
                                 .format(seasonal_test))
            with using_output_type("cupy"):
                mask_cp = tests_map[seasonal_test](self.d_y, s)
            mask = input_to_cuml_array(mask_cp)[0]
            del mask_cp
            data_D = {}
            out0, index0, out1, index1 = _divide_by_mask(self.d_y, mask,
                                                         d_index)
            if out0 is not None:
                data_D[0] = (out0, index0)
            if out1 is not None:
                data_D[1] = (out1, index1)
            del mask, out0, index0, out1, index1
        #
        # Choose the hyper-parameter d
        #
        # data_dD maps (d, D) -> (sub-batch, original indices of its members)
        logger.info("Deciding d...")
        data_dD = {}
        for D_ in data_D:
            d_options = _parse_sequence("d", d, 0, 2 - D_)
            if len(d_options) == 1:
                # d is specified by the user
                data_dD[(d_options[0], D_)] = data_D[D_]
            else:
                # d is decided with stationarity tests
                if test not in tests_map:
                    raise ValueError("Unknown stationarity test: {}"
                                     .format(test))
                data_temp, id_temp = data_D[D_]
                # Try increasing d; series that become stationary at a given
                # d are assigned that value and removed from the pool
                for d_ in d_options[:-1]:
                    mask_cp = tests_map[test](data_temp.to_output("cupy"),
                                              d_, D_, s)
                    mask = input_to_cuml_array(mask_cp)[0]
                    del mask_cp
                    out0, index0, out1, index1 \
                        = _divide_by_mask(data_temp, mask, id_temp)
                    if out1 is not None:
                        data_dD[(d_, D_)] = (out1, index1)
                    if out0 is not None:
                        (data_temp, id_temp) = (out0, index0)
                    else:
                        break
                else:  # (when the for loop reaches its end naturally)
                    # The remaining series are assigned the max possible d
                    data_dD[(d_options[-1], D_)] = (data_temp, id_temp)
                del data_temp, id_temp, mask, out0, index0, out1, index1
        del data_D
        #
        # Choose the hyper-parameters p, q, P, Q, k
        #
        logger.info("Deciding p, q, P, Q, k...")
        p_options = _parse_sequence("p", p, 0, s - 1 if s else 4)
        q_options = _parse_sequence("q", q, 0, s - 1 if s else 4)
        P_options = _parse_sequence("P", P, 0, 4 if s else 0)
        Q_options = _parse_sequence("Q", Q, 0, 4 if s else 0)
        self.models = []
        id_tracker = []
        for (d_, D_) in data_dD:
            data_temp, id_temp = data_dD[(d_, D_)]
            batch_size = data_temp.shape[1] if len(data_temp.shape) > 1 else 1
            k_options = ([1 if d_ + D_ <= 1 else 0] if fit_intercept == "auto"
                         else _parse_sequence("k", fit_intercept, 0, 1))
            # Grid search
            all_ic = []
            all_orders = []
            for p_, q_, P_, Q_, k_ in itertools.product(p_options, q_options,
                                                        P_options, Q_options,
                                                        k_options):
                if p_ + q_ + P_ + Q_ + k_ == 0:
                    continue
                s_ = s if (P_ + D_ + Q_) else 0
                model = ARIMA(endog=data_temp.to_output("cupy"),
                              order=(p_, d_, q_),
                              seasonal_order=(P_, D_, Q_, s_),
                              fit_intercept=k_,
                              handle=self.handle,
                              simple_differencing=self.simple_differencing,
                              output_type="cupy")
                logger.debug("Fitting {} ({})".format(model, method))
                model.fit(h=h, maxiter=maxiter, method=method,
                          truncate=truncate)
                all_ic.append(model._ic(ic))
                all_orders.append((p_, q_, P_, Q_, s_, k_))
                del model
            # Organize the results into a matrix
            n_models = len(all_orders)
            ic_matrix, *_ = input_to_cuml_array(
                cp.concatenate([ic_arr.to_output('cupy').reshape(batch_size, 1)
                                for ic_arr in all_ic], 1))
            # Divide the batch, choosing the best model for each series
            sub_batches, sub_id = _divide_by_min(data_temp, ic_matrix, id_temp)
            for i in range(n_models):
                if sub_batches[i] is None:
                    continue
                p_, q_, P_, Q_, s_, k_ = all_orders[i]
                self.models.append(
                    ARIMA(sub_batches[i].to_output("cupy"), order=(p_, d_, q_),
                          seasonal_order=(P_, D_, Q_, s_), fit_intercept=k_,
                          handle=self.handle, output_type="cupy",
                          simple_differencing=self.simple_differencing))
                id_tracker.append(sub_id[i])
            del all_ic, all_orders, ic_matrix, sub_batches, sub_id
        # Build a map to match each series to its model and position in the
        # sub-batch
        logger.info("Finalizing...")
        self.id_to_model, self.id_to_pos = _build_division_map(id_tracker,
                                                               self.batch_size)
@cuml.internals.api_base_return_any_skipall
def fit(self,
h: float = 1e-8,
maxiter: int = 1000,
method="ml",
truncate: int = 0):
"""Fits the selected models for their respective series
Parameters
----------
h : float
Finite-differencing step size used to compute gradients in ARIMA
maxiter : int
Maximum number of iterations of L-BFGS-B
method : str
Estimation method - "css", "css-ml" or "ml".
CSS uses a fast sum-of-squares approximation.
ML estimates the log-likelihood with statespace methods.
CSS-ML starts with CSS and refines with ML.
truncate : int
When using CSS, start the sum of squares after a given number of
observations for better performance (but often a worse fit)
"""
for model in self.models:
logger.debug("Fitting {} ({})".format(model, method))
model.fit(h=h, maxiter=maxiter, method=method, truncate=truncate)
@cuml.internals.api_base_return_generic_skipall
def predict(
self,
start=0,
end=None,
level=None
) -> typing.Union[CumlArray, typing.Tuple[CumlArray, CumlArray,
CumlArray]]:
"""Compute in-sample and/or out-of-sample prediction for each series
Parameters
----------
start: int
Index where to start the predictions (0 <= start <= num_samples)
end:
Index where to end the predictions, excluded (end > start)
level: float or None (default = None)
Confidence level for prediction intervals, or None to return only
the point forecasts. 0 < level < 1
Returns
-------
y_p : array-like (device)
Predictions. Shape = (end - start, batch_size)
lower: array-like (device) (optional)
Lower limit of the prediction interval if level != None
Shape = (end - start, batch_size)
upper: array-like (device) (optional)
Upper limit of the prediction interval if level != None
Shape = (end - start, batch_size)
"""
# Compute predictions for each model
pred_list = []
lower_list = []
upper_list = []
for model in self.models:
if level is None:
pred, *_ = input_to_cuml_array(model.predict(start, end))
pred_list.append(pred)
else:
pred, low, upp = model.predict(start, end, level=level)
pred_list.append(input_to_cuml_array(pred)[0])
lower_list.append(input_to_cuml_array(low)[0])
upper_list.append(input_to_cuml_array(upp)[0])
# Put all the predictions together
y_p = _merge_series(pred_list, self.id_to_model, self.id_to_pos,
self.batch_size)
if level is not None:
lower = _merge_series(lower_list, self.id_to_model, self.id_to_pos,
self.batch_size)
upper = _merge_series(upper_list, self.id_to_model, self.id_to_pos,
self.batch_size)
# Return the results
if level is None:
return y_p
else:
return y_p, lower, upper
@cuml.internals.api_base_return_generic_skipall
def forecast(self,
nsteps: int,
level=None) -> typing.Union[CumlArray,
typing.Tuple[CumlArray,
CumlArray,
CumlArray]]:
"""Forecast `nsteps` into the future.
Parameters
----------
nsteps : int
The number of steps to forecast beyond end of the given series
level: float or None (default = None)
Confidence level for prediction intervals, or None to return only
the point forecasts. 0 < level < 1
Returns
-------
y_fc : array-like
Forecasts. Shape = (nsteps, batch_size)
lower: array-like (device) (optional)
Lower limit of the prediction interval if level != None
Shape = (end - start, batch_size)
upper: array-like (device) (optional)
Upper limit of the prediction interval if level != None
Shape = (end - start, batch_size)
"""
return self.predict(self.n_obs, self.n_obs + nsteps, level)
def summary(self):
"""Display a quick summary of the models selected by `search`
"""
model_list = sorted(self.models, key=lambda model: model.batch_size,
reverse=True)
print("ARIMA models used:", len(model_list))
for model in model_list:
print(" -", str(model))
# Helper functions
def _parse_sequence(name, seq_in, min_accepted, max_accepted):
"""Convert a sequence/generator/integer into a sorted list, keeping
only values within the accepted range
"""
seq_temp = [seq_in] if type(seq_in) is int else seq_in
seq_out = sorted(x for x in seq_temp
if x >= min_accepted and x <= max_accepted)
if len(seq_out) == 0:
raise ValueError("No valid option for {}".format(name))
else:
return seq_out
def _divide_by_mask(original, mask, batch_id, handle=None):
    """Divide a given batch into two sub-batches according to a boolean mask

    .. note:: in case the mask contains only False or only True, one sub-batch
        will be the original batch (not a copy!) and the other None

    Parameters
    ----------
    original : CumlArray (float32 or float64)
        Original batch
    mask : CumlArray (bool)
        Boolean mask: False for the 1st sub-batch and True for the second
    batch_id : CumlArray (int)
        Integer array to track the id of each member in the initial batch
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.

    Returns
    -------
    out0 : CumlArray (float32 or float64)
        Sub-batch 0, or None if empty
    batch0_id : CumlArray (int)
        Indices of the members of the sub-batch 0 in the initial batch,
        or None if empty
    out1 : CumlArray (float32 or float64)
        Sub-batch 1, or None if empty
    batch1_id : CumlArray (int)
        Indices of the members of the sub-batch 1 in the initial batch,
        or None if empty
    """
    # Only int32 ids are supported by the C++ helpers
    assert batch_id.dtype == np.int32
    dtype = original.dtype
    n_obs = original.shape[0]
    batch_size = original.shape[1] if len(original.shape) > 1 else 1
    if handle is None:
        handle = Handle()
    cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()
    index = CumlArray.empty(batch_size, np.int32)
    cdef uintptr_t d_index = index.ptr
    cdef uintptr_t d_mask = mask.ptr
    # Compute the index of each series in their new batch
    nb_true = divide_by_mask_build_index(handle_[0],
                                         <bool*> d_mask,
                                         <int*> d_index,
                                         <int> batch_size)
    # NOTE: the previous version allocated out0/out1 here and again in the
    # general branch below; the first allocation was dead (every branch
    # overwrites both names), so it has been removed to avoid wasting
    # device memory.
    # Type declarations (can't be in if-else statements)
    cdef uintptr_t d_out0
    cdef uintptr_t d_out1
    cdef uintptr_t d_original = original.ptr
    cdef uintptr_t d_batch0_id
    cdef uintptr_t d_batch1_id
    cdef uintptr_t d_batch_id
    # If the sub-batch 1 is empty
    if nb_true == 0:
        out0 = original
        out1 = None
        batch0_id = batch_id
        batch1_id = None
    # If the sub-batch 0 is empty
    elif nb_true == batch_size:
        out0 = None
        out1 = original
        batch0_id = None
        batch1_id = batch_id
    # If both sub-batches have elements
    else:
        out0 = CumlArray.empty((n_obs, batch_size - nb_true), dtype)
        out1 = CumlArray.empty((n_obs, nb_true), dtype)
        d_out0 = out0.ptr
        d_out1 = out1.ptr
        # Build the two sub-batches
        if dtype == np.float32:
            divide_by_mask_execute(handle_[0],
                                   <float*> d_original,
                                   <bool*> d_mask,
                                   <int*> d_index,
                                   <float*> d_out0,
                                   <float*> d_out1,
                                   <int> batch_size,
                                   <int> n_obs)
        else:
            divide_by_mask_execute(handle_[0],
                                   <double*> d_original,
                                   <bool*> d_mask,
                                   <int*> d_index,
                                   <double*> d_out0,
                                   <double*> d_out1,
                                   <int> batch_size,
                                   <int> n_obs)
        # Also keep track of the original id of the series in the batch
        # (scatter the ids with the same kernel, treating them as a batch
        # with a single observation per series)
        batch0_id = CumlArray.empty(batch_size - nb_true, np.int32)
        batch1_id = CumlArray.empty(nb_true, np.int32)
        d_batch0_id = batch0_id.ptr
        d_batch1_id = batch1_id.ptr
        d_batch_id = batch_id.ptr
        divide_by_mask_execute(handle_[0],
                               <int*> d_batch_id,
                               <bool*> d_mask,
                               <int*> d_index,
                               <int*> d_batch0_id,
                               <int*> d_batch1_id,
                               <int> batch_size,
                               <int> 1)
    return out0, batch0_id, out1, batch1_id
def _divide_by_min(original, metrics, batch_id, handle=None):
    """Divide a given batch into multiple sub-batches according to the values
    of the given metrics, by selecting the minimum value for each member

    Parameters
    ----------
    original : CumlArray (float32 or float64)
        Original batch
    metrics : CumlArray (float32 or float64)
        Matrix of shape (batch_size, n_sub) containing the metrics to minimize
    batch_id : CumlArray (int)
        Integer array to track the id of each member in the initial batch
    handle : cuml.Handle
        Specifies the cuml.handle that holds internal CUDA state for
        computations in this model. Most importantly, this specifies the CUDA
        stream that will be used for the model's computations, so users can
        run different models concurrently in different streams by creating
        handles in several streams.
        If it is None, a new one is created.

    Returns
    -------
    sub_batches : List[CumlArray] (float32 or float64)
        List of arrays containing each sub-batch, or None if empty
    sub_id : List[CumlArray] (int)
        List of arrays containing the indices of each member in the initial
        batch, or None if empty
    """
    # Only int32 ids are supported by the C++ helpers
    assert batch_id.dtype == np.int32
    dtype = original.dtype
    n_obs = original.shape[0]
    n_sub = metrics.shape[1]
    batch_size = original.shape[1] if len(original.shape) > 1 else 1
    if handle is None:
        handle = Handle()
    cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()
    batch_buffer = CumlArray.empty(batch_size, np.int32)
    index_buffer = CumlArray.empty(batch_size, np.int32)
    cdef vector[int] size_buffer
    size_buffer.resize(n_sub)
    cdef uintptr_t d_metrics = metrics.ptr
    cdef uintptr_t d_batch = batch_buffer.ptr
    cdef uintptr_t d_index = index_buffer.ptr
    # Compute which sub-batch each series belongs to, its position in
    # the sub-batch, and the size of each sub-batch
    if dtype == np.float32:
        divide_by_min_build_index(handle_[0],
                                  <float*> d_metrics,
                                  <int*> d_batch,
                                  <int*> d_index,
                                  <int*> size_buffer.data(),
                                  <int> batch_size,
                                  <int> n_sub)
    else:
        divide_by_min_build_index(handle_[0],
                                  <double*> d_metrics,
                                  <int*> d_batch,
                                  <int*> d_index,
                                  <int*> size_buffer.data(),
                                  <int> batch_size,
                                  <int> n_sub)
    # Build a list of cuML arrays for the sub-batches and a vector of pointers
    # to be passed to the next C++ step (NULL pointer for empty sub-batches)
    sub_batches = [CumlArray.empty((n_obs, s), dtype) if s else None
                   for s in size_buffer]
    cdef vector[uintptr_t] sub_ptr
    sub_ptr.resize(n_sub)
    for i in range(n_sub):
        if size_buffer[i]:
            sub_ptr[i] = <uintptr_t> sub_batches[i].ptr
        else:
            sub_ptr[i] = <uintptr_t> NULL
    # Execute the batch sub-division
    cdef uintptr_t d_original = original.ptr
    if dtype == np.float32:
        divide_by_min_execute(handle_[0],
                              <float*> d_original,
                              <int*> d_batch,
                              <int*> d_index,
                              <float**> sub_ptr.data(),
                              <int> batch_size,
                              <int> n_sub,
                              <int> n_obs)
    else:
        divide_by_min_execute(handle_[0],
                              <double*> d_original,
                              <int*> d_batch,
                              <int*> d_index,
                              <double**> sub_ptr.data(),
                              <int> batch_size,
                              <int> n_sub,
                              <int> n_obs)
    # Keep track of the id of the series if requested
    cdef vector[uintptr_t] id_ptr
    sub_id = [CumlArray.empty(s, np.int32) if s else None
              for s in size_buffer]
    id_ptr.resize(n_sub)
    for i in range(n_sub):
        if size_buffer[i]:
            id_ptr[i] = <uintptr_t> sub_id[i].ptr
        else:
            id_ptr[i] = <uintptr_t> NULL
    cdef uintptr_t d_batch_id = batch_id.ptr
    # Reuse the same scatter kernel on the ids, treated as a batch with a
    # single observation per series
    divide_by_min_execute(handle_[0],
                          <int*> d_batch_id,
                          <int*> d_batch,
                          <int*> d_index,
                          <int**> id_ptr.data(),
                          <int> batch_size,
                          <int> n_sub,
                          <int> 1)
    return sub_batches, sub_id
def _build_division_map(id_tracker, batch_size, handle=None):
    """Build a map to associate each batch member with a model and index in
    the associated sub-batch

    Parameters
    ----------
    id_tracker : List[CumlArray] (int)
        List of the index arrays of each sub-batch
    batch_size : int
        Size of the initial batch
    handle : cuml.Handle or None
        RAFT handle used for the computation; a new one is created if None

    Returns
    -------
    id_to_model : CumlArray (int)
        Associates each batch member with a model
    id_to_pos : CumlArray (int)
        Position of each member in the respective sub-batch
    """
    if handle is None:
        handle = Handle()
    cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()
    n_sub = len(id_tracker)
    id_to_pos = CumlArray.empty(batch_size, np.int32)
    id_to_model = CumlArray.empty(batch_size, np.int32)
    # Host-side vectors of device pointers and sub-batch sizes, passed to C++
    cdef vector[uintptr_t] id_ptr
    cdef vector[int] size_vec
    id_ptr.resize(n_sub)
    size_vec.resize(n_sub)
    for i in range(n_sub):
        id_ptr[i] = id_tracker[i].ptr
        size_vec[i] = len(id_tracker[i])
    cdef uintptr_t hd_id = <uintptr_t> id_ptr.data()
    cdef uintptr_t h_size = <uintptr_t> size_vec.data()
    cdef uintptr_t d_id_to_pos = id_to_pos.ptr
    cdef uintptr_t d_id_to_model = id_to_model.ptr
    cpp_build_division_map(handle_[0],
                           <const int**> hd_id,
                           <int*> h_size,
                           <int*> d_id_to_pos,
                           <int*> d_id_to_model,
                           <int> batch_size,
                           <int> n_sub)
    return id_to_model, id_to_pos
def _merge_series(data_in, id_to_sub, id_to_pos, batch_size, handle=None):
    """Merge multiple sub-batches into one batch according to the maps that
    associate each id in the unique batch to a sub-batch and a position in
    this sub-batch.

    Parameters
    ----------
    data_in : List[CumlArray] (float32 or float64)
        List of sub-batches to merge
    id_to_sub : CumlArray (int)
        Associates each member of the batch with a sub-batch
    id_to_pos : CumlArray (int)
        Position of each member of the batch in its respective sub-batch
    batch_size : int
        Size of the initial batch
    handle : cuml.Handle or None
        RAFT handle used for the computation; a new one is created if None

    Returns
    -------
    data_out : CumlArray (float32 or float64)
        Merged batch
    """
    # All sub-batches are assumed to share the same dtype and n_obs
    dtype = data_in[0].dtype
    n_obs = data_in[0].shape[0]
    n_sub = len(data_in)
    if handle is None:
        handle = Handle()
    cdef handle_t* handle_ = <handle_t*><size_t>handle.getHandle()
    # Host-side vector of device pointers, passed to C++
    cdef vector[uintptr_t] in_ptr
    in_ptr.resize(n_sub)
    for i in range(n_sub):
        in_ptr[i] = data_in[i].ptr
    data_out = CumlArray.empty((n_obs, batch_size), dtype)
    cdef uintptr_t hd_in = <uintptr_t> in_ptr.data()
    cdef uintptr_t d_id_to_pos = id_to_pos.ptr
    cdef uintptr_t d_id_to_sub = id_to_sub.ptr
    cdef uintptr_t d_out = data_out.ptr
    if dtype == np.float32:
        cpp_merge_series(handle_[0],
                         <const float**> hd_in,
                         <int*> d_id_to_pos,
                         <int*> d_id_to_sub,
                         <float*> d_out,
                         <int> batch_size,
                         <int> n_sub,
                         <int> n_obs)
    else:
        cpp_merge_series(handle_[0],
                         <const double**> hd_in,
                         <int*> d_id_to_pos,
                         <int*> d_id_to_sub,
                         <double*> d_out,
                         <int> batch_size,
                         <int> n_sub,
                         <int> n_obs)
    return data_out
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/tsa/__init__.py | # Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.tsa.holtwinters import ExponentialSmoothing
from cuml.tsa.arima import ARIMA
| 0 |
rapidsai_public_repos/cuml/python/cuml | rapidsai_public_repos/cuml/python/cuml/explainer/common.py | #
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.base import Base
from cuml.internals.input_utils import input_to_cupy_array
from pylibraft.common.handle import Handle
from cuml.internals.safe_imports import gpu_only_import
cp = gpu_only_import("cupy")
def get_tag_from_model_func(func, tag, default=None):
    """
    Look up an estimator tag on the model that `func` is bound to.

    Parameters
    ----------
    func: object
        Bound method whose owner may expose a `_get_tags` attribute.
    tag: str
        Name of the tag to look up.
    default: object (default = None)
        Value returned when the owner has no `_get_tags`, or the tag is
        missing or None.
    """
    owner = getattr(func, "__self__", None)
    get_tags = getattr(owner, "_get_tags", None)
    if get_tags is None:
        return default
    value = get_tags().get(tag)
    if value is None:
        return default
    return value
def get_handle_from_cuml_model_func(func, create_new=False):
    """
    Fetch the RAFT handle of the cuML estimator that `func` is bound to.

    Parameters
    ----------
    func: object
        Bound method whose owner may be a cuML `Base` estimator carrying
        a handle.
    create_new: boolean (default = False)
        When no handle could be fetched, return a freshly created `Handle`
        if True, otherwise None.
    """
    model = getattr(func, "__self__", None)
    # isinstance(None, Base) is False, so a missing owner falls through
    if isinstance(model, Base) and model.handle is not None:
        return model.handle
    return Handle() if create_new else None
def get_dtype_from_model_func(func, default=None):
    """
    Detect whether the model that `func` is bound to prefers a specific
    data type, by reading its `dtype` attribute.

    Parameters
    ----------
    func: object
        Bound method whose owner may expose a `dtype` attribute.
    default: object (default = None)
        Value returned when no dtype preference can be detected.
    """
    model = getattr(func, "__self__", None)
    preferred = getattr(model, "dtype", None)
    if preferred is None:
        return default
    return preferred
def model_func_call(X, model_func, gpu_model=False):
    """
    Call `model_func` on `X`, passing `X` directly for GPU-capable models
    or converting it to a NumPy array first otherwise.
    Results are returned as CuPy arrays.
    """
    if gpu_model:
        return input_to_cupy_array(X=model_func(X), order="K").array
    try:
        host_X = cp.asnumpy(X)
        y = input_to_cupy_array(model_func(host_X)).array
    except TypeError:
        raise TypeError(
            "Explainer can only explain models that can "
            "take GPU data or NumPy arrays as input."
        )
    return y
def get_cai_ptr(X):
    """
    Return the device pointer of an object exposing the
    `__cuda_array_interface__` protocol. Raises TypeError otherwise.
    """
    if not hasattr(X, "__cuda_array_interface__"):
        raise TypeError("X must support `__cuda_array_interface__`")
    # First element of "data" is the device pointer as an integer
    return X.__cuda_array_interface__["data"][0]
def get_link_fn_from_str_or_fn(link):
    """
    Resolve `link` into a link function.

    Parameters
    ----------
    link: str or callable
        Either the name of a registered link function (see `link_dict`),
        or a callable that itself has a callable `inverse` attribute.

    Returns
    -------
    callable
        The resolved link function.

    Raises
    ------
    ValueError
        If `link` is a string that names no known link function.
    TypeError
        If `link` is a callable without a valid inverse, or is neither a
        string nor a callable.
    """
    if isinstance(link, str):
        if link in link_dict:
            link_fn = link_dict[link]
        else:
            raise ValueError(
                "'link' string does not identify any known" " link functions. "
            )
    elif callable(link):
        if callable(getattr(link, "inverse", None)):
            link_fn = link
        else:
            raise TypeError("'link' function {} is not valid.".format(link))
    else:
        # Previously this case fell through to `return link_fn` with the
        # variable unbound, raising an opaque UnboundLocalError.
        raise TypeError("'link' function {} is not valid.".format(link))
    return link_fn
def output_list_shap_values(X, dimensions, output_type):
    """
    Format a sequence of per-output SHAP value arrays for the user.

    Parameters
    ----------
    X: sequence of arrays
        One SHAP value array per model output.
    dimensions: int
        Number of model outputs; when 1, a single array is returned
        instead of a list.
    output_type: str
        "cupy" to return the device arrays as-is; any other value converts
        each array to NumPy via `cp.asnumpy`.
    """
    if output_type == "cupy":
        if dimensions == 1:
            return X[0]
        # list(X) replaces the previous manual append loop
        return list(X)
    if dimensions == 1:
        return cp.asnumpy(X[0])
    return [cp.asnumpy(x) for x in X]
# link functions
# Each link function carries a callable `inverse` attribute so explainers can
# map between the model output space and the link space in both directions.
def identity(x):
    """Identity link: f(x) = x."""
    return x
def _identity_inverse(x):
    # The identity is its own inverse
    return x
def logit(x):
    """Logit link: f(x) = log(x / (1 - x)), mapping (0, 1) onto R."""
    return cp.log(x / (1 - x))
def _logit_inverse(x):
    # Logistic sigmoid, the inverse of the logit
    return 1 / (1 + cp.exp(-x))
identity.inverse = _identity_inverse
logit.inverse = _logit_inverse
# Registry used by get_link_fn_from_str_or_fn to resolve string names
link_dict = {"identity": identity, "logit": logit}
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.