repo_id
stringlengths
21
96
file_path
stringlengths
31
155
content
stringlengths
1
92.9M
__index_level_0__
int64
0
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/datasets/make_regression.hpp
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdint> namespace raft { class handle_t; } namespace ML { namespace Datasets { /** * @brief GPU-equivalent of sklearn.datasets.make_regression as documented at: * https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_regression.html * * @param[in] handle cuML handle * @param[out] out Row-major (samples, features) matrix to store * the problem data * @param[out] values Row-major (samples, targets) matrix to store * the values for the regression problem * @param[in] n_rows Number of samples * @param[in] n_cols Number of features * @param[in] n_informative Number of informative features (non-zero * coefficients) * @param[out] coef Row-major (features, targets) matrix to store * the coefficients used to generate the values * for the regression problem. If nullptr is * given, nothing will be written * @param[in] n_targets Number of targets (generated values per sample) * @param[in] bias A scalar that will be added to the values * @param[in] effective_rank The approximate rank of the data matrix (used * to create correlations in the data). 
-1 is the * code to use well-conditioned data * @param[in] tail_strength The relative importance of the fat noisy tail * of the singular values profile if * effective_rank is not -1 * @param[in] noise Standard deviation of the gaussian noise * applied to the output * @param[in] shuffle Shuffle the samples and the features * @param[in] seed Seed for the random number generator */ void make_regression(const raft::handle_t& handle, float* out, float* values, int64_t n_rows, int64_t n_cols, int64_t n_informative, float* coef = nullptr, int64_t n_targets = 1LL, float bias = 0.0f, int64_t effective_rank = -1LL, float tail_strength = 0.5f, float noise = 0.0f, bool shuffle = true, uint64_t seed = 0ULL); void make_regression(const raft::handle_t& handle, double* out, double* values, int64_t n_rows, int64_t n_cols, int64_t n_informative, double* coef = nullptr, int64_t n_targets = 1LL, double bias = 0.0, int64_t effective_rank = -1LL, double tail_strength = 0.5, double noise = 0.0, bool shuffle = true, uint64_t seed = 0ULL); void make_regression(const raft::handle_t& handle, float* out, float* values, int n_rows, int n_cols, int n_informative, float* coef = nullptr, int n_targets = 1LL, float bias = 0.0f, int effective_rank = -1LL, float tail_strength = 0.5f, float noise = 0.0f, bool shuffle = true, uint64_t seed = 0ULL); void make_regression(const raft::handle_t& handle, double* out, double* values, int n_rows, int n_cols, int n_informative, double* coef = nullptr, int n_targets = 1LL, double bias = 0.0, int effective_rank = -1LL, double tail_strength = 0.5, double noise = 0.0, bool shuffle = true, uint64_t seed = 0ULL); } // namespace Datasets } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/linear_model/glm_api.h
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/linear_model/qn.h> #include <cuml/cuml_api.h> #include <stdbool.h> #ifdef __cplusplus namespace ML::GLM { extern "C" { #endif cumlError_t cumlSpQnFit(cumlHandle_t cuml_handle, const qn_params* pams, float* X, float* y, int N, int D, int C, float* w0, float* f, int* num_iters, bool X_col_major); cumlError_t cumlDpQnFit(cumlHandle_t cuml_handle, const qn_params* pams, double* X, double* y, int N, int D, int C, double* w0, double* f, int* num_iters, bool X_col_major); #ifdef __cplusplus } } #endif
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/linear_model/glm.hpp
/* * Copyright (c) 2018-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/linear_model/qn.h> #include <raft/core/handle.hpp> namespace ML { namespace GLM { /** * @defgroup olsFit fit an ordinary least squares model * @param input device pointer to feature matrix n_rows x n_cols * @param n_rows number of rows of the feature matrix * @param n_cols number of columns of the feature matrix * @param labels device pointer to label vector of length n_rows * @param coef device pointer to hold the solution for weights of size n_cols * @param intercept host pointer to hold the solution for bias term of size 1 * @param fit_intercept if true, fit intercept * @param normalize if true, normalize data to zero mean, unit variance * @param algo specifies which solver to use (0: SVD, 1: Eigendecomposition, 2: * QR-decomposition) * @param sample_weight device pointer to sample weight vector of length n_rows (nullptr for uniform weights) This vector is modified during the computation * @{ */ void olsFit(const raft::handle_t& handle, float* input, size_t n_rows, size_t n_cols, float* labels, float* coef, float* intercept, bool fit_intercept, bool normalize, int algo = 0, float* sample_weight = nullptr); void olsFit(const raft::handle_t& handle, double* input, size_t n_rows, size_t n_cols, double* labels, double* coef, double* intercept, bool fit_intercept, bool normalize, int algo = 0, double* sample_weight = nullptr); /** @} */ /** * @defgroup 
ridgeFit fit a ridge regression model (l2 regularized least squares) * @param input device pointer to feature matrix n_rows x n_cols * @param n_rows number of rows of the feature matrix * @param n_cols number of columns of the feature matrix * @param labels device pointer to label vector of length n_rows * @param alpha host pointer to parameters of the l2 regularizer * @param n_alpha number of regularization parameters * @param coef device pointer to hold the solution for weights of size n_cols * @param intercept host pointer to hold the solution for bias term of size 1 * @param fit_intercept if true, fit intercept * @param normalize if true, normalize data to zero mean, unit variance * @param algo specifies which solver to use (0: SVD, 1: Eigendecomposition) * @param sample_weight device pointer to sample weight vector of length n_rows (nullptr for uniform weights) This vector is modified during the computation * @{ */ void ridgeFit(const raft::handle_t& handle, float* input, size_t n_rows, size_t n_cols, float* labels, float* alpha, int n_alpha, float* coef, float* intercept, bool fit_intercept, bool normalize, int algo = 0, float* sample_weight = nullptr); void ridgeFit(const raft::handle_t& handle, double* input, size_t n_rows, size_t n_cols, double* labels, double* alpha, int n_alpha, double* coef, double* intercept, bool fit_intercept, bool normalize, int algo = 0, double* sample_weight = nullptr); /** @} */ /** * @defgroup glmPredict to make predictions with a fitted ordinary least squares and ridge * regression model * @param input device pointer to feature matrix n_rows x n_cols * @param n_rows number of rows of the feature matrix * @param n_cols number of columns of the feature matrix * @param coef weights of the model * @param intercept bias term of the model * @param preds device pointer to store predictions of size n_rows * @{ */ void gemmPredict(const raft::handle_t& handle, const float* input, size_t n_rows, size_t n_cols, const float* coef, float 
intercept, float* preds); void gemmPredict(const raft::handle_t& handle, const double* input, size_t n_rows, size_t n_cols, const double* coef, double intercept, double* preds); /** @} */ /** * @brief Fit a GLM using quasi newton methods. * * @param cuml_handle reference to raft::handle_t object * @param params model parameters * @param X device pointer to a contiguous feature matrix of dimension [N, D] * @param X_col_major true if X is stored column-major * @param y device pointer to label vector of length N * @param N number of examples * @param D number of features * @param C number of outputs (number of classes or `1` for regression) * @param w0 device pointer of size (D + (fit_intercept ? 1 : 0)) * C with initial point, * overwritten by final result. * @param f host pointer holding the final objective value * @param num_iters host pointer holding the actual number of iterations taken * @param sample_weight device pointer to sample weight vector of length n_rows (nullptr for uniform weights) * @param svr_eps epsilon parameter for svr */ template <typename T, typename I = int> void qnFit(const raft::handle_t& cuml_handle, const qn_params& params, T* X, bool X_col_major, T* y, I N, I D, I C, T* w0, T* f, int* num_iters, T* sample_weight = nullptr, T svr_eps = 0); /** * @brief Fit a GLM using quasi newton methods. * * @param cuml_handle reference to raft::handle_t object * @param params model parameters * @param X_values feature matrix values (CSR format), length = X_nnz * @param X_cols feature matrix columns (CSR format), length = X_nnz, range = [0, ... D-1] * @param X_row_ids feature matrix compressed row ids (CSR format), * length = N + 1, range = [0, ... 
X_nnz] * @param X_nnz number of non-zero entries in the feature matrix (CSR format) * @param y device pointer to label vector of length N * @param N number of examples * @param D number of features * @param C number of outputs (number of classes or `1` for regression) * @param w0 device pointer of size (D + (fit_intercept ? 1 : 0)) * C with initial point, * overwritten by final result. * @param f host pointer holding the final objective value * @param num_iters host pointer holding the actual number of iterations taken * @param sample_weight device pointer to sample weight vector of length n_rows (nullptr for uniform weights) * @param svr_eps epsilon parameter for svr */ template <typename T, typename I = int> void qnFitSparse(const raft::handle_t& cuml_handle, const qn_params& params, T* X_values, I* X_cols, I* X_row_ids, I X_nnz, T* y, I N, I D, I C, T* w0, T* f, int* num_iters, T* sample_weight = nullptr, T svr_eps = 0); /** * @brief Obtain the confidence scores of samples * * @param cuml_handle reference to raft::handle_t object * @param params model parameters * @param X device pointer to a contiguous feature matrix of dimension [N, D] * @param X_col_major true if X is stored column-major * @param N number of examples * @param D number of features * @param C number of outputs (number of classes or `1` for regression) * @param coefs device pointer to model coefficients. 
Length D if fit_intercept == false * else D+1 * @param scores device pointer to confidence scores of length N (for binary logistic: [0,1], * for multinomial: [0,...,C-1]) */ template <typename T, typename I = int> void qnDecisionFunction(const raft::handle_t& cuml_handle, const qn_params& params, T* X, bool X_col_major, I N, I D, I C, T* coefs, T* scores); /** * @brief Obtain the confidence scores of samples * * @param cuml_handle reference to raft::handle_t object * @param params model parameters * @param X_values feature matrix values (CSR format), length = X_nnz * @param X_cols feature matrix columns (CSR format), length = X_nnz, range = [0, ... D-1] * @param X_row_ids feature matrix compressed row ids (CSR format), * length = N + 1, range = [0, ... X_nnz] * @param X_nnz number of non-zero entries in the feature matrix (CSR format) * @param N number of examples * @param D number of features * @param C number of outputs (number of classes or `1` for regression) * @param coefs device pointer to model coefficients. Length D if fit_intercept == false * else D+1 * @param scores device pointer to confidence scores of length N (for binary logistic: [0,1], * for multinomial: [0,...,C-1]) */ template <typename T, typename I = int> void qnDecisionFunctionSparse(const raft::handle_t& cuml_handle, const qn_params& params, T* X_values, I* X_cols, I* X_row_ids, I X_nnz, I N, I D, I C, T* coefs, T* scores); /** * @brief Predict a GLM using quasi newton methods. * * @param cuml_handle reference to raft::handle_t object * @param params model parameters * @param X device pointer to a contiguous feature matrix of dimension [N, D] * @param X_col_major true if X is stored column-major * @param N number of examples * @param D number of features * @param C number of outputs (number of classes or `1` for regression) * @param coefs device pointer to model coefficients. 
Length D if fit_intercept == false * else D+1 * @param preds device pointer to predictions of length N (for binary logistic: [0,1], * for multinomial: [0,...,C-1]) */ template <typename T, typename I = int> void qnPredict(const raft::handle_t& cuml_handle, const qn_params& params, T* X, bool X_col_major, I N, I D, I C, T* coefs, T* preds); /** * @brief Predict a GLM using quasi newton methods. * * @param cuml_handle reference to raft::handle_t object * @param params model parameters * @param X_values feature matrix values (CSR format), length = X_nnz * @param X_cols feature matrix columns (CSR format), length = X_nnz, range = [0, ... D-1] * @param X_row_ids feature matrix compressed row ids (CSR format), * length = N + 1, range = [0, ... X_nnz] * @param X_nnz number of non-zero entries in the feature matrix (CSR format) * @param N number of examples * @param D number of features * @param C number of outputs (number of classes or `1` for regression) * @param coefs device pointer to model coefficients. Length D if fit_intercept == false * else D+1 * @param preds device pointer to predictions of length N (for binary logistic: [0,1], * for multinomial: [0,...,C-1]) */ template <typename T, typename I = int> void qnPredictSparse(const raft::handle_t& cuml_handle, const qn_params& params, T* X_values, I* X_cols, I* X_row_ids, I X_nnz, I N, I D, I C, T* coefs, T* preds); } // namespace GLM } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/linear_model/ols_mg.hpp
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/linear_model/glm.hpp> #include <cumlprims/opg/matrix/data.hpp> #include <cumlprims/opg/matrix/part_descriptor.hpp> namespace ML { namespace OLS { namespace opg { /** * @brief performs MNMG fit operation for the ridge regression * @param[in] handle: the internal cuml handle object * @param[in] input_data: vector holding all partitions for that rank * @param[in] input_desc: PartDescriptor object for the input * @param[in] labels: labels data * @param[out] coef: learned regression coefficients * @param[out] intercept: intercept value * @param[in] fit_intercept: fit intercept or not * @param[in] normalize: normalize the data or not * @param[in] algo: which algorithm is used for OLS. 0 is for SVD, 1 is for eig. 
* @param[in] verbose */ void fit(raft::handle_t& handle, std::vector<MLCommon::Matrix::Data<float>*>& input_data, MLCommon::Matrix::PartDescriptor& input_desc, std::vector<MLCommon::Matrix::Data<float>*>& labels, float* coef, float* intercept, bool fit_intercept, bool normalize, int algo, bool verbose); void fit(raft::handle_t& handle, std::vector<MLCommon::Matrix::Data<double>*>& input_data, MLCommon::Matrix::PartDescriptor& input_desc, std::vector<MLCommon::Matrix::Data<double>*>& labels, double* coef, double* intercept, bool fit_intercept, bool normalize, int algo, bool verbose); /** * @brief performs MNMG prediction for OLS * @param[in] handle: the internal cuml handle object * @param[in] rank_sizes: includes all the partition size information for the rank * @param[in] n_parts: number of partitions * @param[in] input: input data * @param[in] n_rows: number of rows of input data * @param[in] n_cols: number of cols of input data * @param[in] coef: OLS coefficients * @param[in] intercept: the fit intercept * @param[out] preds: predictions * @param[in] verbose */ void predict(raft::handle_t& handle, MLCommon::Matrix::RankSizePair** rank_sizes, size_t n_parts, MLCommon::Matrix::Data<float>** input, size_t n_rows, size_t n_cols, float* coef, float intercept, MLCommon::Matrix::Data<float>** preds, bool verbose); void predict(raft::handle_t& handle, MLCommon::Matrix::RankSizePair** rank_sizes, size_t n_parts, MLCommon::Matrix::Data<double>** input, size_t n_rows, size_t n_cols, double* coef, double intercept, MLCommon::Matrix::Data<double>** preds, bool verbose); }; // end namespace opg }; // end namespace OLS }; // end namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/linear_model/qn.h
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <stdbool.h> #ifdef __cplusplus namespace ML::GLM { extern "C" { #endif /** Loss function types supported by the Quasi-Newton solvers. */ enum qn_loss_type { /** Logistic classification. * Expected target: {0, 1}. */ QN_LOSS_LOGISTIC = 0, /** L2 regression. * Expected target: R. */ QN_LOSS_SQUARED = 1, /** Softmax classification.. * Expected target: {0, 1, ...}. */ QN_LOSS_SOFTMAX = 2, /** Hinge. * Expected target: {0, 1}. */ QN_LOSS_SVC_L1 = 3, /** Squared-hinge. * Expected target: {0, 1}. */ QN_LOSS_SVC_L2 = 4, /** Epsilon-insensitive. * Expected target: R. */ QN_LOSS_SVR_L1 = 5, /** Epsilon-insensitive-squared. * Expected target: R. */ QN_LOSS_SVR_L2 = 6, /** L1 regression. * Expected target: R. */ QN_LOSS_ABS = 7, /** Someone forgot to set the loss type! */ QN_LOSS_UNKNOWN = 99 }; #ifndef __cplusplus typedef enum qn_loss_type qn_loss_type; #endif struct qn_params { /** Loss type. */ qn_loss_type loss; /** Regularization: L1 component. */ double penalty_l1; /** Regularization: L2 component. */ double penalty_l2; /** Convergence criteria: the threshold on the gradient. */ double grad_tol; /** Convergence criteria: the threshold on the function change. */ double change_tol; /** Maximum number of iterations. */ int max_iter; /** Maximum number of linesearch (inner loop) iterations. 
*/ int linesearch_max_iter; /** Number of vectors approximating the hessian (l-bfgs). */ int lbfgs_memory; /** Triggers extra output when greater than zero. */ int verbose; /** Whether to fit the bias term. */ bool fit_intercept; /** * Whether to divide the L1 and L2 regularization parameters by the sample size. * * Note, the defined QN loss functions normally are scaled for the sample size, * e.g. the average across the data rows is calculated. * Enabling `penalty_normalized` makes this solver's behavior compatible to those solvers, * which do not scale the loss functions (like sklearn.LogisticRegression()). */ bool penalty_normalized; #ifdef __cplusplus qn_params() : loss(QN_LOSS_UNKNOWN), penalty_l1(0), penalty_l2(0), grad_tol(1e-4), change_tol(1e-5), max_iter(1000), linesearch_max_iter(50), lbfgs_memory(5), verbose(0), fit_intercept(true), penalty_normalized(true) { } #endif }; #ifndef __cplusplus typedef struct qn_params qn_params; #endif #ifdef __cplusplus } } #endif
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/linear_model/ridge_mg.hpp
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cumlprims/opg/matrix/data.hpp> #include <cumlprims/opg/matrix/part_descriptor.hpp> #include "glm.hpp" namespace ML { namespace Ridge { namespace opg { /** * @brief performs MNMG fit operation for the ridge regression * @param[in] handle: the internal cuml handle object * @param[in] input_data: vector holding all partitions for that rank * @param[in] input_desc: PartDescriptor object for the input * @param[in] labels: labels data * @param[in] alpha: ridge parameter * @param[in] n_alpha: number of ridge parameters. Only one parameter is supported right now. 
* @param[out] coef: learned regression coefficients * @param[out] intercept: intercept value * @param[in] fit_intercept: fit intercept or not * @param[in] normalize: normalize the data or not * @param[in] algo: the algorithm to use for fitting * @param[in] verbose */ void fit(raft::handle_t& handle, std::vector<MLCommon::Matrix::Data<float>*>& input_data, MLCommon::Matrix::PartDescriptor& input_desc, std::vector<MLCommon::Matrix::Data<float>*>& labels, float* alpha, int n_alpha, float* coef, float* intercept, bool fit_intercept, bool normalize, int algo, bool verbose); void fit(raft::handle_t& handle, std::vector<MLCommon::Matrix::Data<double>*>& input_data, MLCommon::Matrix::PartDescriptor& input_desc, std::vector<MLCommon::Matrix::Data<double>*>& labels, double* alpha, int n_alpha, double* coef, double* intercept, bool fit_intercept, bool normalize, int algo, bool verbose); /** * @brief performs MNMG prediction for OLS * @param[in] handle: the internal cuml handle object * @param[in] rank_sizes: includes all the partition size information for the rank * @param[in] n_parts: number of partitions * @param[in] input: input data * @param[in] n_rows: number of rows of input data * @param[in] n_cols: number of cols of input data * @param[in] coef: OLS coefficients * @param[in] intercept: the fit intercept * @param[out] preds: predictions * @param[in] verbose */ void predict(raft::handle_t& handle, MLCommon::Matrix::RankSizePair** rank_sizes, size_t n_parts, MLCommon::Matrix::Data<float>** input, size_t n_rows, size_t n_cols, float* coef, float intercept, MLCommon::Matrix::Data<float>** preds, bool verbose); void predict(raft::handle_t& handle, MLCommon::Matrix::RankSizePair** rank_sizes, size_t n_parts, MLCommon::Matrix::Data<double>** input, size_t n_rows, size_t n_cols, double* coef, double intercept, MLCommon::Matrix::Data<double>** preds, bool verbose); }; // end namespace opg }; // end namespace Ridge }; // end namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/linear_model/preprocess_mg.hpp
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cumlprims/opg/matrix/data.hpp> #include <cumlprims/opg/matrix/part_descriptor.hpp> #include <raft/core/comms.hpp> #include <raft/core/handle.hpp> namespace ML { namespace GLM { namespace opg { void preProcessData(raft::handle_t& handle, std::vector<MLCommon::Matrix::Data<float>*>& input_data, MLCommon::Matrix::PartDescriptor& input_desc, std::vector<MLCommon::Matrix::Data<float>*>& labels, float* mu_input, float* mu_labels, float* norm2_input, bool fit_intercept, bool normalize, cudaStream_t* streams, int n_streams, bool verbose); void preProcessData(raft::handle_t& handle, std::vector<MLCommon::Matrix::Data<double>*>& input_data, MLCommon::Matrix::PartDescriptor& input_desc, std::vector<MLCommon::Matrix::Data<double>*>& labels, double* mu_input, double* mu_labels, double* norm2_input, bool fit_intercept, bool normalize, cudaStream_t* streams, int n_streams, bool verbose); void postProcessData(raft::handle_t& handle, std::vector<MLCommon::Matrix::Data<float>*>& input_data, MLCommon::Matrix::PartDescriptor& input_desc, std::vector<MLCommon::Matrix::Data<float>*>& labels, float* coef, float* intercept, float* mu_input, float* mu_labels, float* norm2_input, bool fit_intercept, bool normalize, cudaStream_t* streams, int n_streams, bool verbose); void postProcessData(raft::handle_t& handle, std::vector<MLCommon::Matrix::Data<double>*>& input_data, 
MLCommon::Matrix::PartDescriptor& input_desc, std::vector<MLCommon::Matrix::Data<double>*>& labels, double* coef, double* intercept, double* mu_input, double* mu_labels, double* norm2_input, bool fit_intercept, bool normalize, cudaStream_t* streams, int n_streams, bool verbose); }; // end namespace opg }; // namespace GLM }; // end namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/linear_model/qn_mg.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_runtime.h> #include <cuml/common/logger.hpp> #include <cuml/linear_model/qn.h> #include <raft/core/comms.hpp> #include <cumlprims/opg/matrix/data.hpp> #include <cumlprims/opg/matrix/part_descriptor.hpp> #include <vector> using namespace MLCommon; namespace ML { namespace GLM { namespace opg { /** * @brief Calculate unique class labels across multiple GPUs in a multi-node environment. * @param[in] handle: the internal cuml handle object * @param[in] input_desc: PartDescriptor object for the input * @param[in] labels: labels data * @returns host vector that stores the distinct labels */ std::vector<float> getUniquelabelsMG(const raft::handle_t& handle, Matrix::PartDescriptor& input_desc, std::vector<Matrix::Data<float>*>& labels); /** * @brief performs MNMG fit operation for the logistic regression using quasi newton methods * @param[in] handle: the internal cuml handle object * @param[in] input_data: vector holding all partitions for that rank * @param[in] input_desc: PartDescriptor object for the input * @param[in] labels: labels data * @param[out] coef: learned coefficients * @param[in] pams: model parameters * @param[in] X_col_major: true if X is stored column-major * @param[in] n_classes: number of outputs (number of classes or `1` for regression) * @param[out] f: host pointer holding the final objective value * @param[out] num_iters: host pointer holding the actual 
number of iterations taken */ void qnFit(raft::handle_t& handle, std::vector<Matrix::Data<float>*>& input_data, Matrix::PartDescriptor& input_desc, std::vector<Matrix::Data<float>*>& labels, float* coef, const qn_params& pams, bool X_col_major, int n_classes, float* f, int* num_iters); }; // namespace opg }; // namespace GLM }; // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/svm/svm_model.h
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace ML { namespace SVM { // Contains array(s) for matrix storage template <typename math_t> struct SupportStorage { int nnz = -1; int* indptr = nullptr; int* indices = nullptr; math_t* data = nullptr; }; /** * Parameters that describe a trained SVM model. * All pointers are device pointers. */ template <typename math_t> struct SvmModel { int n_support; //!< Number of support vectors int n_cols; //!< Number of features math_t b; //!< Constant used in the decision function //! Non-zero dual coefficients ( dual_coef[i] = \f$ y_i \alpha_i \f$). //! Size [n_support]. math_t* dual_coefs; //! Support vector storage - can contain either CSR or dense SupportStorage<math_t> support_matrix; //! Indices (from the training set) of the support vectors, size [n_support]. int* support_idx; int n_classes; //!< Number of classes found in the input labels //! Device pointer for the unique classes. Size [n_classes] math_t* unique_labels; }; }; // namespace SVM }; // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/svm/svm_parameter.h
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace ML { namespace SVM { enum SvmType { C_SVC, NU_SVC, EPSILON_SVR, NU_SVR }; /** * Numerical input parameters for an SVM. * * There are several parameters that control how long we train. The training * stops if: * - max_iter iterations are reached. If you pass -1, then * max_diff = 100 * n_rows * - the diff becomes less the tol * - the diff is changing less then 0.001*tol in nochange_steps consecutive * outer iterations. */ struct SvmParameter { double C; //!< Penalty term C double cache_size; //!< kernel cache size in MiB //! maximum number of outer SMO iterations. Use -1 to let the SMO solver set //! a default value (100*n_rows). int max_iter; int nochange_steps; //<! Number of steps to continue with non-changing diff double tol; //!< Tolerance used to stop fitting. int verbosity; //!< Print information about training double epsilon; //!< epsilon parameter for epsilon-SVR SvmType svmType; }; }; // namespace SVM }; // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/svm/svm_api.h
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/cuml_api.h> #ifdef __cplusplus extern "C" { #endif typedef enum cumlSvmKernelType { LINEAR, POLYNOMIAL, RBF, TANH } cumlSvmKernelType; /** * @defgroup SVM C-wrapper to C++ implementation of Support Vector Machine * * The output device buffers shall be unallocated on entry. * Note that n_support, n_classes and b are host scalars, all other output * pointers are device pointers. * * @param [in] handle the cuML handle * @param [in] input device pointer for the input data in column major format. * Size n_rows x n_cols. * @param [in] n_rows number of rows * @param [in] n_cols number of columns * @param [in] labels device pointer for the labels. Size n_rows. * @param [in] C penalty term * @param [in] cache_size size of kernel cache in device memory (MiB) * @param [in] max_iter maximum number of outer iterations in SmoSolver * @param [in] nochange_steps max number of outer iterations without change in convergence * @param [in] tol tolerance to stop fitting * @param [in] verbosity Fine grained control over logging of useful information * as algorithm executes. 
Currently passing anything greater than or equal to * CUML_LEVEL_INFO will make it execute quietly * @param [in] kernel type of kernel (LINEAR, POLYNOMIAL, RBF or TANH) * @param [in] degree of polynomial kernel (ignored by others) * @param [in] gamma multiplier in the RBF, POLYNOMIAL and TANH kernels * @param [in] coef0 additive constant in poly and tanh kernels * @param [out] n_support number of support vectors * @param [out] b constant used in the decision function * @param [out] dual_coefs non-zero dual coefficients, size [n_support]. * @param [out] x_support support vectors in column major format. * Size [n_support x n_cols]. * @param [out] support_idx indices (from the traning set) of the support * vectors, size [n_support]. * @param [out] n_classes number of classes found in the input labels * @param [out] unique_labels device pointer for the unique classes, * size [n_classes] * @return CUML_SUCCESS on success and other corresponding flags upon any failures. * @{ */ cumlError_t cumlSpSvcFit(cumlHandle_t handle, float* input, int n_rows, int n_cols, float* labels, float C, float cache_size, int max_iter, int nochange_steps, float tol, int verbosity, cumlSvmKernelType kernel, int degree, float gamma, float coef0, int* n_support, float* b, float** dual_coefs, float** x_support, int** support_idx, int* n_classes, float** unique_labels); cumlError_t cumlDpSvcFit(cumlHandle_t handle, double* input, int n_rows, int n_cols, double* labels, double C, double cache_size, int max_iter, int nochange_steps, double tol, int verbosity, cumlSvmKernelType kernel, int degree, double gamma, double coef0, int* n_support, double* b, double** dual_coefs, double** x_support, int** support_idx, int* n_classes, double** unique_labels); /** @} */ /** * @defgroup SVM C-wrapper to C++ implementation of Support Vector Machine * * The output preds array shall be allocated on entry. 
* * @param [in] handle the cuML handle * @param [in] input device pointer for the input data in column major format. * Size n_rows x n_cols. * @param [in] n_rows number of rows * @param [in] n_cols number of columns * @param [in] kernel type of kernel (LINEAR, POLYNOMIAL, RBF or TANH) * @param [in] degree of polynomial kernel (ignored by others) * @param [in] gamma multiplier in the RBF, POLYNOMIAL and TANH kernels * @param [in] coef0 additive constant in poly and tanh kernels * @param [in] n_support number of support vectors * @param [in] b constant used in the decision function * @param [in] dual_coefs non-zero dual coefficients, size [n_support]. * @param [in] x_support support vectors in column major format. * Size [n_support x n_cols]. * @param [in] n_classes number of classes found in the input labels * @param [in] unique_labels device pointer for the unique classes, * size [n_classes] * @param [out] preds device pointer for the predictions. Size [n_rows]. * @param [in] buffer_size size of temporary buffer in MiB * @param [in] predict_class whether to predict class label (true), or just * return the decision function value (false) * @return CUML_SUCCESS on success and other corresponding flags upon any failures. * @{ */ cumlError_t cumlSpSvcPredict(cumlHandle_t handle, float* input, int n_rows, int n_cols, cumlSvmKernelType kernel, int degree, float gamma, float coef0, int n_support, float b, float* dual_coefs, float* x_support, int n_classes, float* unique_labels, float* preds, float buffer_size, int predict_class); cumlError_t cumlDpSvcPredict(cumlHandle_t handle, double* input, int n_rows, int n_cols, cumlSvmKernelType kernel, int degree, double gamma, double coef0, int n_support, double b, double* dual_coefs, double* x_support, int n_classes, double* unique_labels, double* preds, double buffer_size, int predict_class); /** @} */ #ifdef __cplusplus } #endif
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/svm/svc.hpp
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "svm_model.h"
#include "svm_parameter.h"

#include <cuml/common/logger.hpp>

#include <raft/core/handle.hpp>
#include <raft/distance/distance_types.hpp>

namespace ML {
namespace SVM {

// Forward declarations of the stateless API

/**
 * @brief Fit a support vector classifier to the training data.
 *
 * Each row of the input data stores a feature vector.
 * We use the SMO method to fit the SVM.
 *
 * The output device buffers in model shall be unallocated on entry.
 *
 * @tparam math_t floating point type
 * @param [in] handle the cuML handle
 * @param [in] input device pointer for the input data in column major format.
 *   Size n_rows x n_cols.
 * @param [in] n_rows number of rows
 * @param [in] n_cols number of columns
 * @param [in] labels device pointer for the labels. Size [n_rows].
 * @param [in] param parameters for training
 * @param [in] kernel_params parameters for the kernel function
 * @param [out] model parameters of the trained model
 * @param [in] sample_weight optional sample weights, size [n_rows]
 */
template <typename math_t>
void svcFit(const raft::handle_t& handle,
            math_t* input,
            int n_rows,
            int n_cols,
            math_t* labels,
            const SvmParameter& param,
            raft::distance::kernels::KernelParams& kernel_params,
            SvmModel<math_t>& model,
            const math_t* sample_weight);

/**
 * @brief Fit a support vector classifier to the training data (CSR input).
 *
 * Each row of the input data stores a feature vector.
 * We use the SMO method to fit the SVM.
 *
 * The output device buffers in model shall be unallocated on entry.
 *
 * @tparam math_t floating point type
 * @param [in] handle the cuML handle
 * @param [in] indptr device pointer for CSR row positions. Size [n_rows + 1].
 * @param [in] indices device pointer for CSR column indices. Size [nnz].
 * @param [in] data device pointer for the CSR data. Size [nnz].
 * @param [in] n_rows number of rows
 * @param [in] n_cols number of columns
 * @param [in] nnz number of stored entries.
 * @param [in] labels device pointer for the labels. Size [n_rows].
 * @param [in] param parameters for training
 * @param [in] kernel_params parameters for the kernel function
 * @param [out] model parameters of the trained model
 * @param [in] sample_weight optional sample weights, size [n_rows]
 */
template <typename math_t>
void svcFitSparse(const raft::handle_t& handle,
                  int* indptr,
                  int* indices,
                  math_t* data,
                  int n_rows,
                  int n_cols,
                  int nnz,
                  math_t* labels,
                  const SvmParameter& param,
                  raft::distance::kernels::KernelParams& kernel_params,
                  SvmModel<math_t>& model,
                  const math_t* sample_weight);

/**
 * @brief Predict classes or decision function value for samples in input.
 *
 * We evaluate the decision function f(x_i). Depending on the parameter
 * predict_class, we either return f(x_i) or the label corresponding to
 * sign(f(x_i)).
 *
 * The predictions are calculated according to the following formulas:
 * \f[
 *   f(x_i) = \sum_{j=1}^{n_{support}} K(x_i, x_j) * dual_coefs[j] + b
 * \f]
 *
 * pred(x_i) = label[sign(f(x_i))], if predict_class==true, or
 * pred(x_i) = f(x_i),              if predict_class==false.
 *
 * @tparam math_t floating point type
 * @param handle the cuML handle
 * @param [in] input device pointer for the input data in column major format,
 *   size [n_rows x n_cols].
 * @param [in] n_rows number of rows (input vectors)
 * @param [in] n_cols number of columns (features)
 * @param [in] kernel_params parameters for the kernel function
 * @param [in] model SVM model parameters
 * @param [out] preds device pointer to store the predicted class labels.
 *   Size [n_rows]. Should be allocated on entry.
 * @param [in] buffer_size size of temporary buffer in MiB
 * @param [in] predict_class whether to predict class label (true), or just
 *   return the decision function value (false)
 */
template <typename math_t>
void svcPredict(const raft::handle_t& handle,
                math_t* input,
                int n_rows,
                int n_cols,
                raft::distance::kernels::KernelParams& kernel_params,
                const SvmModel<math_t>& model,
                math_t* preds,
                math_t buffer_size,
                bool predict_class);

/**
 * @brief Predict classes or decision function value for samples in input (CSR input).
 *
 * We evaluate the decision function f(x_i). Depending on the parameter
 * predict_class, we either return f(x_i) or the label corresponding to
 * sign(f(x_i)).
 *
 * The predictions are calculated according to the following formulas:
 * \f[
 *   f(x_i) = \sum_{j=1}^{n_{support}} K(x_i, x_j) * dual_coefs[j] + b
 * \f]
 *
 * pred(x_i) = label[sign(f(x_i))], if predict_class==true, or
 * pred(x_i) = f(x_i),              if predict_class==false.
 *
 * @tparam math_t floating point type
 * @param handle the cuML handle
 * @param [in] indptr device pointer for CSR row positions. Size [n_rows + 1].
 * @param [in] indices device pointer for CSR column indices. Size [nnz].
 * @param [in] data device pointer for the CSR data. Size [nnz].
 * @param [in] n_rows number of rows
 * @param [in] n_cols number of columns
 * @param [in] nnz number of stored entries.
 * @param [in] kernel_params parameters for the kernel function
 * @param [in] model SVM model parameters
 * @param [out] preds device pointer to store the predicted class labels.
 *   Size [n_rows]. Should be allocated on entry.
 * @param [in] buffer_size size of temporary buffer in MiB
 * @param [in] predict_class whether to predict class label (true), or just
 *   return the decision function value (false)
 */
template <typename math_t>
void svcPredictSparse(const raft::handle_t& handle,
                      int* indptr,
                      int* indices,
                      math_t* data,
                      int n_rows,
                      int n_cols,
                      int nnz,
                      raft::distance::kernels::KernelParams& kernel_params,
                      const SvmModel<math_t>& model,
                      math_t* preds,
                      math_t buffer_size,
                      bool predict_class);

/**
 * Deallocate device buffers in the SvmModel struct.
 *
 * @param [in] handle cuML handle
 * @param [inout] m SVM model parameters
 */
template <typename math_t>
void svmFreeBuffers(const raft::handle_t& handle, SvmModel<math_t>& m);

/**
 * @brief C-Support Vector Classification
 *
 * This is a Scikit-Learn like wrapper around the stateless C++ functions.
 * See Issue #456 for general discussion about stateful Sklearn like wrappers.
 *
 * The classifier will be fitted using the SMO algorithm in dual space.
 *
 * The decision function takes the following form
 * \f[
 *   sign\left( \sum_{i=1}^{N_{support}} y_i \alpha_i K(x_i,x) + b \right),
 * \f]
 * where \f$x_i\f$ are the support vectors, and \f$ y_i \alpha_i \f$ are the dual
 * coordinates.
 *
 * The penalty parameter C limits the values of the dual coefficients
 * \f[ 0 <= \alpha <= C \f]
 */
template <typename math_t>
class SVC {
 public:
  // Public members for easier access during testing from Python.
  raft::distance::kernels::KernelParams kernel_params;
  SvmParameter param;
  SvmModel<math_t> model;

  /**
   * @brief Constructs a support vector classifier
   * @param handle cuML handle
   * @param C penalty term
   * @param tol tolerance to stop fitting
   * @param kernel_params parameters for kernels
   * @param cache_size size of kernel cache in device memory (MiB)
   * @param max_iter maximum number of outer iterations in SmoSolver
   * @param nochange_steps number of steps with no change wrt convergence
   * @param verbosity verbosity level for logging messages during execution
   */
  SVC(raft::handle_t& handle,
      math_t C                                              = 1,
      math_t tol                                            = 1.0e-3,
      raft::distance::kernels::KernelParams kernel_params =
        raft::distance::kernels::KernelParams{raft::distance::kernels::LINEAR, 3, 1, 0},
      math_t cache_size  = 200,
      int max_iter       = -1,
      int nochange_steps = 1000,
      int verbosity      = CUML_LEVEL_INFO);

  ~SVC();

  /**
   * @brief Fit a support vector classifier to the training data.
   *
   * Each row of the input data stores a feature vector.
   * We use the SMO method to fit the SVM.
   *
   * @param input device pointer for the input data in column major format. Size n_rows x n_cols.
   * @param n_rows number of rows
   * @param n_cols number of columns
   * @param labels device pointer for the labels. Size n_rows.
   * @param [in] sample_weight optional sample weights, size [n_rows]
   */
  void fit(
    math_t* input, int n_rows, int n_cols, math_t* labels, const math_t* sample_weight = nullptr);

  /**
   * @brief Predict classes for samples in input.
   * @param [in] input device pointer for the input data in column major format,
   *   size [n_rows x n_cols].
   * @param [in] n_rows number of vectors
   * @param [in] n_cols number of features
   * @param [out] preds device pointer to store the predicted class labels.
   *   Size [n_rows]. Should be allocated on entry.
   */
  void predict(math_t* input, int n_rows, int n_cols, math_t* preds);

  /**
   * @brief Calculate decision function value for samples in input.
   * @param [in] input device pointer for the input data in column major format,
   *   size [n_rows x n_cols].
   * @param [in] n_rows number of vectors
   * @param [in] n_cols number of features
   * @param [out] preds device pointer to store the decision function value
   *   Size [n_rows]. Should be allocated on entry.
   */
  void decisionFunction(math_t* input, int n_rows, int n_cols, math_t* preds);

 private:
  const raft::handle_t& handle;
};

};  // end namespace SVM
};  // end namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/svm/svr.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cublas_v2.h> #include <cuml/matrix/kernelparams.h> namespace ML { namespace SVM { template <typename math_t> struct SvmModel; struct SvmParameter; // Forward declarations of the stateless API /** * @brief Fit a support vector regressor to the training data. * * Each row of the input data stores a feature vector. * * The output buffers in model shall be unallocated on entry. * * @tparam math_t floating point type * @param [in] handle the cuML handle * @param [in] X device pointer for the input data in column major format. * Size n_rows x n_cols. * @param [in] n_rows number of rows * @param [in] n_cols number of columns * @param [in] y device pointer for target values. Size [n_rows]. * @param [in] param parameters for training * @param [in] kernel_params parameters for the kernel function * @param [out] model parameters of the trained model * @param [in] sample_weight optional sample weights, size [n_rows] */ template <typename math_t> void svrFit(const raft::handle_t& handle, math_t* X, int n_rows, int n_cols, math_t* y, const SvmParameter& param, MLCommon::Matrix::KernelParams& kernel_params, SvmModel<math_t>& model, const math_t* sample_weight = nullptr); /** * @brief Fit a support vector regressor to the training data. * * Each row of the input data stores a feature vector. * * The output buffers in model shall be unallocated on entry. 
* * @tparam math_t floating point type * @param [in] handle the cuML handle * @param [in] indptr device pointer for CSR row positions. Size [n_rows + 1]. * @param [in] indices device pointer for CSR column indices. Size [nnz]. * @param [in] data device pointer for the CSR data. Size [nnz]. * @param [in] n_rows number of rows * @param [in] n_cols number of columns * @param [in] nnz number of stored entries. * @param [in] y device pointer for target values. Size [n_rows]. * @param [in] param parameters for training * @param [in] kernel_params parameters for the kernel function * @param [out] model parameters of the trained model * @param [in] sample_weight optional sample weights, size [n_rows] */ template <typename math_t> void svrFitSparse(const raft::handle_t& handle, int* indptr, int* indices, math_t* data, int n_rows, int n_cols, int nnz, math_t* y, const SvmParameter& param, raft::distance::kernels::KernelParams& kernel_params, SvmModel<math_t>& model, const math_t* sample_weight = nullptr); // For prediction we use svcPredict }; // end namespace SVM }; // end namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/svm/linear.hpp
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/handle.hpp> namespace ML { namespace SVM { struct LinearSVMParams { /** The regularization term. */ enum Penalty { /** Abs. value of the weights: `sum |w|` */ L1, /** Squared value of the weights: `sum w^2` */ L2 }; /** The loss function. */ enum Loss { /** `max(1 - y_i x_i w, 0)` */ HINGE, /** `max(1 - y_i x_i w, 0)^2` */ SQUARED_HINGE, /** `max(|y_i - x_i w| - epsilon, 0)` */ EPSILON_INSENSITIVE, /** `max(|y_i - x_i w| - epsilon, 0)^2` */ SQUARED_EPSILON_INSENSITIVE }; /** The regularization term. */ Penalty penalty = L2; /** The loss function. */ Loss loss = HINGE; /** Whether to fit the bias term. */ bool fit_intercept = true; /** When true, the bias term is treated the same way as other data features. * Enabling this feature forces an extra copying the input data X. */ bool penalized_intercept = false; /** Whether to estimate probabilities using Platt scaling (applicable to SVC). */ bool probability = false; /** Maximum number of iterations for the underlying QN solver. */ int max_iter = 1000; /** * Maximum number of linesearch (inner loop) iterations for the underlying QN solver. */ int linesearch_max_iter = 100; /** * Number of vectors approximating the hessian for the underlying QN solver (l-bfgs). */ int lbfgs_memory = 5; /** Triggers extra output when greater than zero. 
*/ int verbose = 0; /** * The constant scaling factor of the main term in the loss function. * (You can also think of that as the inverse factor of the penalty term). */ double C = 1.0; /** The threshold on the gradient for the underlying QN solver. */ double grad_tol = 0.0001; /** The threshold on the function change for the underlying QN solver. */ double change_tol = 0.00001; /** The epsilon-sensitivity parameter (applicable to the SVM-regression (SVR) loss functions). */ double epsilon = 0.0; }; template <typename T> struct LinearSVMModel { /** * C-style (row-major) matrix of coefficients of size `(coefRows, coefCols)` * where * coefRows = nCols + (params.fit_intercept ? 1 : 0) * coefCols = nClasses == 2 ? 1 : nClasses */ T* w; /** Sorted, unique values of input array `y`. */ T* classes = nullptr; /** * C-style (row-major) matrix of the probabolistic model calibration coefficients. * It's empty if `LinearSVMParams.probability == false`. * Otherwise, it's size is `(2, coefCols)`. * where * coefCols = nClasses == 2 ? 1 : nClasses */ T* probScale = nullptr; /** Number of classes (not applicable for regression). */ std::size_t nClasses = 0; /** Number of rows of `w`, which is the number of data features plus maybe bias. */ std::size_t coefRows; /** It's 1 for binary classification or regression; nClasses for multiclass. */ inline std::size_t coefCols() const { return nClasses <= 2 ? 1 : nClasses; } /** * @brief Allocate and fit the LinearSVM model. * * @param [in] handle the cuML handle. * @param [in] params the model parameters. * @param [in] X the input data matrix of size (nRows, nCols) in column-major format. * @param [in] nRows the number of input samples. * @param [in] nCols the number of feature dimensions. * @param [in] y the target - a single vector of either real (regression) or * categorical (classification) values (nRows, ). * @param [in] sampleWeight the non-negative weights for the training sample (nRows, ). 
* @return the trained model (don't forget to call `free` on it after use). */ static LinearSVMModel<T> fit(const raft::handle_t& handle, const LinearSVMParams& params, const T* X, const std::size_t nRows, const std::size_t nCols, const T* y, const T* sampleWeight); /** * @brief Explicitly allocate the data for the model without training it. * * @param [in] handle the cuML handle. * @param [in] params the model parameters. * @param [in] nCols the number of feature dimensions. * @param [in] nClasses the number of classes in the dataset (not applicable for regression). * @return the trained model (don't forget to call `free` on it after use). */ static LinearSVMModel<T> allocate(const raft::handle_t& handle, const LinearSVMParams& params, const std::size_t nCols, const std::size_t nClasses = 0); /** @brief Free the allocated memory. The model is not usable after the call of this method. */ static void free(const raft::handle_t& handle, LinearSVMModel<T>& model); /** * @brief Predict using the trained LinearSVM model. * * @param [in] handle the cuML handle. * @param [in] params the model parameters. * @param [in] model the trained model. * @param [in] X the input data matrix of size (nRows, nCols) in column-major format. * @param [in] nRows the number of input samples. * @param [in] nCols the number of feature dimensions. * @param [out] out the predictions (nRows, ). */ static void predict(const raft::handle_t& handle, const LinearSVMParams& params, const LinearSVMModel<T>& model, const T* X, const std::size_t nRows, const std::size_t nCols, T* out); /** * @brief Calculate decision function value for samples in input. * @param [in] handle the cuML handle. * @param [in] params the model parameters. * @param [in] model the trained model. * @param [in] X the input data matrix of size (nRows, nCols) in column-major format. 
* @param [in] nRows number of vectors * @param [in] nCols number of features * @param [out] out the decision function value of size (nRows, n_classes <= 2 ? 1 : n_classes) in * row-major format. */ static void decisionFunction(const raft::handle_t& handle, const LinearSVMParams& params, const LinearSVMModel<T>& model, const T* X, const std::size_t nRows, const std::size_t nCols, T* out); /** * @brief For SVC, predict the probabilities for each outcome. * * @param [in] handle the cuML handle. * @param [in] params the model parameters. * @param [in] model the trained model. * @param [in] X the input data matrix of size (nRows, nCols) in column-major format. * @param [in] nRows the number of input samples. * @param [in] nCols the number of feature dimensions. * @param [in] log whether to output log-probabilities instead of probabilities. * @param [out] out the estimated probabilities (nRows, nClasses) in row-major format. */ static void predictProba(const raft::handle_t& handle, const LinearSVMParams& params, const LinearSVMModel<T>& model, const T* X, const std::size_t nRows, const std::size_t nCols, const bool log, T* out); }; } // namespace SVM } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/forest_model.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #include <cuml/experimental/fil/decision_forest.hpp> #include <cuml/experimental/fil/detail/index_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/buffer.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> #include <cuml/experimental/fil/detail/raft_proto/handle.hpp> #include <cuml/experimental/fil/infer_kind.hpp> #include <type_traits> #include <variant> namespace ML { namespace experimental { namespace fil { /** * A model used for performing inference with FIL * * This struct is a wrapper for all variants of decision_forest supported by a * standard FIL build. 
*/ struct forest_model { /** Wrap a decision_forest in a full forest_model object */ forest_model(decision_forest_variant&& forest = decision_forest_variant{}) : decision_forest_{forest} { } /** The number of features per row expected by the model */ auto num_features() { return std::visit([](auto&& concrete_forest) { return concrete_forest.num_features(); }, decision_forest_); } /** The number of outputs per row generated by the model */ auto num_outputs() { return std::visit([](auto&& concrete_forest) { return concrete_forest.num_outputs(); }, decision_forest_); } /** The number of trees in the model */ auto num_trees() { return std::visit([](auto&& concrete_forest) { return concrete_forest.num_trees(); }, decision_forest_); } /** Whether or not leaf nodes use vector outputs */ auto has_vector_leaves() { return std::visit([](auto&& concrete_forest) { return concrete_forest.has_vector_leaves(); }, decision_forest_); } /** The operation used for postprocessing all outputs for a single row */ auto row_postprocessing() { return std::visit([](auto&& concrete_forest) { return concrete_forest.row_postprocessing(); }, decision_forest_); } /** Setter for row_postprocessing() */ void set_row_postprocessing(row_op val) { return std::visit( [&val](auto&& concrete_forest) { concrete_forest.set_row_postprocessing(val); }, decision_forest_); } /** The operation used for postprocessing each element of the output for a * single row */ auto elem_postprocessing() { return std::visit([](auto&& concrete_forest) { return concrete_forest.elem_postprocessing(); }, decision_forest_); } /** The type of memory (device/host) where the model is stored */ auto memory_type() { return std::visit([](auto&& concrete_forest) { return concrete_forest.memory_type(); }, decision_forest_); } /** The ID of the device on which this model is loaded */ auto device_index() { return std::visit([](auto&& concrete_forest) { return concrete_forest.device_index(); }, decision_forest_); } /** Whether or not 
model is loaded at double precision */
auto is_double_precision()
{
  return std::visit(
    [](auto&& concrete_forest) {
      // True iff the io_type of the concrete forest alternative currently
      // held by the variant is double
      return std::is_same_v<typename std::remove_reference_t<decltype(concrete_forest)>::io_type,
                            double>;
    },
    decision_forest_);
}
/**
 * Perform inference on given input
 *
 * @param[out] output The buffer where model output should be stored.
 * This must be of size at least ROWS x num_outputs().
 * @param[in] input The buffer containing input data.
 * @param[in] stream A raft_proto::cuda_stream, which (on GPU-enabled builds) is
 * a transparent wrapper for the cudaStream_t or (on CPU-only builds) a
 * CUDA-free placeholder object.
 * @param[in] predict_type Type of inference to perform. Defaults to summing
 * the outputs of all trees and produce an output per row. If set to
 * "per_tree", we will instead output all outputs of individual trees.
 * If set to "leaf_id", we will output the integer ID of the leaf node
 * for each tree.
 * @param[in] specified_chunk_size: Specifies the mini-batch size for
 * processing. This has different meanings on CPU and GPU, but on GPU it
 * corresponds to the number of rows evaluated per inference iteration
 * on a single block. It can take on any power of 2 from 1 to 32, and
 * runtime performance is quite sensitive to the value chosen. In general,
 * larger batches benefit from higher values, but it is hard to predict the
 * optimal value a priori. If omitted, a heuristic will be used to select a
 * reasonable value. On CPU, this argument can generally just be omitted.
 */
template <typename io_t>
void predict(raft_proto::buffer<io_t>& output,
             raft_proto::buffer<io_t> const& input,
             raft_proto::cuda_stream stream                  = raft_proto::cuda_stream{},
             infer_kind predict_type                         = infer_kind::default_kind,
             std::optional<index_type> specified_chunk_size  = std::nullopt)
{
  std::visit(
    [this, predict_type, &output, &input, &stream, &specified_chunk_size](
      auto&& concrete_forest) {
      // Only the variant alternative whose io_type matches the caller's
      // buffer type can be evaluated; any other combination is a usage error
      if constexpr (std::is_same_v<
                      typename std::remove_reference_t<decltype(concrete_forest)>::io_type,
                      io_t>) {
        concrete_forest.predict(output, input, stream, predict_type, specified_chunk_size);
      } else {
        throw type_error("Input type does not match model_type");
      }
    },
    decision_forest_);
}
/**
 * Perform inference on given input
 *
 * @param[in] handle The raft_proto::handle_t (wrapper for raft::handle_t
 * on GPU) which will be used to provide streams for evaluation.
 * @param[out] output The buffer where model output should be stored. If
 * this buffer is on host while the model is on device or vice versa,
 * work will be distributed across available streams to copy the data back
 * to this output location. This must be of size at least ROWS x num_outputs().
 * @param[in] input The buffer containing input data. If
 * this buffer is on host while the model is on device or vice versa,
 * work will be distributed across available streams to copy the input data
 * to the appropriate location and perform inference.
 * @param[in] predict_type Type of inference to perform. Defaults to summing
 * the outputs of all trees and produce an output per row. If set to
 * "per_tree", we will instead output all outputs of individual trees.
 * If set to "leaf_id", we will output the integer ID of the leaf node
 * for each tree.
 * @param[in] specified_chunk_size: Specifies the mini-batch size for
 * processing. This has different meanings on CPU and GPU, but on GPU it
 * corresponds to the number of rows evaluated per inference iteration
 * on a single block. It can take on any power of 2 from 1 to 32, and
 * runtime performance is quite sensitive to the value chosen. In general,
 * larger batches benefit from higher values, but it is hard to predict the
 * optimal value a priori. If omitted, a heuristic will be used to select a
 * reasonable value. On CPU, this argument can generally just be omitted.
 */
template <typename io_t>
void predict(raft_proto::handle_t const& handle,
             raft_proto::buffer<io_t>& output,
             raft_proto::buffer<io_t> const& input,
             infer_kind predict_type                         = infer_kind::default_kind,
             std::optional<index_type> specified_chunk_size  = std::nullopt)
{
  std::visit(
    [this, predict_type, &handle, &output, &input, &specified_chunk_size](
      auto&& concrete_forest) {
      using model_io_t = typename std::remove_reference_t<decltype(concrete_forest)>::io_type;
      if constexpr (std::is_same_v<model_io_t, io_t>) {
        if (output.memory_type() == memory_type() && input.memory_type() == memory_type()) {
          // I/O already lives in the same memory space as the model; a
          // single predict call on the next stream suffices
          concrete_forest.predict(
            output, input, handle.get_next_usable_stream(), predict_type, specified_chunk_size);
        } else {
          // I/O is on the "other side" (host vs device) from the model:
          // split the rows into partitions, round-robin the partitions
          // across the handle's usable streams, and stage each partition
          // through a model-side buffer as needed.
          // NOTE(review): these two constants are sizing heuristics for the
          // partitioning, not hard limits — confirm before tuning.
          auto constexpr static const MIN_CHUNKS_PER_PARTITION = std::size_t{64};
          auto constexpr static const MAX_CHUNK_SIZE           = std::size_t{64};
          auto row_count = input.size() / num_features();
          // Partition size balances the row count over the available
          // streams, but never drops below a minimum number of chunks so
          // each launch still has enough work
          auto partition_size =
            std::max(raft_proto::ceildiv(row_count, handle.get_usable_stream_count()),
                     specified_chunk_size.value_or(MAX_CHUNK_SIZE) * MIN_CHUNKS_PER_PARTITION);
          auto partition_count = raft_proto::ceildiv(row_count, partition_size);
          for (auto i = std::size_t{}; i < partition_count; ++i) {
            auto stream                 = handle.get_next_usable_stream();
            // Last partition may hold fewer rows than partition_size
            auto rows_in_this_partition = std::min(partition_size, row_count - i * partition_size);
            auto partition_in           = raft_proto::buffer<io_t>{};
            if (input.memory_type() != memory_type()) {
              // Input is on the wrong side: allocate a model-side staging
              // buffer and copy this partition's rows into it
              partition_in =
                raft_proto::buffer<io_t>{rows_in_this_partition * num_features(), memory_type()};
              raft_proto::copy<raft_proto::DEBUG_ENABLED>(partition_in,
                                                          input,
                                                          0,
                                                          i * partition_size * num_features(),
                                                          partition_in.size(),
                                                          stream);
            } else {
              // Input is already model-side: wrap a non-owning view over
              // this partition's rows
              partition_in =
                raft_proto::buffer<io_t>{input.data() + i * partition_size * num_features(),
                                         rows_in_this_partition * num_features(),
                                         memory_type()};
            }
            auto partition_out = raft_proto::buffer<io_t>{};
            if (output.memory_type() != memory_type()) {
              // Output must be staged model-side and copied back afterwards
              partition_out =
                raft_proto::buffer<io_t>{rows_in_this_partition * num_outputs(), memory_type()};
            } else {
              // Output is model-side: write directly into the caller's buffer
              partition_out =
                raft_proto::buffer<io_t>{output.data() + i * partition_size * num_outputs(),
                                         rows_in_this_partition * num_outputs(),
                                         memory_type()};
            }
            concrete_forest.predict(
              partition_out, partition_in, stream, predict_type, specified_chunk_size);
            if (output.memory_type() != memory_type()) {
              // Copy staged results back to the caller's output location
              raft_proto::copy<raft_proto::DEBUG_ENABLED>(output,
                                                          partition_out,
                                                          i * partition_size * num_outputs(),
                                                          0,
                                                          partition_out.size(),
                                                          stream);
            }
          }
        }
      } else {
        throw type_error("Input type does not match model_type");
      }
    },
    decision_forest_);
}
/**
 * Perform inference on given input
 *
 * @param[in] handle The raft_proto::handle_t (wrapper for raft::handle_t
 * on GPU) which will be used to provide streams for evaluation.
 * @param[out] output Pointer to the memory location where output should end
 * up
 * @param[in] input Pointer to the input data
 * @param[in] num_rows Number of rows in input
 * @param[in] out_mem_type The memory type (device/host) of the output
 * buffer
 * @param[in] in_mem_type The memory type (device/host) of the input buffer
 * @param[in] predict_type Type of inference to perform. Defaults to summing
 * the outputs of all trees and produce an output per row. If set to
 * "per_tree", we will instead output all outputs of individual trees.
 * If set to "leaf_id", we will output the integer ID of the leaf node
 * for each tree.
 * @param[in] specified_chunk_size: Specifies the mini-batch size for
 * processing. This has different meanings on CPU and GPU, but on GPU it
 * corresponds to the number of rows evaluated per inference iteration
 * on a single block. It can take on any power of 2 from 1 to 32, and
 * runtime performance is quite sensitive to the value chosen. In general,
 * larger batches benefit from higher values, but it is hard to predict the
 * optimal value a priori. If omitted, a heuristic will be used to select a
 * reasonable value. On CPU, this argument can generally just be omitted.
 */
template <typename io_t>
void predict(raft_proto::handle_t const& handle,
             io_t* output,
             io_t* input,
             std::size_t num_rows,
             raft_proto::device_type out_mem_type,
             raft_proto::device_type in_mem_type,
             infer_kind predict_type                         = infer_kind::default_kind,
             std::optional<index_type> specified_chunk_size  = std::nullopt)
{
  // Wrap the raw pointers in non-owning buffers, then defer to the
  // buffer-based overload above
  // TODO(wphicks): Make sure buffer lands on same device as model
  auto out_buffer = raft_proto::buffer{output, num_rows * num_outputs(), out_mem_type};
  auto in_buffer  = raft_proto::buffer{input, num_rows * num_features(), in_mem_type};
  predict(handle, out_buffer, in_buffer, predict_type, specified_chunk_size);
}

 private:
  // The variant holding the concrete decision_forest instantiation for this model
  decision_forest_variant decision_forest_;
};
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/infer_kind.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace ML { namespace experimental { namespace fil { enum class infer_kind : unsigned char { default_kind = 0, per_tree = 1, leaf_id = 2 }; } } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/Implementation.md
# FIL Implementation This document is intended to provide additional detail about this implementation of FIL to help guide future FIL contributors. Because this is the first cuML algorithm to attempt to provide a unified CPU/GPU codebase that does *not* require nvcc, CUDA or any other GPU-related library for its CPU-only build, we also go over general strategies for CPU/GPU interoperability as used by FIL. **A NOTE ON THE `raft_proto` NAMESPACE:** In addition to FIL-specific code, the new implementation requires some more general-purpose CPU-GPU interoperable utilities. Many of these utilities are either already implemented in RAFT (but do not provide the required CPU-interoperable compilation guarantees) or are a natural fit for incorporation in RAFT. In order to allow for more careful integration with the existing RAFT codebase and interoperability strategies, these utilities are currently provided in the `raft_proto` namespace but will be moved into RAFT over time. Other algorithms should not make use of the `raft_proto` namespace but instead wait until this transition has taken place. ## Design Goals 1. Provide state-of-the-art runtime performance for forest models on GPU, especially for cases where CPU performance will not suffice (e.g. large batches, deep trees, many trees, etc.). 2. Ensure that the public API is the same for both CPU and GPU execution. 3. Re-use as much code as possible between CPU and GPU implementations. 4. Provide near-state-of-the-art runtime performance for forest models on most CPUs without vendor-specific optimizations. ## Strategies for CPU/GPU code re-use This FIL implementation now makes use of a build-time variable `CUML_ENABLE_GPU` to determine whether or not to compile CUDA code. If `CUML_ENABLE_GPU` is not set, FIL is guaranteed to compile without nvcc, access to CUDA headers, or any other GPU-related library. We explicitly wish to avoid excessive use of `#ifdef` statements based on this variable, however. 
Interleaving CPU and GPU code via `#ifdef` branches both reduces readability and discourages writing of truly interoperable code. Ideally, `#ifdef` statements should be used solely and sparingly for conditional header inclusion. This presents additional challenges but also opportunities for a cleaner implementation of a unified CPU/GPU codebase. It is also occasionally useful to make use of a `constexpr` value indicating whether or not `CUML_ENABLE_GPU` is set, which we introduce as `raft_proto::GPU_ENABLED`. ### Avoiding CUDA symbols in CPU-only builds The most significant challenge of attempting to create a unified CPU/GPU implementation is ensuring that no CUDA symbols are exposed in the CPU-only build. To illustrate the general strategy, we will look at a specific example: the implementation of the main inference loop. Code for this loop is provided in the following four files: ``` detail/ ├─ infer.hpp # "Consumable" header ├─ infer/ # "Implementation" directory │ ├─ cpu.hpp │ ├─ gpu.cuh │ ├─ gpu.hpp ``` For brevity, we introduce the concepts of "consumable" and "implementation" headers. Consumable headers can be included in any other header and are guaranteed not to themselves include any header with CUDA symbols if `CUML_ENABLE_GPU` is not defined. Implementation headers can *only* be included by their associated consumable header or directly in a source file. They should *never* be directly included by any other consumable header except their own. By creating a clear separation of these two header types, we guarantee that any source file that includes a consumable header should be compilable with or without access to CUDA headers. Note that all public headers should be consumable, but not all consumable headers need be made public. In the particular example under consideration, `infer.hpp` is consumable, but we keep it in the detail directory to indicate that it is not part of the public API. Let's take a closer look at each of the "infer" headers. 
`infer.hpp` implements `detail::infer`, a function templated on both the execution device type (`D`) and the type of the forest model being evaluated `forest_t`. If we were to look at the implementation of this template, we would note that there is no code specialized for either possible value of `D`. At the level of consumable headers, we have abstracted away the difference between GPU and CPU in order to ensure that this template is completely reusable between GPU and CPU.

Where we _need_ to provide distinct logic between GPU and CPU implementations, we do so in implementation headers. In `infer/cpu.hpp`, we have a fully-defined template for CPU specializations of `detail::infer`. If `raft_proto::GPU_ENABLED` is `false`, we also include the GPU specializations, which will simply throw an exception if invoked. In `infer/gpu.hpp` we *declare* but do not *define* the GPU specializations. In `infer/gpu.cuh` we provide the full working definition for the GPU specializations.

`infer.hpp` includes `infer/cpu.hpp` and `infer/gpu.hpp`, but *not* `infer/gpu.cuh`. Instead, `infer/gpu.cuh` is included directly in the CUDA source files that require access to the full definition. Structuring the code in this way, we have a single separation point between code that will and will not compile without access to CUDA headers.

A similar approach is used anywhere else in the implementation where we need distinct logic for CPU and GPU. Otherwise, we are free to use anything defined in a consumable header without worrying about whether the current translation unit will ultimately be compiled with gcc or nvcc or whether our current build does or does not have GPU enabled.
This means that with a little care, we can separate details related to parallelism and memory access from the actual algorithm logic. This logic will be the same for both CPU and GPU, but the now-isolated parallelism and memory access code can be independently optimized. The process of actually performing this separation usually starts by identifying the basic single "task" that each parallel worker must take on. It is not always entirely obvious how granular this task should be. For instance, in the case of forest models, we might consider the basic task to be evaluating a single row with all trees in the forest, evaluating all rows with a single tree of the forest, evaluating a single row with a single tree, evaluating a single node of a tree on a single row, evaluating a sub-tree of a specific size on a single row, etc. In order to offer optimal performance on the widest range of models, the present implementation defines the underlying worker task as evaluating a single row on a single tree, but specific model characteristics (e.g. very small or large trees) might benefit from other task granularity. Once we have identified the underlying task, we implement this directly in a way that is independent of the parallelism model or memory access patterns. That is to say, we assume that we are already executing on a single worker and that the memory is arranged optimally for this task. In the current implementation, this task is defined in `detail/evaluate_tree.hpp`. Looking at this header, we should note that there is no logic specific to the GPU or CPU. Instead we defer this to `infer_kernel`, which specifies how our fundamental task gets assigned to individual "workers" (CPU threads for the CPU or CUDA threads for the GPU). This is not a necessary constraint (i.e. we could refactor later for CPU and GPU specific versions of `evaluate_tree`), but re-using code in this way and providing a clean separation from the parallelism model does offer advantages. 
Beyond just the reduced maintenance of a single codebase and more modular design, this gives us the opportunity to benefit from improvements in the CPU implementation on GPU and vice versa. For instance, during the initial development, only CPU tests were used to check for correctness, but GPU results were shown to be correct as soon as they were added to the tests. Similarly, during optimization, only GPU runtime and instructions were analyzed, but the process of optimizing for the GPU resulted in significant speedups (over 50% on a standard benchmark) on the CPU. ## Code Walkthrough With some motivation for the general approach to CPU-GPU interoperability, we now offer an overview of the layout of the codebase to help guide future improvements. Because `raft_proto` utilities are going to be moved to RAFT or other general-purpose libraries, we will not review anything within the `raft_proto` directory here. ### Public Headers * `constants.hpp`: Contains constant values that may be useful in working with FIL in other C++ applications * `decision_forest.hpp`: Provides `decision_forest`, a template which provides concrete implementations of a decision forest. Because different types may be optimal for different sizes of models or models with different features, we implement this template on many different combinations of template parameters. This is provided in a public header in case other applications have more specialized use cases and can afford to work directly with this concrete underlying object. * `forest_model.hpp`: Provides `forest_model`, a wrapper for a `std::variant` of all `decision_forest` implementations. This wrapper handles dispatching `predict` calls to the right underlying type. * `exceptions.hpp`: Provides definitions for all custom exceptions that might be thrown within FIL and need to be handled by an external application. * `postproc_ops.hpp`: Provides enums used to specify how leaf outputs should be processed. 
* `treelite_importer.hpp`: Provides `import_from_treelite_model` and `import_from_treelite_handle`, either of which can be used to convert a Treelite model to a `forest_model` object to be used for accelerated inference. ### Detail Headers * `cpu_introspection.hpp`: Provides constants and utilities to evaluate CPU capabilities for optimized performance. * `decision_forest_builder.hpp`: Provides generic tools for building FIL forests from some other source. In the current FIL codebase, the Treelite import code is the only place this is used, but it could be used to create import utilities for other sources as well. * `device_initialization.hpp`: Contains code for anything that must be done to initialize execution on a device. For GPUs, this may mean setting specific CUDA options. * `evaluate_tree.hpp`: Contains code for evaluating a single tree on input data. * `forest.hpp`: Provide the storage struct `forest` whose *sole* responsibility is to hold model data to be used for inference. * `gpu_introspection.hpp`: Provides constants and utilities to evaluate GPU capabilities for optimized performance. * `infer.hpp`: Contains wrapper code for performing inference on a `forest` object (either on CPU or GPU). This wrapper takes data that has been extracted from the `forest_model` object if necessary to control details of forest evaluation. * `node.hpp`: Provides template for an individual node of a tree. * `postprocessor.hpp`: Provides device-agnostic code for postprocessing the output of model leaves. * `specialization_types.hpp`: Defines all specializations that are used to construct instantiations of the `decision_forest` template. * `infer_kernel/`: This directory contains device-specific code that determines how `evaluate_tree` calls will be performed in parallel. * `specializations/`: Because there is a large matrix of specializations for `decision_forest`, it would be tedious and error-prone to list out all the implementations in source files. 
Furthermore, because these templates are complex we wish to avoid recompiling them unnecessarily. Therefore, this directory contains headers with macros for declaring the necessary implementations in source files and declaring the corresponding templates as `extern` elsewhere. Because these specializations need to be explicitly declared, this must be implemented as a macro. ### Source Files The experimental FIL source files contain no implementation details. They merely use the macros defined in `include/cuml/experimental/fil/detail/specializations` to indicate the template instantiations that must be compiled. These are broken up into an arbitrary number of source files. To improve build parallelization, they could be broken up further, or to reduce the number of source files, they could be consolidated.
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/decision_forest.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <algorithm> #include <cstddef> #include <cuml/experimental/fil/constants.hpp> #include <cuml/experimental/fil/detail/device_initialization.hpp> #include <cuml/experimental/fil/detail/forest.hpp> #include <cuml/experimental/fil/detail/index_type.hpp> #include <cuml/experimental/fil/detail/infer.hpp> #include <cuml/experimental/fil/detail/postprocessor.hpp> #include <cuml/experimental/fil/detail/raft_proto/buffer.hpp> #include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp> #include <cuml/experimental/fil/detail/raft_proto/exceptions.hpp> #include <cuml/experimental/fil/detail/specialization_types.hpp> #include <cuml/experimental/fil/exceptions.hpp> #include <cuml/experimental/fil/infer_kind.hpp> #include <cuml/experimental/fil/postproc_ops.hpp> #include <cuml/experimental/fil/tree_layout.hpp> #include <limits> #include <optional> #include <stddef.h> #include <stdint.h> #include <variant> namespace ML { namespace experimental { namespace fil { /** * A general-purpose decision forest implementation * * This template provides an optimized but generic implementation of a decision * forest. Template parameters are used to specialize the * implementation based on the size and characteristics of the forest. 
* For instance, the smallest integer that can express the offset between a * parent and child node within a tree is used in order to minimize the size * of a node, increasing the number that can fit within the L2 or L1 cache. * * @tparam layout_v The in-memory layout of nodes in this forest * @tparam threshold_t The floating-point type used for quantitative splits * @tparam index_t The integer type used for storing many things within a * forest, including the category value of categorical nodes and the index at * which vector output for a leaf node is stored. * @tparam metadata_storage_t The type used for storing node metadata. * The first several bits will be used to store flags indicating various * characteristics of the node, and the remaining bits provide the integer * index of the feature for this node's split * @tparam offset_t An integer used to indicate the offset between a node and * its most distant child. This type must be large enough to store the * largest such offset in the entire forest. 
*/
template <tree_layout layout_v,
          typename threshold_t,
          typename index_t,
          typename metadata_storage_t,
          typename offset_t>
struct decision_forest {
  /**
   * The in-memory layout of nodes in this forest
   */
  auto constexpr static const layout = layout_v;
  /**
   * The type of the forest object which is actually passed to the CPU/GPU
   * for inference
   */
  using forest_type = forest<layout, threshold_t, index_t, metadata_storage_t, offset_t>;
  /**
   * The type of nodes within the forest
   */
  using node_type = typename forest_type::node_type;
  /**
   * The type used for input and output to the model
   */
  using io_type = typename forest_type::io_type;
  /**
   * The type used for quantitative splits within the model
   */
  using threshold_type = threshold_t;
  /**
   * The type used to indicate how leaf output should be post-processed
   */
  using postprocessor_type = postprocessor<io_type>;
  /**
   * The type used for storing data on categorical nodes
   */
  using categorical_storage_type = typename node_type::index_type;

  /**
   * Construct an empty decision forest
   */
  decision_forest()
    : nodes_{},
      root_node_indexes_{},
      node_id_mapping_{},
      vector_output_{},
      categorical_storage_{},
      num_features_{},
      num_outputs_{},
      leaf_size_{},
      has_categorical_nodes_{false},
      row_postproc_{},
      elem_postproc_{},
      average_factor_{},
      bias_{},
      postproc_constant_{}
  {
  }

  /**
   * Construct a decision forest with the indicated data
   *
   * @param nodes A buffer containing all nodes within the forest
   * @param root_node_indexes A buffer containing the index of the root node
   * of every tree in the forest
   * @param node_id_mapping Mapping to use to convert FIL's internal node ID into Treelite's node
   * ID. Only relevant when predict_type == infer_kind::leaf_id
   * @param num_features The number of features per input sample for this model
   * @param num_outputs The number of outputs per row from this model
   * @param has_categorical_nodes Whether this forest contains any
   * categorical nodes
   * @param vector_output A buffer containing the output from all vector
   * leaves for this model. Each leaf node will specify the offset within
   * this buffer at which its vector output begins, and leaf_size will be
   * used to determine how many subsequent entries from the buffer should be
   * used to construct the vector output. A value of std::nullopt indicates
   * that this is not a vector leaf model.
   * @param categorical_storage For models with inputs on too many categories
   * to be stored in the bits of an `index_t`, it may be necessary to store
   * categorical information external to the node itself. This buffer
   * contains the necessary storage for this information.
   * @param leaf_size The number of output values per leaf (1 for non-vector
   * leaves; >1 for vector leaves)
   * @param row_postproc The post-processing operation to be applied to an
   * entire row of the model output
   * @param elem_postproc The per-element post-processing operation to be
   * applied to the model output
   * @param average_factor A factor which is used for output
   * normalization
   * @param bias The bias term that is applied to the output after
   * normalization
   * @param postproc_constant A constant used by some post-processing
   * operations, including sigmoid, exponential, and
   * logarithm_one_plus_exp
   */
  decision_forest(raft_proto::buffer<node_type>&& nodes,
                  raft_proto::buffer<index_type>&& root_node_indexes,
                  raft_proto::buffer<index_type>&& node_id_mapping,
                  index_type num_features,
                  index_type num_outputs     = index_type{2},
                  bool has_categorical_nodes = false,
                  std::optional<raft_proto::buffer<io_type>>&& vector_output = std::nullopt,
                  std::optional<raft_proto::buffer<typename node_type::index_type>>&&
                    categorical_storage     = std::nullopt,
                  index_type leaf_size      = index_type{1},
                  row_op row_postproc       = row_op::disable,
                  element_op elem_postproc  = element_op::disable,
                  io_type average_factor    = io_type{1},
                  io_type bias              = io_type{0},
                  io_type postproc_constant = io_type{1})
    // NOTE(review): the buffer parameters are taken by rvalue reference but
    // the members are copy-initialized from them (no std::move) — confirm
    // whether raft_proto::buffer copy semantics make this intentional
    : nodes_{nodes},
      root_node_indexes_{root_node_indexes},
      node_id_mapping_{node_id_mapping},
      vector_output_{vector_output},
      categorical_storage_{categorical_storage},
      num_features_{num_features},
      num_outputs_{num_outputs},
      leaf_size_{leaf_size},
      has_categorical_nodes_{has_categorical_nodes},
      row_postproc_{row_postproc},
      elem_postproc_{elem_postproc},
      average_factor_{average_factor},
      bias_{bias},
      postproc_constant_{postproc_constant}
  {
    // Node data and root indexes must be co-located (same memory space and
    // same device) for inference to be possible
    if (nodes.memory_type() != root_node_indexes.memory_type()) {
      throw raft_proto::mem_type_mismatch(
        "Nodes and indexes of forest must both be stored on either host or device");
    }
    if (nodes.device_index() != root_node_indexes.device_index()) {
      throw raft_proto::mem_type_mismatch(
        "Nodes and indexes of forest must both be stored on same device");
    }
    // Perform any one-time device setup required before inference
    detail::initialize_device<forest_type>(nodes.device());
  }

  /** The number of features per row expected by the model */
  auto num_features() const { return num_features_; }
  /** The number of trees in the model */
  auto num_trees() const { return root_node_indexes_.size(); }
  /** Whether or not leaf nodes have vector outputs */
  auto has_vector_leaves() const { return vector_output_.has_value(); }
  /** The number of outputs per row generated by the model for the given
   * type of inference */
  auto num_outputs(infer_kind inference_kind = infer_kind::default_kind) const
  {
    auto result = num_outputs_;
    if (inference_kind == infer_kind::per_tree) {
      // One output per tree; vector-leaf models produce num_outputs_ values
      // per tree
      result = num_trees();
      if (has_vector_leaves()) { result *= num_outputs_; }
    } else if (inference_kind == infer_kind::leaf_id) {
      // One leaf ID per tree
      result = num_trees();
    }
    return result;
  }
  /** The operation used for postprocessing all outputs for a single row */
  auto row_postprocessing() const { return row_postproc_; }
  /** Set the operation used for postprocessing all outputs for a single row */
  void set_row_postprocessing(row_op val) { row_postproc_ = val; }
  /** The operation used for postprocessing each element of the output for a
   * single row */
  auto elem_postprocessing() const { return elem_postproc_; }
  /** The type of memory (device/host) where the model is stored */
  auto memory_type() { return nodes_.memory_type(); }
  /** The ID of the device on which this model is loaded */
  auto device_index() { return nodes_.device_index(); }

  /**
   * Perform inference with this model
   *
   * @param[out] output The buffer where the model output should be stored.
   * This must be of size ROWS x num_outputs().
   * @param[in] input The buffer containing the input data
   * @param[in] stream For GPU execution, the CUDA stream. For CPU execution,
   * this optional parameter can be safely omitted.
   * @param[in] predict_type Type of inference to perform. Defaults to summing
   * the outputs of all trees and produce an output per row. If set to
   * "per_tree", we will instead output all outputs of individual trees.
   * If set to "leaf_id", we will output the integer ID of the leaf node
   * for each tree.
   * @param[in] specified_rows_per_block_iter If non-nullopt, this value is
   * used to determine how many rows are evaluated for each inference
   * iteration within a CUDA block. Runtime performance is quite sensitive
   * to this value, but it is difficult to predict a priori, so it is
   * recommended to perform a search over possible values with realistic
   * batch sizes in order to determine the optimal value. Any power of 2 from
   * 1 to 32 is a valid value, and in general larger batches benefit from
   * larger values.
   */
  void predict(raft_proto::buffer<typename forest_type::io_type>& output,
               raft_proto::buffer<typename forest_type::io_type> const& input,
               raft_proto::cuda_stream stream = raft_proto::cuda_stream{},
               infer_kind predict_type        = infer_kind::default_kind,
               std::optional<index_type> specified_rows_per_block_iter = std::nullopt)
  {
    // I/O buffers must be co-located with the model (same memory space and
    // same device); this overload does no staging/copying
    if (output.memory_type() != memory_type() || input.memory_type() != memory_type()) {
      throw raft_proto::wrong_device_type{
        "Tried to use host I/O data with model on device or vice versa"};
    }
    if (output.device_index() != device_index() || input.device_index() != device_index()) {
      throw raft_proto::wrong_device{"I/O data on different device than model"};
    }
    // Optional buffers pass through as raw pointers (nullptr when absent)
    auto* vector_output_data =
      (vector_output_.has_value() ? vector_output_->data() : static_cast<io_type*>(nullptr));
    auto* categorical_storage_data =
      (categorical_storage_.has_value() ? categorical_storage_->data()
                                        : static_cast<categorical_storage_type*>(nullptr));
    // Dispatch on which alternative of the buffer's device variant is held;
    // presumably index 0 / 1 correspond to the two raft_proto device types —
    // confirm against raft_proto's device variant declaration
    switch (nodes_.device().index()) {
      case 0:
        fil::detail::infer(obj(),
                           get_postprocessor(predict_type),
                           output.data(),
                           input.data(),
                           index_type(input.size() / num_features_),
                           num_features_,
                           num_outputs(predict_type),
                           has_categorical_nodes_,
                           vector_output_data,
                           categorical_storage_data,
                           predict_type,
                           specified_rows_per_block_iter,
                           std::get<0>(nodes_.device()),
                           stream);
        break;
      case 1:
        fil::detail::infer(obj(),
                           get_postprocessor(predict_type),
                           output.data(),
                           input.data(),
                           index_type(input.size() / num_features_),
                           num_features_,
                           num_outputs(predict_type),
                           has_categorical_nodes_,
                           vector_output_data,
                           categorical_storage_data,
                           predict_type,
                           specified_rows_per_block_iter,
                           std::get<1>(nodes_.device()),
                           stream);
        break;
    }
  }

 private:
  /** The nodes for all trees in the forest */
  raft_proto::buffer<node_type> nodes_;
  /** The index of the root node for each tree in the forest */
  raft_proto::buffer<index_type> root_node_indexes_;
  /** Mapping to apply to node IDs. Only relevant when predict_type ==
   * infer_kind::leaf_id */
  raft_proto::buffer<index_type> node_id_mapping_;
  /** Buffer of outputs for all leaves in vector-leaf models */
  std::optional<raft_proto::buffer<io_type>> vector_output_;
  /** Buffer of elements used as backing data for bitsets which specify
   * categories for all categorical nodes in the model. */
  std::optional<raft_proto::buffer<categorical_storage_type>> categorical_storage_;

  // Metadata
  index_type num_features_;
  index_type num_outputs_;
  index_type leaf_size_;
  bool has_categorical_nodes_ = false;

  // Postprocessing constants
  row_op row_postproc_;
  element_op elem_postproc_;
  io_type average_factor_;
  io_type bias_;
  io_type postproc_constant_;

  // Build the lightweight forest object that is actually handed to the
  // CPU/GPU inference kernels (non-owning view of this object's buffers)
  auto obj() const
  {
    return forest_type{nodes_.data(),
                       root_node_indexes_.data(),
                       node_id_mapping_.data(),
                       static_cast<index_type>(root_node_indexes_.size()),
                       num_outputs_};
  }

  // Postprocessing is only applied for default-kind inference; per_tree and
  // leaf_id predictions return raw values, so a default-constructed
  // postprocessor is used for them
  auto get_postprocessor(infer_kind inference_kind = infer_kind::default_kind) const
  {
    auto result = postprocessor_type{};
    if (inference_kind == infer_kind::default_kind) {
      result = postprocessor_type{
        row_postproc_, elem_postproc_, average_factor_, bias_, postproc_constant_};
    }
    return result;
  }

  auto leaf_size() const { return leaf_size_; }
};

namespace detail {
/**
 * A convenience wrapper to simplify template instantiation of
 * decision_forest
 *
 * This template takes the large range of available template parameters
 * and reduces them to just three standard choices.
 *
 * @tparam layout The in-memory layout of nodes in this forest
 * @tparam double_precision Whether this model should use double-precision
 * for floating-point evaluation and 64-bit integers for indexes
 * @tparam large_trees Whether this forest expects more than 2**(16 - 3) - 1 =
 * 8191 features or contains nodes whose child is offset more than 2**16 - 1 = 65535 nodes away.
 */
template <tree_layout layout, bool double_precision, bool large_trees>
using preset_decision_forest = decision_forest<
  layout,
  typename specialization_types<layout, double_precision, large_trees>::threshold_type,
  typename specialization_types<layout, double_precision, large_trees>::index_type,
  typename specialization_types<layout, double_precision, large_trees>::metadata_type,
  typename specialization_types<layout, double_precision, large_trees>::offset_type>;

}  // namespace detail

/** A variant containing all standard decision_forest instantiations */
using decision_forest_variant = std::variant<
  detail::preset_decision_forest<
    std::variant_alternative_t<0, detail::specialization_variant>::layout,
    std::variant_alternative_t<0, detail::specialization_variant>::is_double_precision,
    std::variant_alternative_t<0, detail::specialization_variant>::has_large_trees>,
  detail::preset_decision_forest<
    std::variant_alternative_t<1, detail::specialization_variant>::layout,
    std::variant_alternative_t<1, detail::specialization_variant>::is_double_precision,
    std::variant_alternative_t<1, detail::specialization_variant>::has_large_trees>,
  detail::preset_decision_forest<
    std::variant_alternative_t<2, detail::specialization_variant>::layout,
    std::variant_alternative_t<2, detail::specialization_variant>::is_double_precision,
    std::variant_alternative_t<2, detail::specialization_variant>::has_large_trees>,
  detail::preset_decision_forest<
    std::variant_alternative_t<3, detail::specialization_variant>::layout,
    std::variant_alternative_t<3, detail::specialization_variant>::is_double_precision,
    std::variant_alternative_t<3, detail::specialization_variant>::has_large_trees>,
  detail::preset_decision_forest<
    std::variant_alternative_t<4, detail::specialization_variant>::layout,
    std::variant_alternative_t<4, detail::specialization_variant>::is_double_precision,
    std::variant_alternative_t<4, detail::specialization_variant>::has_large_trees>,
  detail::preset_decision_forest<
    std::variant_alternative_t<5, detail::specialization_variant>::layout,
    std::variant_alternative_t<5, detail::specialization_variant>::is_double_precision,
    std::variant_alternative_t<5, detail::specialization_variant>::has_large_trees>,
  detail::preset_decision_forest<
    std::variant_alternative_t<6, detail::specialization_variant>::layout,
    std::variant_alternative_t<6, detail::specialization_variant>::is_double_precision,
    std::variant_alternative_t<6, detail::specialization_variant>::has_large_trees>,
  detail::preset_decision_forest<
    std::variant_alternative_t<7, detail::specialization_variant>::layout,
    std::variant_alternative_t<7, detail::specialization_variant>::is_double_precision,
    std::variant_alternative_t<7, detail::specialization_variant>::has_large_trees>>;

/**
 * Determine the variant index of the decision_forest type to use based on
 * model characteristics
 *
 * @param use_double_thresholds Whether single or double-precision floating
 * point values should be used for quantitative splits
 * @param max_node_offset The largest offset between a parent node and either
 * of its children
 * @param num_features The number of input features per row
 * @param num_categorical_nodes The total number of categorical nodes in the
 * forest
 * @param max_num_categories The maximum number of categories in any
 * categorical feature used by the model
 * @param num_vector_leaves The total number of leaf nodes which produce vector
 * outputs. For non-vector-leaf models, this should be 0. For vector-leaf
 * models, this should be the total number of leaf nodes.
* @param layout The in-memory layout to be used for nodes in the forest */ inline auto get_forest_variant_index(bool use_double_thresholds, index_type max_node_offset, index_type num_features, index_type num_categorical_nodes = index_type{}, index_type max_num_categories = index_type{}, index_type num_vector_leaves = index_type{}, tree_layout layout = preferred_tree_layout) { using small_index_t = typename detail::specialization_types<preferred_tree_layout, false, false>::index_type; auto max_local_categories = index_type(sizeof(small_index_t) * 8); // If the index required for pointing to categorical storage bins or vector // leaf output exceeds what we can store in a uint32_t, uint64_t will be used // // TODO(wphicks): We are overestimating categorical storage required here auto double_indexes_required = (max_num_categories > max_local_categories && ((raft_proto::ceildiv(max_num_categories, max_local_categories) + 1 * num_categorical_nodes) > std::numeric_limits<small_index_t>::max())) || num_vector_leaves > std::numeric_limits<small_index_t>::max(); auto double_precision = use_double_thresholds || double_indexes_required; using small_metadata_t = typename detail::specialization_types<preferred_tree_layout, false, false>::metadata_type; using small_offset_t = typename detail::specialization_types<preferred_tree_layout, false, false>::offset_type; auto large_trees = (num_features > (std::numeric_limits<small_metadata_t>::max() >> reserved_node_metadata_bits) || max_node_offset > std::numeric_limits<small_offset_t>::max()); auto layout_value = static_cast<std::underlying_type_t<tree_layout>>(layout); return ((index_type{layout_value} << index_type{2}) + (index_type{double_precision} << index_type{1}) + index_type{large_trees}); } } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/README.md
# Forest Inference Library (FIL) RAPIDS Forest Inference Library (FIL) provides accelerated inference for tree-based machine learning models. Unlike packages like XGBoost, LightGBM, or even Scikit-Learn/cuML's random forest implementations, FIL cannot be used to _train_ forest models. Instead, its goal is to speed up inference using forest models trained by all of those packages. This directory contains an experimental new implementation of FIL which provides both CPU and GPU execution. Its GPU implementation also offers improved performance relative to the existing implementation in many but not all cases. For Python usage information and more extensive information on parameter-tuning and other end-user functionality, check out TODO(wphicks). This document will focus on the C++ implementation, offering details on both how to use FIL as a library and how to work with it as a FIL contributor. ## C++ Usage All headers required to make use of FIL in another C++ project are available in the top-level include directory. The `detail` directory contains implementation details that are not required to use FIL and which will certainly change over time. **A NOTE ON THE `raft_proto` NAMESPACE:** For the first iteration of this FIL implementation, much of the more general-purpose CPU-GPU interoperable code has temporarily been put in the `raft_proto` namespace. As the name suggests, the intention is that most or all of this functionality will either be moved to RAFT or that RAFT features will be updated to provide CPU-GPU compatible versions of the same. The public API includes just a few references to this namespace, so be prepared to shift these references to the corresponding RAFT symbols as this version of FIL progresses out of experimental. ### Importing a model FIL uses Treelite as a common translation layer for all its input types. To load a forest model, we first create a Treelite model handle as follows. 
Here, we use an XGBoost JSON model as an example, but Treelite has similar load methods for each of the serialization formats it supports. ```cpp auto filename = "xgboost.json"; auto tl_model = treelite::frontend::LoadXGBoostModel(filename); ``` We then import the Treelite model into FIL via the `import_from_treelite_model` function. All arguments except the first are optional, but we show them all here for illustration. ```cpp auto stream = cudaStream_t{}; checkCuda(cudaStreamCreate(&stream)); auto fil_model = import_from_treelite_model( *tl_model, // The Treelite model tree_layout::depth_first, // layout 128u, // align_bytes false, // use_double_precision raft_proto::device_type::gpu, // mem_type 0, // device_id stream // CUDA stream ); ``` **layout:** The in-memory layout of nodes in the model. Depending on the model, either `depth_first` or `breadth_first` may offer better performance. In general, shallow trees benefit from a `breadth_first` layout, and deep trees benefit from a `depth_first` layout, but this pattern is not absolute. **align_bytes:** If given a non-zero value, each tree will be padded to a size that is a multiple of this value by appending additional empty nodes. This can offer mild performance benefits by increasing the likelihood that memory reads begin on a cache line boundary. For GPU execution, a value of 128 is recommended. For most CPUs, a value of 0 is recommended, although using 64 can occasionally provide benefits. **use_double_precision**: This argument takes a `std::optional<bool>`. If `std::nullopt` is used (the default), the *native* precision of the model serialization format will be used. Otherwise, the model will be evaluated at double precision if this value is set to `true` or single precision if this value is set to `false`. **dev_type**: This argument controls where the model will be executed. If `raft_proto::device_type::gpu`, then it will be executed on GPU. 
If `raft_proto::device_type::cpu`, then it will be executed on CPU.

**device_id**: This integer indicates the ID of the GPU which should be used. If CPU is being used, this argument is ignored.

**stream**: The CUDA stream which will be used for the actual model import. If CPU is being used, this argument is ignored. Note that you do *not* need CUDA headers if you are working with a CPU-only build of FIL. This argument uses a `raft_proto::cuda_stream` type which evaluates to a placeholder type in CPU-only builds. For applications which themselves want to implement CPU-GPU interoperable builds, the `raft_proto::cuda_stream` type can be used directly.

### Inference

The `import_from_treelite_model` function will return a `forest_model` object. This object has several `predict` methods that can be used to return inference results for the model. We will describe here the one most likely to be used by external applications:

```cpp
auto num_rows = std::size_t{1000};
auto num_outputs = fil_model.num_outputs();  // Outputs per row
auto output = static_cast<float*>(nullptr);  // Loaded as single
                                             // precision, so use floats
                                             // for I/O
// Allocate enough space for num_outputs floats per row
cudaMalloc((void**)&output, num_rows * num_outputs * sizeof(float));

// Assuming that input is a float* pointing to data already located on-device
auto handle = raft_proto::handle_t{};
fil_model.predict(
  handle,
  output,
  input,
  num_rows,
  raft_proto::device_type::gpu,  // out_mem_type
  raft_proto::device_type::gpu,  // in_mem_type
  4  // chunk_size
);
```

**handle**: To provide a unified interface on CPU and GPU, we introduce `raft_proto::handle_t` as a wrapper for `raft::handle_t`. This is currently just a placeholder in CPU-only builds, and using it does not require any CUDA functionality. For GPU-enabled builds, you can construct a `raft_proto::handle_t` directly from the `raft::handle_t` you wish to use.

**output**: Pointer to pre-allocated buffer where results should be written.
If the model has been loaded at single precision, this should be a `float*`. If the model has been loaded at double precision, this should be a `double*`. **input**: Pointer to the input data (in C-major order). If the model has been loaded at single precision, this should be a `float*`. If the model has been loaded at double precision, this should be a `double*`. **num_rows**: The number of input rows. **out_mem_type**: Indicates whether output buffer is on device or host. **in_mem_type**: Indicates whether input buffer is on device or host. **chunk_size**: This value has a somewhat different meaning for CPU and GPU, but it generally indicates the number of rows which are evaluated in a single iteration of FIL's forest evaluation algorithm. On GPU, any power of 2 from 1 to 32 may be used for this value, and *in general* larger batches benefit from higher values. Optimizing this value can make an *enormous* difference in performance and depends on both the model and hardware used to run it. On CPU, this parameter can take on any value, but powers of 2 between 1 and 512 are recommended. A default value of 64 is generally a safe choice, unless the batch size is less than 64, in which case a smaller value is recommended. In general, larger batch sizes benefit from higher chunk size values. This argument is a `std::optional`, and if `std::nullopt` is passed, a chunk size will be selected based on heuristics. ## Learning More While the above usage summary should be enough to get started using FIL in another C++ application, you can learn more about the details of this implementation by reading TODO(wphicks).
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/tree_layout.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace ML { namespace experimental { namespace fil { enum class tree_layout : unsigned char { depth_first = 0, breadth_first = 1 }; } } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/constants.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/tree_layout.hpp> namespace ML { namespace experimental { namespace fil { /** * The default memory layout for FIL trees if not otherwise specified */ auto constexpr static const preferred_tree_layout = tree_layout::breadth_first; /** * The number of bits used for flags in node metadata * * Each node in a FIL tree must specify the feature used for its split in * addition to some other basic information. The feature ID is "packed" * with a few flags in order to reduce the size of the node. This constant * indicates how many leading bits are reserved for flags to allow import * functions to assess how much space is required for the whole metadata * field. */ auto constexpr static const reserved_node_metadata_bits = 3; } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/postproc_ops.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace ML { namespace experimental { namespace fil { /** Enum representing possible row-wise operations on output */ enum struct row_op : unsigned char { disable = 0b00100000, softmax = 0b01000000, max_index = 0b10000000 }; /** Enum representing possible element-wise operations on output */ enum struct element_op : unsigned char { disable = 0b00000000, signed_square = 0b00000001, hinge = 0b00000010, sigmoid = 0b00000100, exponential = 0b00001000, logarithm_one_plus_exp = 0b00010000 }; } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/treelite_importer.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once
#include <cmath>
#include <cstddef>
#include <cuml/experimental/fil/constants.hpp>
#include <cuml/experimental/fil/decision_forest.hpp>
#include <cuml/experimental/fil/detail/decision_forest_builder.hpp>
#include <cuml/experimental/fil/detail/index_type.hpp>
#include <cuml/experimental/fil/exceptions.hpp>
#include <cuml/experimental/fil/forest_model.hpp>
#include <cuml/experimental/fil/postproc_ops.hpp>
#include <cuml/experimental/fil/tree_layout.hpp>
#include <queue>
#include <stack>
#include <treelite/c_api.h>
#include <treelite/tree.h>
#include <treelite/typeinfo.h>

namespace ML {
namespace experimental {
namespace fil {
namespace detail {

/** A template for storing nodes in either a depth or breadth-first traversal
 *
 * Wraps a std::stack (depth-first) or std::queue (breadth-first) behind a
 * uniform add/next/peek interface so traversal code is layout-agnostic. */
template <tree_layout layout, typename T>
struct traversal_container {
  using backing_container_t =
    std::conditional_t<layout == tree_layout::depth_first, std::stack<T>, std::queue<T>>;
  void add(T const& val) { data_.push(val); }
  // Push a (hot, distant) child pair such that `hot` is always retrieved
  // before `distant` regardless of the backing container's ordering
  void add(T const& hot, T const& distant)
  {
    if constexpr (layout == tree_layout::depth_first) {
      // Stack is LIFO: push the hot child last so it pops first
      data_.push(distant);
      data_.push(hot);
    } else {
      // Queue is FIFO: push the hot child first so it dequeues first
      data_.push(hot);
      data_.push(distant);
    }
  }
  // Remove and return the next element in traversal order
  auto next()
  {
    if constexpr (std::is_same_v<backing_container_t, std::stack<T>>) {
      auto result = data_.top();
      data_.pop();
      return result;
    } else {
      auto result = data_.front();
      data_.pop();
      return result;
    }
  }
  // Return the next element without removing it
  auto peek()
  {
    if constexpr (std::is_same_v<backing_container_t, std::stack<T>>) {
      return data_.top();
    } else {
      return data_.front();
    }
  }
  [[nodiscard]] auto empty() { return data_.empty(); }
  auto size() { return data_.size(); }

 private:
  backing_container_t data_;
};

/** Postprocessing settings extracted from a Treelite model's pred_transform */
struct postproc_params_t {
  element_op element = element_op::disable;
  row_op row         = row_op::disable;
  double constant    = 1.0;
};
}  // namespace detail

/**
 * Struct used to import a model from Treelite to FIL
 *
 * @tparam layout The in-memory layout for nodes to be loaded into FIL
 */
template <tree_layout layout>
struct treelite_importer {
  /** A thin adapter over a Treelite node, translating Treelite's split/leaf
   * accessors into the vocabulary used by the FIL builder */
  template <typename tl_threshold_t, typename tl_output_t>
  struct treelite_node {
    treelite::Tree<tl_threshold_t, tl_output_t> const& tree;
    int node_id;               // Treelite's ID for this node
    index_type parent_index;   // traversal index of this node's parent
    index_type own_index;      // this node's index in traversal order

    auto is_leaf() { return tree.IsLeaf(node_id); }
    // Leaf output as a vector; scalar leaves are wrapped in a 1-element vector
    auto get_output()
    {
      auto result = std::vector<tl_output_t>{};
      if (tree.HasLeafVector(node_id)) {
        result = tree.LeafVector(node_id);
      } else {
        result.push_back(tree.LeafValue(node_id));
      }
      return result;
    }
    auto get_categories() { return tree.MatchingCategories(node_id); }
    auto get_feature() { return tree.SplitIndex(node_id); }
    auto is_categorical()
    {
      return tree.SplitType(node_id) == treelite::SplitFeatureType::kCategorical;
    }
    // Whether the default (missing-value) child is the "distant" child, i.e.
    // the one NOT taken when the split condition holds
    auto default_distant()
    {
      auto result        = false;
      auto default_child = tree.DefaultChild(node_id);
      if (is_categorical()) {
        if (tree.CategoriesListRightChild(node_id)) {
          result = (default_child == tree.RightChild(node_id));
        } else {
          result = (default_child == tree.LeftChild(node_id));
        }
      } else {
        auto tl_operator = tree.ComparisonOp(node_id);
        if (tl_operator == treelite::Operator::kLT || tl_operator == treelite::Operator::kLE) {
          result = (default_child == tree.LeftChild(node_id));
        } else {
          result = (default_child == tree.RightChild(node_id));
        }
      }
      return result;
    }
    auto threshold() { return tree.Threshold(node_id); }
    // Matching categories, or an empty list for non-categorical nodes
    auto categories()
    {
      auto result = decltype(tree.MatchingCategories(node_id)){};
      if (is_categorical()) { result = tree.MatchingCategories(node_id); }
      return result;
    }
    // Whether the split comparison includes equality (kGT/kLE as opposed to
    // kLT/kGE), after the child-ordering normalization applied on import
    auto is_inclusive()
    {
      auto tl_operator = tree.ComparisonOp(node_id);
      return tl_operator == treelite::Operator::kGT || tl_operator == treelite::Operator::kLE;
    }
  };

  /** Visit every node of one Treelite tree in `layout` order, invoking
   * lambda(treelite_node, treelite_node_id) on each; children are queued with
   * the "hot" (condition-true) child first */
  template <typename tl_threshold_t, typename tl_output_t, typename lambda_t>
  void node_for_each(treelite::Tree<tl_threshold_t, tl_output_t> const& tl_tree, lambda_t&& lambda)
  {
    using node_index_t = decltype(tl_tree.LeftChild(0));
    auto to_be_visited = detail::traversal_container<layout, node_index_t>{};
    to_be_visited.add(node_index_t{});  // start at the root (ID 0)
    // Parallel container tracking, for each pending node, its parent's index
    // in traversal order
    auto parent_indices = detail::traversal_container<layout, index_type>{};
    auto cur_index      = index_type{};
    parent_indices.add(cur_index);  // root is its own parent
    while (!to_be_visited.empty()) {
      auto node_id        = to_be_visited.next();
      auto remaining_size = to_be_visited.size();  // NOTE(review): unused
      auto tl_node        = treelite_node<tl_threshold_t, tl_output_t>{
        tl_tree, node_id, parent_indices.next(), cur_index};
      lambda(tl_node, node_id);
      if (!tl_tree.IsLeaf(node_id)) {
        auto tl_left_id  = tl_tree.LeftChild(node_id);
        auto tl_right_id = tl_tree.RightChild(node_id);
        auto tl_operator = tl_tree.ComparisonOp(node_id);
        if (!tl_node.is_categorical()) {
          // Normalize so the condition-true ("hot") child is visited first:
          // for kLT/kLE the left child is taken when true, so right is distant
          if (tl_operator == treelite::Operator::kLT || tl_operator == treelite::Operator::kLE) {
            to_be_visited.add(tl_right_id, tl_left_id);
          } else if (tl_operator == treelite::Operator::kGT ||
                     tl_operator == treelite::Operator::kGE) {
            to_be_visited.add(tl_left_id, tl_right_id);
          } else {
            throw model_import_error("Unrecognized Treelite operator");
          }
        } else {
          if (tl_tree.CategoriesListRightChild(node_id)) {
            to_be_visited.add(tl_left_id, tl_right_id);
          } else {
            to_be_visited.add(tl_right_id, tl_left_id);
          }
        }
        // Both children share this node's traversal index as their parent
        parent_indices.add(cur_index, cur_index);
      }
      ++cur_index;
    }
  }

  /** Map each node of a tree through `lambda`, writing to an output iterator */
  template <typename tl_threshold_t, typename tl_output_t, typename iter_t, typename lambda_t>
  void node_transform(treelite::Tree<tl_threshold_t, tl_output_t> const& tl_tree,
                      iter_t output_iter,
                      lambda_t&& lambda)
  {
    node_for_each(tl_tree, [&output_iter, &lambda](auto&& tl_node, int tl_node_id) {
      *output_iter = lambda(tl_node);
      ++output_iter;
    });
  }

  /** Fold over all nodes of a tree: result = lambda(result, node) */
  template <typename tl_threshold_t, typename tl_output_t, typename T, typename lambda_t>
  auto node_accumulate(treelite::Tree<tl_threshold_t, tl_output_t> const& tl_tree,
                       T init,
                       lambda_t&& lambda)
  {
    auto result = init;
    node_for_each(tl_tree, [&result, &lambda](auto&& tl_node, int tl_node_id) {
      result = lambda(result, tl_node);
    });
    return result;
  }

  /** Collect all nodes of a tree in traversal (layout) order */
  template <typename tl_threshold_t, typename tl_output_t>
  auto get_nodes(treelite::Tree<tl_threshold_t, tl_output_t> const& tl_tree)
  {
    auto result = std::vector<treelite_node<tl_threshold_t, tl_output_t>>{};
    result.reserve(tl_tree.num_nodes);
    node_transform(tl_tree, std::back_inserter(result), [](auto&& node) { return node; });
    return result;
  }

  /** For each node (indexed in traversal order), the offset from that node to
   * its most distant child in the final layout */
  template <typename tl_threshold_t, typename tl_output_t>
  auto get_offsets(treelite::Tree<tl_threshold_t, tl_output_t> const& tl_tree)
  {
    auto result = std::vector<index_type>(tl_tree.num_nodes);
    auto nodes  = get_nodes(tl_tree);
    for (auto i = index_type{}; i < nodes.size(); ++i) {
      // Current index should always be greater than or equal to parent index.
      // Later children will overwrite values set by earlier children, ensuring
      // that most distant offset is used.
      result[nodes[i].parent_index] = index_type{i - nodes[i].parent_index};
    }
    return result;
  }

  /** Invoke `lambda` on each tree of the (type-erased) Treelite model */
  template <typename lambda_t>
  void tree_for_each(treelite::Model const& tl_model, lambda_t&& lambda)
  {
    tl_model.Dispatch([&lambda](auto&& concrete_tl_model) {
      std::for_each(std::begin(concrete_tl_model.trees), std::end(concrete_tl_model.trees), lambda);
    });
  }

  /** Map each tree of the model through `lambda` into an output iterator */
  template <typename iter_t, typename lambda_t>
  void tree_transform(treelite::Model const& tl_model, iter_t output_iter, lambda_t&& lambda)
  {
    tl_model.Dispatch([&output_iter, &lambda](auto&& concrete_tl_model) {
      std::transform(std::begin(concrete_tl_model.trees),
                     std::end(concrete_tl_model.trees),
                     output_iter,
                     lambda);
    });
  }

  /** Fold over all trees of the model: result = lambda(result, tree) */
  template <typename T, typename lambda_t>
  auto tree_accumulate(treelite::Model const& tl_model, T init, lambda_t&& lambda)
  {
    auto result = init;
    tree_for_each(tl_model, [&result, &lambda](auto&& tree) { result = lambda(result, tree); });
    return result;
  }

  auto num_trees(treelite::Model const& tl_model)
  {
    auto result = index_type{};
    tl_model.Dispatch(
      [&result](auto&& concrete_tl_model) { result = concrete_tl_model.trees.size(); });
    return result;
  }

  /** Per-tree vectors of distant-child offsets (see single-tree overload) */
  auto get_offsets(treelite::Model const& tl_model)
  {
    auto result = std::vector<std::vector<index_type>>{};
    result.reserve(num_trees(tl_model));
    tree_transform(
      tl_model, std::back_inserter(result), [this](auto&& tree) { return get_offsets(tree); });
    return result;
  }

  auto get_tree_sizes(treelite::Model const& tl_model)
  {
    auto result = std::vector<index_type>{};
    tree_transform(
      tl_model, std::back_inserter(result), [](auto&& tree) { return tree.num_nodes; });
    return result;
  }

  auto get_num_class(treelite::Model const& tl_model)
  {
    auto result = index_type{};
    tl_model.Dispatch(
      [&result](auto&& concrete_tl_model) { result = concrete_tl_model.task_param.num_class; });
    return result;
  }

  auto get_num_feature(treelite::Model const& tl_model)
  {
    auto result = index_type{};
    tl_model.Dispatch(
      [&result](auto&& concrete_tl_model) { result = concrete_tl_model.num_feature; });
    return result;
  }

  /** Max over all categorical nodes of (largest category value + 1) */
  auto get_max_num_categories(treelite::Model const& tl_model)
  {
    return tree_accumulate(tl_model, index_type{}, [this](auto&& accum, auto&& tree) {
      return node_accumulate(tree, accum, [](auto&& cur_accum, auto&& tl_node) {
        auto result = cur_accum;
        for (auto&& cat : tl_node.categories()) {
          result = (cat + 1 > result) ? cat + 1 : result;
        }
        return result;
      });
    });
  }

  auto get_num_categorical_nodes(treelite::Model const& tl_model)
  {
    return tree_accumulate(tl_model, index_type{}, [this](auto&& accum, auto&& tree) {
      return node_accumulate(tree, accum, [](auto&& cur_accum, auto&& tl_node) {
        return cur_accum + tl_node.is_categorical();
      });
    });
  }

  /** Count of leaves whose output is a vector (size > 1) */
  auto get_num_leaf_vector_nodes(treelite::Model const& tl_model)
  {
    return tree_accumulate(tl_model, index_type{}, [this](auto&& accum, auto&& tree) {
      return node_accumulate(tree, accum, [](auto&& cur_accum, auto&& tl_node) {
        return cur_accum + (tl_node.is_leaf() && tl_node.get_output().size() > 1);
      });
    });
  }

  /** Divisor used to average tree outputs; 1.0 when the model sums instead */
  auto get_average_factor(treelite::Model const& tl_model)
  {
    auto result = double{};
    tl_model.Dispatch([&result](auto&& concrete_tl_model) {
      if (concrete_tl_model.average_tree_output) {
        if (concrete_tl_model.task_type == treelite::TaskType::kMultiClfGrovePerClass) {
          // One grove per class: average over trees-per-class, not all trees
          result = concrete_tl_model.trees.size() / concrete_tl_model.task_param.num_class;
        } else {
          result = concrete_tl_model.trees.size();
        }
      } else {
        result = 1.0;
      }
    });
    return result;
  }

  auto get_bias(treelite::Model const& tl_model)
  {
    auto result = double{};
    tl_model.Dispatch(
      [&result](auto&& concrete_tl_model) { result = concrete_tl_model.param.global_bias; });
    return result;
  }

  /** Translate Treelite's pred_transform string into FIL postprocessing ops */
  auto get_postproc_params(treelite::Model const& tl_model)
  {
    auto result = detail::postproc_params_t{};
    tl_model.Dispatch([&result](auto&& concrete_tl_model) {
      auto tl_pred_transform = std::string{concrete_tl_model.param.pred_transform};
      if (tl_pred_transform == std::string{"identity"} ||
          tl_pred_transform == std::string{"identity_multiclass"}) {
        result.element = element_op::disable;
        result.row     = row_op::disable;
      } else if (tl_pred_transform == std::string{"signed_square"}) {
        result.element = element_op::signed_square;
      } else if (tl_pred_transform == std::string{"hinge"}) {
        result.element = element_op::hinge;
      } else if (tl_pred_transform == std::string{"sigmoid"}) {
        result.constant = concrete_tl_model.param.sigmoid_alpha;
        result.element  = element_op::sigmoid;
      } else if (tl_pred_transform == std::string{"exponential"}) {
        result.element = element_op::exponential;
      } else if (tl_pred_transform == std::string{"exponential_standard_ratio"}) {
        result.constant = -concrete_tl_model.param.ratio_c / std::log(2);
        result.element  = element_op::exponential;
      } else if (tl_pred_transform == std::string{"logarithm_one_plus_exp"}) {
        result.element = element_op::logarithm_one_plus_exp;
      } else if (tl_pred_transform == std::string{"max_index"}) {
        result.row = row_op::max_index;
      } else if (tl_pred_transform == std::string{"softmax"}) {
        result.row = row_op::softmax;
      } else if (tl_pred_transform == std::string{"multiclass_ova"}) {
        result.constant = concrete_tl_model.param.sigmoid_alpha;
        result.element  = element_op::sigmoid;
      } else {
        throw model_import_error{"Unrecognized Treelite pred_transform string"};
      }
    });
    return result;
  }

  auto uses_double_thresholds(treelite::Model const& tl_model)
  {
    auto result = false;
    switch (tl_model.GetThresholdType()) {
      case treelite::TypeInfo::kFloat64: result = true; break;
      case treelite::TypeInfo::kFloat32: result = false; break;
      default: throw model_import_error("Unrecognized Treelite threshold type");
    }
    return result;
  }

  // NOTE(review): both output-type queries below switch on GetThresholdType(),
  // not a leaf-output type — confirm this is intentional (Treelite may
  // constrain output type to follow threshold type).
  auto uses_double_outputs(treelite::Model const& tl_model)
  {
    auto result = false;
    switch (tl_model.GetThresholdType()) {
      case treelite::TypeInfo::kFloat64: result = true; break;
      case treelite::TypeInfo::kFloat32: result = false; break;
      case treelite::TypeInfo::kUInt32: result = false; break;
      default: throw model_import_error("Unrecognized Treelite threshold type");
    }
    return result;
  }

  auto uses_integer_outputs(treelite::Model const& tl_model)
  {
    auto result = false;
    switch (tl_model.GetThresholdType()) {
      case treelite::TypeInfo::kFloat64: result = false; break;
      case treelite::TypeInfo::kFloat32: result = false; break;
      case treelite::TypeInfo::kUInt32: result = true; break;
      default: throw model_import_error("Unrecognized Treelite threshold type");
    }
    return result;
  }

  /**
   * Assuming that the correct decision_forest variant has been
   * identified, import to that variant
   *
   * Recurses at compile time over all variant alternatives; the branch whose
   * index matches target_variant_index performs the actual build.
   */
  template <index_type variant_index>
  auto import_to_specific_variant(index_type target_variant_index,
                                  treelite::Model const& tl_model,
                                  index_type num_class,
                                  index_type num_feature,
                                  index_type max_num_categories,
                                  std::vector<std::vector<index_type>> const& offsets,
                                  index_type align_bytes           = index_type{},
                                  raft_proto::device_type mem_type = raft_proto::device_type::cpu,
                                  int device                       = 0,
                                  raft_proto::cuda_stream stream   = raft_proto::cuda_stream{})
  {
    auto result = decision_forest_variant{};
    // Terminate the compile-time recursion past the last alternative
    if constexpr (variant_index != std::variant_size_v<decision_forest_variant>) {
      if (variant_index == target_variant_index) {
        using forest_model_t = std::variant_alternative_t<variant_index, decision_forest_variant>;
        auto builder =
          detail::decision_forest_builder<forest_model_t>(max_num_categories, align_bytes);
        auto tree_count = num_trees(tl_model);  // NOTE(review): unused
        auto tree_index = index_type{};
        tree_for_each(tl_model, [this, &builder, &tree_index, &offsets](auto&& tree) {
          builder.start_new_tree();
          auto node_index = index_type{};
          node_for_each(
            tree, [&builder, &tree_index, &node_index, &offsets](auto&& node, int tl_node_id) {
              if (node.is_leaf()) {
                auto output = node.get_output();
                builder.set_output_size(output.size());
                if (output.size() > index_type{1}) {
                  builder.add_leaf_vector_node(std::begin(output), std::end(output), tl_node_id);
                } else {
                  builder.add_node(typename forest_model_t::io_type(output[0]), tl_node_id, true);
                }
              } else {
                if (node.is_categorical()) {
                  auto categories = node.get_categories();
                  builder.add_categorical_node(std::begin(categories),
                                               std::end(categories),
                                               tl_node_id,
                                               node.default_distant(),
                                               node.get_feature(),
                                               offsets[tree_index][node_index]);
                } else {
                  builder.add_node(typename forest_model_t::threshold_type(node.threshold()),
                                   tl_node_id,
                                   false,
                                   node.default_distant(),
                                   false,
                                   node.get_feature(),
                                   offsets[tree_index][node_index],
                                   node.is_inclusive());
                }
              }
              ++node_index;
            });
          ++tree_index;
        });
        builder.set_average_factor(get_average_factor(tl_model));
        builder.set_bias(get_bias(tl_model));
        auto postproc_params = get_postproc_params(tl_model);
        builder.set_element_postproc(postproc_params.element);
        builder.set_row_postproc(postproc_params.row);
        builder.set_postproc_constant(postproc_params.constant);
        result.template emplace<variant_index>(
          builder.get_decision_forest(num_feature, num_class, mem_type, device, stream));
      } else {
        // Not this alternative; try the next one
        result = import_to_specific_variant<variant_index + 1>(target_variant_index,
                                                               tl_model,
                                                               num_class,
                                                               num_feature,
                                                               max_num_categories,
                                                               offsets,
                                                               align_bytes,
                                                               mem_type,
                                                               device,
                                                               stream);
      }
    }
    return result;
  }

  /**
   * Import a treelite model to FIL
   *
   * Load a model from Treelite to a FIL forest_model. The model will be
   * inspected to determine the correct underlying decision_forest variant to
   * use within the forest_model object.
   *
   * @param tl_model The Treelite Model to load
   * @param align_bytes If non-zero, ensure that each tree is stored in a
   * multiple of this value of bytes by padding with empty nodes. This can
   * be useful for increasing the likelihood that successive reads will take
   * place within a single cache line. On GPU, a value of 128 can be used for
   * this purpose. On CPU, a value of either 0 or 64 typically produces
   * optimal performance.
   * @param use_double_precision Whether or not to use 64 bit floats for model
   * evaluation and 64 bit ints for applicable indexing
   * @param dev_type Which device type to use for inference (CPU or GPU)
   * @param device For GPU execution, the device id for the device on which this
   * model is to be loaded
   * @param stream The CUDA stream to use for loading this model (can be
   * omitted for CPU).
   */
  auto import(treelite::Model const& tl_model,
              index_type align_bytes                   = index_type{},
              std::optional<bool> use_double_precision = std::nullopt,
              raft_proto::device_type dev_type         = raft_proto::device_type::cpu,
              int device                               = 0,
              raft_proto::cuda_stream stream           = raft_proto::cuda_stream{})
  {
    auto result                = decision_forest_variant{};  // NOTE(review): unused
    auto num_feature           = get_num_feature(tl_model);
    auto max_num_categories    = get_max_num_categories(tl_model);
    auto num_categorical_nodes = get_num_categorical_nodes(tl_model);
    auto num_leaf_vector_nodes = get_num_leaf_vector_nodes(tl_model);
    // std::nullopt means "use the model's native threshold precision"
    auto use_double_thresholds = use_double_precision.value_or(uses_double_thresholds(tl_model));
    auto offsets               = get_offsets(tl_model);
    // Largest parent-to-child offset anywhere in the forest; drives the
    // choice of offset integer width in the variant selection below
    auto max_offset = std::accumulate(
      std::begin(offsets),
      std::end(offsets),
      index_type{},
      [&offsets](auto&& cur_max, auto&& tree_offsets) {
        return std::max(cur_max,
                        *std::max_element(std::begin(tree_offsets), std::end(tree_offsets)));
      });
    auto tree_sizes = std::vector<index_type>{};
    std::transform(std::begin(offsets),
                   std::end(offsets),
                   std::back_inserter(tree_sizes),
                   [](auto&& tree_offsets) { return tree_offsets.size(); });
    auto variant_index = get_forest_variant_index(use_double_thresholds,
                                                  max_offset,
                                                  num_feature,
                                                  num_categorical_nodes,
                                                  max_num_categories,
                                                  num_leaf_vector_nodes,
                                                  layout);
    auto num_class     = get_num_class(tl_model);
    return forest_model{import_to_specific_variant<index_type{}>(variant_index,
                                                                 tl_model,
                                                                 num_class,
                                                                 num_feature,
                                                                 max_num_categories,
                                                                 offsets,
                                                                 align_bytes,
                                                                 dev_type,
                                                                 device,
                                                                 stream)};
  }
};

/**
 * Import a treelite model to FIL
 *
 * Load a model from Treelite to a FIL
forest_model. The model will be
 * inspected to determine the correct underlying decision_forest variant to
 * use within the forest_model object.
 *
 * @param tl_model The Treelite Model to load
 * @param layout The in-memory layout of nodes in the loaded forest
 * @param align_bytes If non-zero, ensure that each tree is stored in a
 * multiple of this value of bytes by padding with empty nodes. This can
 * be useful for increasing the likelihood that successive reads will take
 * place within a single cache line. On GPU, a value of 128 can be used for
 * this purpose. On CPU, a value of either 0 or 64 typically produces
 * optimal performance.
 * @param use_double_precision Whether or not to use 64 bit floats for model
 * evaluation and 64 bit ints for applicable indexing
 * @param dev_type Which device type to use for inference (CPU or GPU)
 * @param device For GPU execution, the device id for the device on which this
 * model is to be loaded
 * @param stream The CUDA stream to use for loading this model (can be
 * omitted for CPU).
 * @return A forest_model wrapping the imported decision forest.
 */
auto import_from_treelite_model(treelite::Model const& tl_model,
                                tree_layout layout                       = preferred_tree_layout,
                                index_type align_bytes                   = index_type{},
                                std::optional<bool> use_double_precision = std::nullopt,
                                raft_proto::device_type dev_type = raft_proto::device_type::cpu,
                                int device                       = 0,
                                raft_proto::cuda_stream stream   = raft_proto::cuda_stream{})
{
  auto result = forest_model{};
  // The layout is a template parameter of treelite_importer, so dispatch at
  // runtime to the instantiation matching the requested layout.
  switch (layout) {
    case tree_layout::depth_first:
      result = treelite_importer<tree_layout::depth_first>{}.import(
        tl_model, align_bytes, use_double_precision, dev_type, device, stream);
      break;
    case tree_layout::breadth_first:
      result = treelite_importer<tree_layout::breadth_first>{}.import(
        tl_model, align_bytes, use_double_precision, dev_type, device, stream);
      break;
  }
  return result;
}

/**
 * Import a treelite model handle to FIL
 *
 * Load a model from a Treelite model handle (type-erased treelite::Model
 * object) to a FIL forest_model. The model will be inspected to determine the
 * correct underlying decision_forest variant to use within the forest_model
 * object.
 *
 * @param tl_handle The Treelite ModelHandle to load
 * @param layout The in-memory layout of nodes in the loaded forest
 * @param align_bytes If non-zero, ensure that each tree is stored in a
 * multiple of this value of bytes by padding with empty nodes. This can
 * be useful for increasing the likelihood that successive reads will take
 * place within a single cache line. On GPU, a value of 128 can be used for
 * this purpose. On CPU, a value of either 0 or 64 typically produces
 * optimal performance.
 * @param use_double_precision Whether or not to use 64 bit floats for model
 * evaluation and 64 bit ints for applicable indexing
 * @param dev_type Which device type to use for inference (CPU or GPU)
 * @param device For GPU execution, the device id for the device on which this
 * model is to be loaded
 * @param stream The CUDA stream to use for loading this model (can be
 * omitted for CPU).
 */
auto import_from_treelite_handle(ModelHandle tl_handle,
                                 tree_layout layout                       = preferred_tree_layout,
                                 index_type align_bytes                   = index_type{},
                                 std::optional<bool> use_double_precision = std::nullopt,
                                 raft_proto::device_type dev_type = raft_proto::device_type::cpu,
                                 int device                       = 0,
                                 raft_proto::cuda_stream stream   = raft_proto::cuda_stream{})
{
  // ModelHandle is a type-erased treelite::Model*; recover the concrete
  // object and delegate to the typed overload.
  return import_from_treelite_model(*static_cast<treelite::Model*>(tl_handle),
                                    layout,
                                    align_bytes,
                                    use_double_precision,
                                    dev_type,
                                    device,
                                    stream);
}

}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/exceptions.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once
#include <exception>
#include <string>
#include <utility>

namespace ML {
namespace experimental {
namespace fil {

/** Exception indicating model is incompatible with experimental FIL
 *
 * Owns its message as a std::string, so it may be constructed from
 * dynamically-built text.
 */
struct unusable_model_exception : std::exception {
  unusable_model_exception() : msg_{"Model is not compatible with experimental FIL"} {}
  // Take by value and move to avoid an extra copy of caller-built messages
  unusable_model_exception(std::string msg) : msg_{std::move(msg)} {}
  unusable_model_exception(char const* msg) : msg_{msg} {}
  // `override` (rather than re-declaring `virtual`) lets the compiler verify
  // this actually overrides std::exception::what
  char const* what() const noexcept override { return msg_.c_str(); }

 private:
  std::string msg_;
};

/** Exception indicating model import failed
 *
 * Stores only a pointer, so the message must be a string literal or
 * otherwise outlive the exception.
 */
struct model_import_error : std::exception {
  model_import_error() : model_import_error("Error while importing model") {}
  model_import_error(char const* msg) : msg_{msg} {}
  char const* what() const noexcept override { return msg_; }

 private:
  char const* msg_;
};

/**
 * Exception indicating a mismatch between the type of input data and the
 * model
 *
 * This typically occurs when doubles are provided as input to a model with
 * float thresholds or vice versa.
 */
struct type_error : std::exception {
  type_error() : type_error("Model cannot be used with given data type") {}
  type_error(char const* msg) : msg_{msg} {}
  char const* what() const noexcept override { return msg_; }

 private:
  char const* msg_;
};

}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/node.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/index_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> #include <cuml/experimental/fil/tree_layout.hpp> #include <iostream> #include <type_traits> namespace ML { namespace experimental { namespace fil { namespace detail { /* * Return the byte size to which a node with the given types should be aligned */ template <typename threshold_t, typename index_t, typename metadata_storage_t, typename offset_t> auto constexpr get_node_alignment() { auto total = index_type(std::max(sizeof(threshold_t), sizeof(index_t)) + sizeof(metadata_storage_t) + sizeof(offset_t)); auto result = index_type{8}; if (total > result) { result = index_type{16}; } if (total > result) { result = index_type{32}; } if (total > result) { result = index_type{64}; } if (total > result) { result = index_type{128}; } if (total > result) { result = index_type{256}; } if (total > result) { result = total; } return result; } } // namespace detail /* @brief A single node in a forest model * * Note that this implementation includes NO error checking for poorly-chosen * template types. If the types are not large enough to hold the required data, * an incorrect node will be silently constructed. Error checking occurs * instead at the time of construction of the entire forest. 
* * @tparam layout_v The layout for nodes within the forest * * @tparam threshold_t The type used as a threshold for evaluating a non-leaf * node or (when possible) the output of a leaf node. For non-categorical * nodes, if an input value is less than this threshold, the node evaluates to * true. For leaf nodes, output values will only be stored as this type if it * matches the leaf output type expected by the forest. Typically, this type is * either float or double. * * @tparam index_t The type used as an index to the output data for leaf nodes, * or to the categorical set for a categorical non-leaf node. This type should * be large enough to index the entire array of output data or categorical sets * stored in the forest. Typically, this type is either uint32_t or uint64_t. * Smaller types offer no benefit, since this value is stored in a union with * threshold_t, which is at least 32 bits. * * @tparam metadata_storage_t An unsigned integral type used for a bit-wise * representation of metadata about this node. The first three bits encode * whether or not this is a leaf node, whether or not we should default to the * more distant child in case of missing values, and whether or not this node * is categorical. The remaining bits are used to encode the feature index for * this node. Thus, uint8_t may be used for 2**(8 - 3) = 32 or fewer features, * uint16_t for 2**(16 - 3) = 8192 or fewer, and uint32_t for 536870912 or * fewer features. * * @tparam offset_t An integral type used to indicate the offset from * this node to its most distant child. This type must be large enough to store * the largest such offset in the forest model. 
 */
template <tree_layout layout_v,
          typename threshold_t,
          typename index_t,
          typename metadata_storage_t,
          typename offset_t>
struct alignas(detail::get_node_alignment<threshold_t, index_t, metadata_storage_t, offset_t>())
  node {
  // @brief An alias for layout_v
  auto constexpr static const layout = layout_v;
  // @brief An alias for threshold_t
  using threshold_type = threshold_t;
  // @brief An alias for index_t
  using index_type = index_t;
  /* @brief A union to hold either a threshold value or index
   *
   * All nodes will need EITHER a threshold value, an output value, OR an index
   * to data elsewhere that wil be used either for evaluating the node (e.g. an
   * index to a categorical set) or creating an output (e.g. an index to vector
   * leaf output). This union allows us to store either of these values without
   * using additional space for the unused value.
   */
  union value_type {
    threshold_t value;
    index_t index;
  };
  /// @brief An alias for metadata_storage_t
  using metadata_storage_type = metadata_storage_t;
  /// @brief An alias for offset_t
  using offset_type = offset_t;

  // TODO(wphicks): Add custom type to ensure given child offset is at least
  // one

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnarrowing"
  // Constructor for threshold-based (or value-output leaf) nodes: the union's
  // `value` member is populated.
  HOST DEVICE constexpr node(threshold_type value                 = threshold_type{},
                             bool is_leaf_node                    = true,
                             bool default_to_distant_child        = false,
                             bool is_categorical_node             = false,
                             metadata_storage_type feature        = metadata_storage_type{},
                             offset_type distant_child_offset     = offset_type{})
    : aligned_data{.inner_data = {{.value = value},
                                  distant_child_offset,
                                  construct_metadata(is_leaf_node,
                                                     default_to_distant_child,
                                                     is_categorical_node,
                                                     feature)}}
  {
  }
  // Constructor for index-based nodes (categorical sets or vector-leaf
  // output indices): the union's `index` member is populated.
  HOST DEVICE constexpr node(index_type index,
                             bool is_leaf_node                    = true,
                             bool default_to_distant_child        = false,
                             bool is_categorical_node             = false,
                             metadata_storage_type feature        = metadata_storage_type{},
                             offset_type distant_child_offset     = offset_type{})
    : aligned_data{.inner_data = {{.index = index},
                                  distant_child_offset,
                                  construct_metadata(is_leaf_node,
                                                     default_to_distant_child,
                                                     is_categorical_node,
                                                     feature)}}
  {
  }
#pragma GCC diagnostic pop

  /* The index of the feature for this node */
  HOST DEVICE auto constexpr feature_index() const
  {
    return aligned_data.inner_data.metadata & FEATURE_MASK;
  }
  /* Whether or not this node is a leaf node */
  // A leaf is encoded by a distant-child offset of zero
  HOST DEVICE auto constexpr is_leaf() const
  {
    return !bool(aligned_data.inner_data.distant_offset);
  }
  /* Whether or not to default to distant child in case of missing values */
  HOST DEVICE auto constexpr default_distant() const
  {
    return bool(aligned_data.inner_data.metadata & DEFAULT_DISTANT_MASK);
  }
  /* Whether or not this node is a categorical node */
  HOST DEVICE auto constexpr is_categorical() const
  {
    return bool(aligned_data.inner_data.metadata & CATEGORICAL_MASK);
  }
  /* The offset to the child of this node if it evaluates to given condition */
  // Branch-free select between hot child (offset 1) and distant child
  // (stored offset), with the arithmetic depending on the layout.
  HOST DEVICE auto constexpr child_offset(bool condition) const
  {
    if constexpr (layout == tree_layout::depth_first) {
      return offset_type{1} + condition * (aligned_data.inner_data.distant_offset - offset_type{1});
    } else if constexpr (layout == tree_layout::breadth_first) {
      return condition * offset_type{1} + (aligned_data.inner_data.distant_offset - offset_type{1});
    } else {
      static_assert(layout == tree_layout::depth_first);
    }
  }
  /* The threshold value for this node */
  HOST DEVICE auto constexpr threshold() const { return aligned_data.inner_data.stored_value.value; }
  /* The index value for this node */
  HOST DEVICE auto const& index() const { return aligned_data.inner_data.stored_value.index; }
  /* The output value for this node
   *
   * @tparam output_t The expected output type for this node.
   */
  // Vector leaves store an index into external output storage; scalar leaves
  // store the output value directly.
  template <bool has_vector_leaves>
  HOST DEVICE auto constexpr output() const
  {
    if constexpr (has_vector_leaves) {
      return aligned_data.inner_data.stored_value.index;
    } else {
      return aligned_data.inner_data.stored_value.value;
    }
  }

 private:
  /* Define all bit masks required to extract information from the stored
   * metadata. The first bit tells us whether or not this is a leaf node, the
   * second tells us whether or not we should default to the distant child in
   * the case of a missing value, and the third tells us whether or not this is
   * a categorical node. The remaining bits indicate the index of the feature
   * for this node */
  auto constexpr static const LEAF_BIT =
    metadata_storage_type(index_type(sizeof(metadata_storage_type) * 8 - 1));
  auto constexpr static const LEAF_MASK = metadata_storage_type(1 << LEAF_BIT);
  auto constexpr static const DEFAULT_DISTANT_BIT = metadata_storage_type(LEAF_BIT - 1);
  auto constexpr static const DEFAULT_DISTANT_MASK =
    metadata_storage_type(1 << DEFAULT_DISTANT_BIT);
  auto constexpr static const CATEGORICAL_BIT = metadata_storage_type(DEFAULT_DISTANT_BIT - 1);
  auto constexpr static const CATEGORICAL_MASK = metadata_storage_type(1 << CATEGORICAL_BIT);
  auto constexpr static const FEATURE_MASK =
    metadata_storage_type(~(LEAF_MASK | DEFAULT_DISTANT_MASK | CATEGORICAL_MASK));

  // Helper function for bit packing with the above masks. Addition is safe
  // here because the three flag bits and the masked feature bits are disjoint.
  auto static constexpr construct_metadata(bool is_leaf_node             = true,
                                           bool default_to_distant_child = false,
                                           bool is_categorical_node      = false,
                                           metadata_storage_type feature = metadata_storage_type{})
  {
    return metadata_storage_type((is_leaf_node << LEAF_BIT) +
                                 (default_to_distant_child << DEFAULT_DISTANT_BIT) +
                                 (is_categorical_node << CATEGORICAL_BIT) +
                                 (feature & FEATURE_MASK));
  }

  auto static constexpr const byte_size =
    detail::get_node_alignment<threshold_t, index_t, metadata_storage_t, offset_t>();

  struct inner_data_type {
    value_type stored_value;
    // TODO (wphicks): It may be possible to store both of the following together
    // to save bytes
    offset_type distant_offset;
    metadata_storage_type metadata;
  };
  // Union with a char array pads the node out to exactly the alignment size
  union aligned_data_type {
    inner_data_type inner_data;
    char spacer_data[byte_size];
  };
  aligned_data_type aligned_data;
};

}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/postprocessor.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once
#ifndef __CUDACC__
#include <math.h>
#endif
#include <cuml/experimental/fil/detail/index_type.hpp>
#include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp>
#include <cuml/experimental/fil/postproc_ops.hpp>
#include <limits>
#include <stddef.h>
#include <type_traits>

namespace ML {
namespace experimental {
namespace fil {

/* Convert the postprocessing operations into a single value
 * representing what must be done in the inference kernel
 *
 * The row-wise and element-wise enum values are OR-ed into one integer so a
 * single switch can dispatch on the (row_op, element_op) combination.
 */
HOST DEVICE inline auto constexpr ops_to_val(row_op row_wise, element_op elem_wise)
{
  return (static_cast<std::underlying_type_t<row_op>>(row_wise) |
          static_cast<std::underlying_type_t<element_op>>(elem_wise));
}

/*
 * Perform postprocessing on raw forest output
 *
 * @param val Pointer to the raw forest output
 * @param output_count The number of output values per row
 * @param out Pointer to the output buffer
 * @param stride Number of elements between the first element that must be
 * summed for a particular output element and the next. This is typically
 * equal to the number of "groves" of trees over which the computation
 * was divided.
 * @param average_factor The factor by which to divide during the
 * normalization step of postprocessing
 * @param bias The bias factor to subtract off during the
 * normalization step of postprocessing
 * @param constant If the postprocessing operation requires a constant,
 * it can be passed here.
 */
template <row_op row_wise_v, element_op elem_wise_v, typename io_t>
HOST DEVICE void postprocess(io_t* val,
                             index_type output_count,
                             io_t* out,
                             index_type stride   = index_type{1},
                             io_t average_factor = io_t{1},
                             io_t bias           = io_t{0},
                             io_t constant       = io_t{1})
{
  // max_index/max_value are only consumed for softmax/max_index row ops;
  // suppress the unused warning for the other instantiations.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
  auto max_index = index_type{};
  auto max_value = std::numeric_limits<io_t>::lowest();
#pragma GCC diagnostic pop
  for (auto output_index = index_type{}; output_index < output_count; ++output_index) {
    auto workspace_index = output_index * stride;
    // Normalization: divide by the averaging factor and apply the bias
    val[workspace_index] = val[workspace_index] / average_factor + bias;
    // Element-wise transform, selected at compile time
    if constexpr (elem_wise_v == element_op::signed_square) {
      val[workspace_index] =
        copysign(val[workspace_index] * val[workspace_index], val[workspace_index]);
    } else if constexpr (elem_wise_v == element_op::hinge) {
      val[workspace_index] = io_t(val[workspace_index] > io_t{});
    } else if constexpr (elem_wise_v == element_op::sigmoid) {
      val[workspace_index] = io_t{1} / (io_t{1} + exp(-constant * val[workspace_index]));
    } else if constexpr (elem_wise_v == element_op::exponential) {
      val[workspace_index] = exp(val[workspace_index] / constant);
    } else if constexpr (elem_wise_v == element_op::logarithm_one_plus_exp) {
      val[workspace_index] = log1p(exp(val[workspace_index] / constant));
    }
    if constexpr (row_wise_v == row_op::softmax || row_wise_v == row_op::max_index) {
      // Branch-free update of the running maximum and its index
      auto is_new_max = val[workspace_index] > max_value;
      max_index       = is_new_max * output_index + (!is_new_max) * max_index;
      max_value       = is_new_max * val[workspace_index] + (!is_new_max) * max_value;
    }
  }

  if constexpr (row_wise_v == row_op::max_index) {
    // Argmax: the single output is the index of the largest value
    *out = max_index;
  } else {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
    auto softmax_normalization = io_t{};
#pragma GCC diagnostic pop
    if constexpr (row_wise_v == row_op::softmax) {
      // Subtract the row maximum before exponentiating for numerical
      // stability, accumulating the normalization denominator
      for (auto workspace_index = index_type{}; workspace_index < output_count * stride;
           workspace_index += stride) {
        val[workspace_index] = exp(val[workspace_index] - max_value);
        softmax_normalization += val[workspace_index];
      }
    }

    // Gather strided workspace entries into the contiguous output buffer
    for (auto output_index = index_type{}; output_index < output_count; ++output_index) {
      auto workspace_index = output_index * stride;
      if constexpr (row_wise_v == row_op::softmax) {
        out[output_index] = val[workspace_index] / softmax_normalization;
      } else {
        out[output_index] = val[workspace_index];
      }
    }
  }
}

/*
 * Struct which holds all data necessary to perform postprocessing on raw
 * output of a forest model
 *
 * @tparam io_t The type used for input and output to/from the model
 * (typically float/double)
 * @param row_wise Enum value representing the row-wise post-processing
 * operation to perform on the output
 * @param elem_wise Enum value representing the element-wise post-processing
 * operation to perform on the output
 * @param average_factor The factor by which to divide during the
 * normalization step of postprocessing
 * @param bias The bias factor to subtract off during the
 * normalization step of postprocessing
 * @param constant If the postprocessing operation requires a constant,
 * it can be passed here.
 */
template <typename io_t>
struct postprocessor {
  HOST DEVICE postprocessor(row_op row_wise     = row_op::disable,
                            element_op elem_wise = element_op::disable,
                            io_t average_factor  = io_t{1},
                            io_t bias            = io_t{0},
                            io_t constant        = io_t{1})
    : average_factor_{average_factor},
      bias_{bias},
      constant_{constant},
      row_wise_{row_wise},
      elem_wise_{elem_wise}
  {
  }

  // Apply the configured postprocessing to one row of raw output. The
  // runtime (row_op, element_op) pair is collapsed to a single value so one
  // switch can select the matching compile-time instantiation of
  // postprocess().
  HOST DEVICE void operator()(io_t* val,
                              index_type output_count,
                              io_t* out,
                              index_type stride = index_type{1}) const
  {
    switch (ops_to_val(row_wise_, elem_wise_)) {
      case ops_to_val(row_op::disable, element_op::signed_square):
        postprocess<row_op::disable, element_op::signed_square>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::disable, element_op::hinge):
        postprocess<row_op::disable, element_op::hinge>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::disable, element_op::sigmoid):
        postprocess<row_op::disable, element_op::sigmoid>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::disable, element_op::exponential):
        postprocess<row_op::disable, element_op::exponential>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::disable, element_op::logarithm_one_plus_exp):
        postprocess<row_op::disable, element_op::logarithm_one_plus_exp>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::softmax, element_op::disable):
        postprocess<row_op::softmax, element_op::disable>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::softmax, element_op::signed_square):
        postprocess<row_op::softmax, element_op::signed_square>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::softmax, element_op::hinge):
        postprocess<row_op::softmax, element_op::hinge>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::softmax, element_op::sigmoid):
        postprocess<row_op::softmax, element_op::sigmoid>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::softmax, element_op::exponential):
        postprocess<row_op::softmax, element_op::exponential>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::softmax, element_op::logarithm_one_plus_exp):
        postprocess<row_op::softmax, element_op::logarithm_one_plus_exp>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::max_index, element_op::disable):
        postprocess<row_op::max_index, element_op::disable>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::max_index, element_op::signed_square):
        postprocess<row_op::max_index, element_op::signed_square>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::max_index, element_op::hinge):
        postprocess<row_op::max_index, element_op::hinge>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::max_index, element_op::sigmoid):
        postprocess<row_op::max_index, element_op::sigmoid>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::max_index, element_op::exponential):
        postprocess<row_op::max_index, element_op::exponential>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      case ops_to_val(row_op::max_index, element_op::logarithm_one_plus_exp):
        postprocess<row_op::max_index, element_op::logarithm_one_plus_exp>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
        break;
      default:
        // Both operations disabled (or an unrecognized combination):
        // normalization only
        postprocess<row_op::disable, element_op::disable>(
          val, output_count, out, stride, average_factor_, bias_, constant_);
    }
  }

 private:
  io_t average_factor_;  // divisor applied during normalization
  io_t bias_;            // bias added during normalization
  io_t constant_;        // operation-specific constant (e.g. sigmoid alpha)
  row_op row_wise_;
  element_op elem_wise_;
};

}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/index_type.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <stdint.h> namespace ML { namespace experimental { namespace fil { using index_type = uint32_t; } } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/evaluate_tree.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <stdint.h> #include <type_traits> #ifndef __CUDACC__ #include <math.h> #endif #include <cuml/experimental/fil/detail/bitset.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> namespace ML { namespace experimental { namespace fil { namespace detail { /* * Evaluate a single tree on a single row. * If node_id_mapping is not-nullptr, this kernel outputs leaf node's ID * instead of the leaf value. * * @tparam has_vector_leaves Whether or not this tree has vector leaves * @tparam has_categorical_nodes Whether or not this tree has any nodes with * categorical splits * @tparam node_t The type of nodes in this tree * @tparam io_t The type used for input to and output from this tree (typically * either floats or doubles) * @tparam node_id_mapping_t If non-nullptr_t, this indicates the type we expect for * node_id_mapping. * @param node Pointer to the root node of this tree * @param row Pointer to the input data for this row * @param first_root_node Pointer to the root node of the first tree. 
 * @param node_id_mapping Array representing the mapping from internal node IDs to
 * final leaf ID outputs
 */
template <bool has_vector_leaves,
          bool has_categorical_nodes,
          typename node_t,
          typename io_t,
          typename node_id_mapping_t = std::nullptr_t>
HOST DEVICE auto evaluate_tree_impl(node_t const* __restrict__ node,
                                    io_t const* __restrict__ row,
                                    node_t const* __restrict__ first_root_node = nullptr,
                                    node_id_mapping_t node_id_mapping          = nullptr)
{
  // Categorical sets here are stored inline in the node's index member, so
  // the bitset is at most the bit-width of node_t::index_type.
  using categorical_set_type = bitset<uint32_t, typename node_t::index_type const>;
  auto cur_node              = *node;
  do {
    auto input_val = row[cur_node.feature_index()];
    auto condition = true;
    if constexpr (has_categorical_nodes) {
      if (cur_node.is_categorical()) {
        auto valid_categories = categorical_set_type{
          &cur_node.index(), uint32_t(sizeof(typename node_t::index_type) * 8)};
        condition = valid_categories.test(input_val);
      } else {
        condition = (input_val < cur_node.threshold());
      }
    } else {
      condition = (input_val < cur_node.threshold());
    }
    // Missing-value handling: a NaN input makes the threshold comparison
    // false, so only then can this branch flip the decision toward the
    // distant child when the node defaults distant.
    if (!condition && cur_node.default_distant()) { condition = isnan(input_val); }
    node += cur_node.child_offset(condition);
    cur_node = *node;
  } while (!cur_node.is_leaf());
  // Either return the leaf's output or, when a mapping is supplied, the
  // caller-facing ID of the leaf that was reached.
  if constexpr (std::is_same_v<node_id_mapping_t, std::nullptr_t>) {
    return cur_node.template output<has_vector_leaves>();
  } else {
    return node_id_mapping[node - first_root_node];
  }
}

/*
 * Evaluate a single tree which requires external categorical storage on a
 * single node.
 * If node_id_mapping is not-nullptr, this kernel outputs leaf node's ID
 * instead of the leaf value.
 *
 * For non-categorical models and models with a relatively small number of
 * categories for any feature, all information necessary for model evaluation
 * can be stored on a single node. If the number of categories for any
 * feature exceeds the available space on a node, however, the
 * categorical split data must be stored external to the node.
We pass a * pointer to this external data and reconstruct bitsets from it indicating * the positive and negative categories for each categorical node. * * @tparam has_vector_leaves Whether or not this tree has vector leaves * @tparam node_t The type of nodes in this tree * @tparam io_t The type used for input to and output from this tree (typically * either floats or doubles) * @tparam categorical_storage_t The underlying type used for storing * categorical data (typically char) * @tparam node_id_mapping_t If non-nullptr_t, this indicates the type we expect for * node_id_mapping. * @param node Pointer to the root node of this tree * @param row Pointer to the input data for this row * @param categorical_storage Pointer to where categorical split data is * stored. */ template <bool has_vector_leaves, typename node_t, typename io_t, typename categorical_storage_t, typename node_id_mapping_t = std::nullptr_t> HOST DEVICE auto evaluate_tree_impl(node_t const* __restrict__ node, io_t const* __restrict__ row, categorical_storage_t const* __restrict__ categorical_storage, node_t const* __restrict__ first_root_node = nullptr, node_id_mapping_t node_id_mapping = nullptr) { using categorical_set_type = bitset<uint32_t, categorical_storage_t const>; auto cur_node = *node; do { auto input_val = row[cur_node.feature_index()]; auto condition = cur_node.default_distant(); if (!isnan(input_val)) { if (cur_node.is_categorical()) { auto valid_categories = categorical_set_type{categorical_storage + cur_node.index() + 1, uint32_t(categorical_storage[cur_node.index()])}; condition = valid_categories.test(input_val); } else { condition = (input_val < cur_node.threshold()); } } node += cur_node.child_offset(condition); cur_node = *node; } while (!cur_node.is_leaf()); if constexpr (std::is_same_v<node_id_mapping_t, std::nullptr_t>) { return cur_node.template output<has_vector_leaves>(); } else { return node_id_mapping[node - first_root_node]; } } /** * Dispatch to an appropriate version of 
evaluate_tree kernel. * * @tparam has_vector_leaves Whether or not this tree has vector leaves * @tparam has_categorical_nodes Whether or not this tree has any nodes with * categorical splits * @tparam has_nonlocal_categories Whether or not this tree has any nodes that store * categorical split data externally * @tparam predict_leaf Whether to predict leaf IDs * @tparam forest_t The type of forest * @tparam io_t The type used for input to and output from this tree (typically * either floats or doubles) * @tparam categorical_data_t The type for non-local categorical data storage. * @param forest The forest used to perform inference * @param tree_index The index of the tree we are evaluating * @param row The data row we are evaluating * @param categorical_data The pointer to where non-local data on categorical splits are stored. */ template <bool has_vector_leaves, bool has_categorical_nodes, bool has_nonlocal_categories, bool predict_leaf, typename forest_t, typename io_t, typename categorical_data_t> HOST DEVICE auto evaluate_tree(forest_t const& forest, index_type tree_index, io_t const* __restrict__ row, categorical_data_t categorical_data) { using node_t = typename forest_t::node_type; if constexpr (predict_leaf) { auto leaf_node_id = index_type{}; if constexpr (has_nonlocal_categories) { leaf_node_id = evaluate_tree_impl<has_vector_leaves>(forest.get_tree_root(tree_index), row, categorical_data, forest.get_tree_root(0), forest.get_node_id_mapping()); } else { leaf_node_id = evaluate_tree_impl<has_vector_leaves, has_categorical_nodes>( forest.get_tree_root(tree_index), row, forest.get_tree_root(0), forest.get_node_id_mapping()); } return leaf_node_id; } else { auto tree_output = std::conditional_t<has_vector_leaves, typename node_t::index_type, typename node_t::threshold_type>{}; if constexpr (has_nonlocal_categories) { tree_output = evaluate_tree_impl<has_vector_leaves>( forest.get_tree_root(tree_index), row, categorical_data); } else { tree_output = 
evaluate_tree_impl<has_vector_leaves, has_categorical_nodes>( forest.get_tree_root(tree_index), row); } return tree_output; } } } // namespace detail } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/cpu_introspection.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once
#include <cstddef>
#include <new>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
/* Size (in bytes) of the largest block of memory in which adjacent data can
 * be expected to share a cache line. Used to align per-thread working
 * storage for CPU inference.
 *
 * The feature-test macro is __cpp_lib_hardware_interference_size (the
 * previous spelling, "__cpplib_...", is not defined by any implementation,
 * so the stdlib constant was never picked up). */
#ifdef __cpp_lib_hardware_interference_size
using std::hardware_constructive_interference_size;
#else
// Fallback for toolchains that do not ship the C++17 interference-size
// constants: 64 bytes matches the cache-line size of mainstream x86-64 and
// most AArch64 CPUs.
auto constexpr static const hardware_constructive_interference_size = std::size_t{64};
#endif
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/forest.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/index_type.hpp> #include <cuml/experimental/fil/detail/node.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> #include <stddef.h> #include <type_traits> namespace ML { namespace experimental { namespace fil { /* A collection of trees which together form a forest model */ template <tree_layout layout_v, typename threshold_t, typename index_t, typename metadata_storage_t, typename offset_t> struct forest { using node_type = node<layout_v, threshold_t, index_t, metadata_storage_t, offset_t>; using io_type = threshold_t; template <typename vector_output_t> using raw_output_type = std::conditional_t<!std::is_same_v<vector_output_t, std::nullptr_t>, std::remove_pointer_t<vector_output_t>, typename node_type::threshold_type>; HOST DEVICE forest(node_type* forest_nodes, index_type* forest_root_indexes, index_type* node_id_mapping, index_type num_trees, index_type num_outputs) : nodes_{forest_nodes}, root_node_indexes_{forest_root_indexes}, node_id_mapping_{node_id_mapping}, num_trees_{num_trees}, num_outputs_{num_outputs} { } /* Return pointer to the root node of the indicated tree */ HOST DEVICE auto* get_tree_root(index_type tree_index) const { return nodes_ + root_node_indexes_[tree_index]; } /* Return pointer to the mapping from internal node IDs to final node ID outputs. 
* Only used when infer_type == infer_kind::leaf_id */ HOST DEVICE const auto* get_node_id_mapping() const { return node_id_mapping_; } /* Return the number of trees in this forest */ HOST DEVICE auto tree_count() const { return num_trees_; } /* Return the number of outputs per row for default evaluation of this * forest */ HOST DEVICE auto num_outputs() const { return num_outputs_; } private: node_type* nodes_; index_type* root_node_indexes_; index_type* node_id_mapping_; index_type num_trees_; index_type num_outputs_; }; } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/infer.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once
#include <cstddef>
#include <cuml/experimental/fil/detail/index_type.hpp>
#include <cuml/experimental/fil/detail/infer/cpu.hpp>
#include <cuml/experimental/fil/infer_kind.hpp>
// NOTE(review): <iostream> appears unused here — confirm and consider removing
#include <iostream>
#include <optional>
#include <type_traits>
#ifdef CUML_ENABLE_GPU
#include <cuml/experimental/fil/detail/infer/gpu.hpp>
#endif
#include <cuml/experimental/fil/detail/postprocessor.hpp>
#include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp>
#include <cuml/experimental/fil/detail/raft_proto/device_id.hpp>
#include <cuml/experimental/fil/detail/raft_proto/device_type.hpp>
#include <cuml/experimental/fil/exceptions.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {

/*
 * Perform inference based on the given forest and input parameters.
 *
 * This function only dispatches: it translates the runtime nullness of
 * vector_output / categorical_data and the has_categorical_nodes flag into
 * compile-time template arguments of inference::infer, so that each
 * specialization can omit work it does not need.
 *
 * @tparam D The device type (CPU/GPU) used to perform inference
 * @tparam forest_t The type of the forest
 * @param forest The forest to be evaluated
 * @param postproc The postprocessor object used to execute
 * postprocessing
 * @param output Pointer to where the output should be written
 * @param input Pointer to where the input data can be read from
 * @param row_count The number of rows in the input data
 * @param col_count The number of columns in the input data
 * @param output_count The number of outputs per row
 * @param has_categorical_nodes Whether or not any node within the forest has
 * a categorical split
 * @param vector_output Pointer to the beginning of storage for vector
 * outputs of leaves (nullptr for no vector output)
 * @param categorical_data Pointer to external categorical data storage if
 * required
 * @param infer_type Type of inference to perform. Defaults to summing the outputs of all trees
 * and produce an output per row. If set to "per_tree", we will instead output all outputs of
 * individual trees. If set to "leaf_id", we will output the integer ID of the leaf node
 * for each tree.
 * @param specified_chunk_size If non-nullopt, the size of "mini-batches"
 * used for distributing work across threads
 * @param device The device on which to execute evaluation
 * @param stream Optionally, the CUDA stream to use
 */
template <raft_proto::device_type D, typename forest_t>
void infer(forest_t const& forest,
           postprocessor<typename forest_t::io_type> const& postproc,
           typename forest_t::io_type* output,
           typename forest_t::io_type* input,
           index_type row_count,
           index_type col_count,
           index_type output_count,
           bool has_categorical_nodes,
           typename forest_t::io_type* vector_output               = nullptr,
           typename forest_t::node_type::index_type* categorical_data = nullptr,
           infer_kind infer_type                                   = infer_kind::default_kind,
           std::optional<index_type> specified_chunk_size          = std::nullopt,
           raft_proto::device_id<D> device                         = raft_proto::device_id<D>{},
           raft_proto::cuda_stream stream                          = raft_proto::cuda_stream{})
{
  if (vector_output == nullptr) {
    // Scalar leaves
    if (categorical_data == nullptr) {
      // No external categorical storage; categorical handling (if any) is
      // local to the nodes, selected by has_categorical_nodes.
      if (!has_categorical_nodes) {
        inference::infer<D, false, forest_t, std::nullptr_t, std::nullptr_t>(forest,
                                                                             postproc,
                                                                             output,
                                                                             input,
                                                                             row_count,
                                                                             col_count,
                                                                             output_count,
                                                                             nullptr,
                                                                             nullptr,
                                                                             infer_type,
                                                                             specified_chunk_size,
                                                                             device,
                                                                             stream);
      } else {
        inference::infer<D, true, forest_t, std::nullptr_t, std::nullptr_t>(forest,
                                                                            postproc,
                                                                            output,
                                                                            input,
                                                                            row_count,
                                                                            col_count,
                                                                            output_count,
                                                                            nullptr,
                                                                            nullptr,
                                                                            infer_type,
                                                                            specified_chunk_size,
                                                                            device,
                                                                            stream);
      }
    } else {
      // External categorical storage implies categorical nodes exist, so the
      // categorical template flag is unconditionally true here.
      inference::infer<D, true, forest_t>(forest,
                                          postproc,
                                          output,
                                          input,
                                          row_count,
                                          col_count,
                                          output_count,
                                          nullptr,
                                          categorical_data,
                                          infer_type,
                                          specified_chunk_size,
                                          device,
                                          stream);
    }
  } else {
    // Vector leaves
    if (categorical_data == nullptr) {
      if (!has_categorical_nodes) {
        inference::infer<D, false, forest_t>(forest,
                                             postproc,
                                             output,
                                             input,
                                             row_count,
                                             col_count,
                                             output_count,
                                             vector_output,
                                             nullptr,
                                             infer_type,
                                             specified_chunk_size,
                                             device,
                                             stream);
      } else {
        inference::infer<D, true, forest_t>(forest,
                                            postproc,
                                            output,
                                            input,
                                            row_count,
                                            col_count,
                                            output_count,
                                            vector_output,
                                            nullptr,
                                            infer_type,
                                            specified_chunk_size,
                                            device,
                                            stream);
      }
    } else {
      inference::infer<D, true, forest_t>(forest,
                                          postproc,
                                          output,
                                          input,
                                          row_count,
                                          col_count,
                                          output_count,
                                          vector_output,
                                          categorical_data,
                                          infer_type,
                                          specified_chunk_size,
                                          device,
                                          stream);
    }
  }
}

}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/bitset.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #ifndef __CUDACC__ #include <math.h> #endif #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> #include <stddef.h> #include <type_traits> #include <variant> namespace ML { namespace experimental { namespace fil { namespace detail { template <typename index_t = size_t, typename storage_t = std::byte> struct bitset { using storage_type = storage_t; using index_type = index_t; auto constexpr static const bin_width = index_type(sizeof(storage_type) * 8); HOST DEVICE bitset() : data_{nullptr}, num_bits_{0} {} HOST DEVICE bitset(storage_type* data, index_type size) : data_{data}, num_bits_{size} {} HOST DEVICE bitset(storage_type* data) : data_{data}, num_bits_(sizeof(storage_type) * 8) {} HOST DEVICE auto size() const { return num_bits_; } HOST DEVICE auto bin_count() const { return num_bits_ / bin_width + (num_bits_ % bin_width != 0); } // Standard bit-wise mutators and accessor HOST DEVICE auto& set(index_type index) { data_[bin_from_index(index)] |= mask_in_bin(index); return *this; } HOST DEVICE auto& clear(index_type index) { data_[bin_from_index(index)] &= ~mask_in_bin(index); return *this; } HOST DEVICE auto test(index_type index) const { auto result = false; if (index < num_bits_) { result = ((data_[bin_from_index(index)] & mask_in_bin(index)) != 0); } return result; } 
HOST DEVICE auto& flip() { for (auto i = index_type{}; i < bin_count(); ++i) { data_[i] = ~data_[i]; } return *this; } // Bit-wise boolean operations HOST DEVICE auto& operator&=(bitset<storage_type> const& other) { for (auto i = index_type{}; i < min(size(), other.size()); ++i) { data_[i] &= other.data_[i]; } return *this; } HOST DEVICE auto& operator|=(bitset<storage_type> const& other) { for (auto i = index_type{}; i < min(size(), other.size()); ++i) { data_[i] |= other.data_[i]; } return *this; } HOST DEVICE auto& operator^=(bitset<storage_type> const& other) { for (auto i = index_type{}; i < min(size(), other.size()); ++i) { data_[i] ^= other.data_[i]; } return *this; } HOST DEVICE auto& operator~() const { flip(); return *this; } private: storage_type* data_; index_type num_bits_; HOST DEVICE auto mask_in_bin(index_type index) const { return storage_type{1} << (index % bin_width); } HOST DEVICE auto bin_from_index(index_type index) const { return index / bin_width; } }; } // namespace detail } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/decision_forest_builder.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <algorithm> #include <cmath> #include <cstddef> #include <cuml/experimental/fil/detail/bitset.hpp> #include <cuml/experimental/fil/detail/forest.hpp> #include <cuml/experimental/fil/detail/index_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/buffer.hpp> #include <cuml/experimental/fil/detail/raft_proto/ceildiv.hpp> #include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/exceptions.hpp> #include <cuml/experimental/fil/postproc_ops.hpp> #include <numeric> #include <optional> #include <stdint.h> #include <vector> namespace ML { namespace experimental { namespace fil { namespace detail { /* * Exception indicating that FIL model could not be built from given input */ struct model_builder_error : std::exception { model_builder_error() : model_builder_error("Error while building model") {} model_builder_error(char const* msg) : msg_{msg} {} virtual char const* what() const noexcept { return msg_; } private: char const* msg_; }; /* * Struct used to build FIL forests */ template <typename decision_forest_t> struct decision_forest_builder { /* The type for nodes in the given decision_forest type */ using node_type = typename decision_forest_t::node_type; /* Add a root node, indicating the beginning of a new tree */ void start_new_tree() { if 
(root_node_indexes_.empty()) { root_node_indexes_.emplace_back(); } else { max_tree_size_ = std::max(cur_tree_size_, max_tree_size_); if (alignment_ != index_type{}) { if (cur_tree_size_ % alignment_ != index_type{}) { auto padding = (alignment_ - cur_tree_size_ % alignment_); for (auto i = index_type{}; i < padding; ++i) { add_node(typename node_type::threshold_type{}, std::nullopt); } } } root_node_indexes_.push_back(root_node_indexes_.back() + cur_tree_size_); cur_tree_size_ = index_type{}; } } /* Add a node with a categorical split */ template <typename iter_t> void add_categorical_node( iter_t vec_begin, iter_t vec_end, std::optional<int> tl_node_id = std::nullopt, bool default_to_distant_child = false, typename node_type::metadata_storage_type feature = typename node_type::metadata_storage_type{}, typename node_type::offset_type offset = typename node_type::offset_type{}) { auto constexpr const bin_width = index_type(sizeof(typename node_type::index_type) * 8); auto node_value = typename node_type::index_type{}; auto set_storage = &node_value; auto max_node_categories = *std::max_element(vec_begin, vec_end) + 1; if (max_num_categories_ > bin_width) { // TODO(wphicks): Check for overflow here node_value = categorical_storage_.size(); auto bins_required = raft_proto::ceildiv(max_node_categories, bin_width); categorical_storage_.push_back(max_node_categories); categorical_storage_.resize(categorical_storage_.size() + bins_required); set_storage = &(categorical_storage_[node_value + 1]); } auto set = bitset{set_storage, max_node_categories}; std::for_each(vec_begin, vec_end, [&set](auto&& cat_index) { set.set(cat_index); }); add_node(node_value, tl_node_id, false, default_to_distant_child, true, feature, offset, false); } /* Add a leaf node with vector output */ template <typename iter_t> void add_leaf_vector_node(iter_t vec_begin, iter_t vec_end, std::optional<int> tl_node_id = std::nullopt) { auto leaf_index = typename 
node_type::index_type(vector_output_.size() / output_size_); std::copy(vec_begin, vec_end, std::back_inserter(vector_output_)); nodes_.emplace_back(leaf_index, true, false, false, typename node_type::metadata_storage_type{}, typename node_type::offset_type{}); // 0 indicates the lack of ID mapping for a particular node node_id_mapping_.push_back(static_cast<index_type>(tl_node_id.value_or(0))); ++cur_tree_size_; } /* Add a node to the model */ template <typename value_t> void add_node( value_t val, std::optional<int> tl_node_id = std::nullopt, bool is_leaf_node = true, bool default_to_distant_child = false, bool is_categorical_node = false, typename node_type::metadata_storage_type feature = typename node_type::metadata_storage_type{}, typename node_type::offset_type offset = typename node_type::offset_type{}, bool is_inclusive = false) { if (is_inclusive) { val = std::nextafter(val, std::numeric_limits<value_t>::infinity()); } nodes_.emplace_back( val, is_leaf_node, default_to_distant_child, is_categorical_node, feature, offset); // 0 indicates the lack of ID mapping for a particular node node_id_mapping_.push_back(static_cast<index_type>(tl_node_id.value_or(0))); ++cur_tree_size_; } /* Set the element-wise postprocessing operation for this model */ void set_element_postproc(element_op val) { element_postproc_ = val; } /* Set the row-wise postprocessing operation for this model */ void set_row_postproc(row_op val) { row_postproc_ = val; } /* Set the value to divide by during postprocessing */ void set_average_factor(double val) { average_factor_ = val; } /* Set the the bias term to remove during postprocessing */ void set_bias(double val) { bias_ = val; } /* Set the the value of the constant used in the postprocessing operation * (if any) */ void set_postproc_constant(double val) { postproc_constant_ = val; } /* Set the number of outputs per row for this model */ void set_output_size(index_type val) { if (output_size_ != index_type{1} && output_size_ != val) { 
throw model_import_error("Inconsistent leaf vector size"); } output_size_ = val; } decision_forest_builder(index_type max_num_categories = index_type{}, index_type align_bytes = index_type{}) : cur_tree_size_{}, max_num_categories_{max_num_categories}, alignment_{std::lcm(align_bytes, index_type(sizeof(node_type)))}, output_size_{1}, element_postproc_{}, average_factor_{}, row_postproc_{}, bias_{}, postproc_constant_{}, max_tree_size_{}, nodes_{}, root_node_indexes_{}, vector_output_{} { } /* Return the FIL decision forest built by this builder */ auto get_decision_forest(index_type num_feature, index_type num_class, raft_proto::device_type mem_type = raft_proto::device_type::cpu, int device = 0, raft_proto::cuda_stream stream = raft_proto::cuda_stream{}) { // Allow narrowing for preprocessing constants. They are stored as doubles // for consistency in the builder but must be converted to the proper types // for the concrete forest model. #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wnarrowing" return decision_forest_t{ raft_proto::buffer{ raft_proto::buffer{nodes_.data(), nodes_.size()}, mem_type, device, stream}, raft_proto::buffer{raft_proto::buffer{root_node_indexes_.data(), root_node_indexes_.size()}, mem_type, device, stream}, raft_proto::buffer{raft_proto::buffer{node_id_mapping_.data(), node_id_mapping_.size()}, mem_type, device, stream}, num_feature, num_class, max_num_categories_ != 0, vector_output_.empty() ? std::nullopt : std::make_optional<raft_proto::buffer<typename node_type::threshold_type>>( raft_proto::buffer{vector_output_.data(), vector_output_.size()}, mem_type, device, stream), categorical_storage_.empty() ? 
std::nullopt : std::make_optional<raft_proto::buffer<typename node_type::index_type>>( raft_proto::buffer{categorical_storage_.data(), categorical_storage_.size()}, mem_type, device, stream), output_size_, row_postproc_, element_postproc_, static_cast<typename node_type::threshold_type>(average_factor_), static_cast<typename node_type::threshold_type>(bias_), static_cast<typename node_type::threshold_type>(postproc_constant_)}; #pragma GCC diagnostic pop } private: index_type cur_tree_size_; index_type max_num_categories_; index_type alignment_; index_type output_size_; row_op row_postproc_; element_op element_postproc_; double average_factor_; double bias_; double postproc_constant_; index_type max_tree_size_; std::vector<node_type> nodes_; std::vector<index_type> root_node_indexes_; std::vector<typename node_type::threshold_type> vector_output_; std::vector<typename node_type::index_type> categorical_storage_; std::vector<index_type> node_id_mapping_; }; } // namespace detail } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/gpu_introspection.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda_runtime_api.h> #include <cuml/experimental/fil/detail/index_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/cuda_check.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <vector> namespace ML { namespace experimental { namespace fil { namespace detail { inline auto get_max_shared_mem_per_block( raft_proto::device_id<raft_proto::device_type::gpu> device_id) { auto thread_local cache = std::vector<int>{}; if (cache.size() == 0) { auto device_count = int{}; raft_proto::cuda_check(cudaGetDeviceCount(&device_count)); cache.resize(device_count); for (auto dev = 0; dev < device_count; ++dev) { raft_proto::cuda_check( cudaDeviceGetAttribute(&(cache[dev]), cudaDevAttrMaxSharedMemoryPerBlockOptin, dev)); } } return index_type(cache.at(device_id.value())); } inline auto get_sm_count(raft_proto::device_id<raft_proto::device_type::gpu> device_id) { auto thread_local cache = std::vector<int>{}; if (cache.size() == 0) { auto device_count = int{}; raft_proto::cuda_check(cudaGetDeviceCount(&device_count)); cache.resize(device_count); for (auto dev = 0; dev < device_count; ++dev) { raft_proto::cuda_check( cudaDeviceGetAttribute(&(cache[dev]), cudaDevAttrMultiProcessorCount, dev)); } } return index_type(cache.at(device_id.value())); } inline auto 
get_max_threads_per_sm(raft_proto::device_id<raft_proto::device_type::gpu> device_id) { auto result = int{}; raft_proto::cuda_check( cudaDeviceGetAttribute(&result, cudaDevAttrMaxThreadsPerMultiProcessor, device_id.value())); return index_type(result); } inline auto get_max_shared_mem_per_sm(raft_proto::device_id<raft_proto::device_type::gpu> device_id) { auto thread_local cache = std::vector<int>{}; if (cache.size() == 0) { auto device_count = int{}; raft_proto::cuda_check(cudaGetDeviceCount(&device_count)); cache.resize(device_count); for (auto dev = 0; dev < device_count; ++dev) { raft_proto::cuda_check( cudaDeviceGetAttribute(&(cache[dev]), cudaDevAttrMaxSharedMemoryPerMultiprocessor, dev)); } } return index_type(cache.at(device_id.value())); } inline auto get_mem_clock_rate(raft_proto::device_id<raft_proto::device_type::gpu> device_id) { auto result = int{}; raft_proto::cuda_check( cudaDeviceGetAttribute(&result, cudaDevAttrMemoryClockRate, device_id.value())); return index_type(result); } inline auto get_core_clock_rate(raft_proto::device_id<raft_proto::device_type::gpu> device_id) { auto result = int{}; raft_proto::cuda_check(cudaDeviceGetAttribute(&result, cudaDevAttrClockRate, device_id.value())); return index_type(result); } /* The maximum number of bytes that can be read in a single instruction */ auto constexpr static const MAX_READ_CHUNK = index_type{128}; auto constexpr static const MAX_BLOCKS = index_type{65536}; auto constexpr static const WARP_SIZE = index_type{32}; auto constexpr static const MAX_THREADS_PER_BLOCK = index_type{256}; #ifdef __CUDACC__ #if __CUDA_ARCH__ == 720 || __CUDA_ARCH__ == 750 || __CUDA_ARCH__ == 860 || __CUDA_ARCH__ == 870 auto constexpr static const MAX_THREADS_PER_SM = index_type{1024}; #else auto constexpr static const MAX_THREADS_PER_SM = index_type{2048}; #endif #else auto constexpr static const MAX_THREADS_PER_SM = index_type{2048}; #endif auto constexpr static const MIN_BLOCKS_PER_SM = MAX_THREADS_PER_SM / 
MAX_THREADS_PER_BLOCK; } // namespace detail } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/specialization_types.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #include <cstdint> #include <cuml/experimental/fil/tree_layout.hpp> #include <type_traits> #include <variant> namespace ML { namespace experimental { namespace fil { namespace detail { /* * A template used solely to help manage the types which will be compiled in * standard cuML FIL * * The relatively simple and human-readable template parameters of this * template are translated into the specific types and values required * to instantiate more complex templates and compile-time checks. * * @tparam layout_v The layout of trees within a model * @tparam double_precision Whether this model should use double-precision * for floating-point evaluation and 64-bit integers for indexes * @tparam large_trees Whether this forest expects more than 2**(16 -3) - 1 = * 8191 features or contains nodes whose child is offset more than 2**16 - 1 = 65535 nodes away. */ template <tree_layout layout_v, bool double_precision, bool large_trees> struct specialization_types { /* The node threshold type to be used based on the template parameters */ using threshold_type = std::conditional_t<double_precision, double, float>; /* The type required for specifying indexes to vector leaf outputs or * non-local categorical data. 
*/ using index_type = std::conditional_t<double_precision, std::uint64_t, std::uint32_t>; /* The type used to provide metadata storage for nodes */ using metadata_type = std::conditional_t<large_trees, std::uint32_t, std::uint16_t>; /* The type used to provide metadata storage for nodes */ using offset_type = std::conditional_t<large_trees, std::uint32_t, std::uint16_t>; /* The tree layout (alias for layout_v)*/ auto static constexpr const layout = layout_v; /* Whether or not this tree requires double precision (alias for * double_precision) */ auto static constexpr const is_double_precision = double_precision; /* Whether or not this forest contains large trees (alias for * large_trees) */ auto static constexpr const has_large_trees = large_trees; }; /* A variant holding information on all specialization types compiled * in standard cuML FIL */ using specialization_variant = std::variant<specialization_types<tree_layout::depth_first, false, false>, specialization_types<tree_layout::depth_first, false, true>, specialization_types<tree_layout::depth_first, true, false>, specialization_types<tree_layout::depth_first, true, true>, specialization_types<tree_layout::breadth_first, false, false>, specialization_types<tree_layout::breadth_first, false, true>, specialization_types<tree_layout::breadth_first, true, false>, specialization_types<tree_layout::breadth_first, true, true>>; } // namespace detail } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/device_initialization.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/device_initialization/cpu.hpp> #include <variant> #ifdef CUML_ENABLE_GPU #include <cuml/experimental/fil/detail/device_initialization/gpu.hpp> #endif namespace ML { namespace experimental { namespace fil { namespace detail { /* Set any required device options for optimizing FIL compute */ template <typename forest_t, raft_proto::device_type D> void initialize_device(raft_proto::device_id<D> device) { device_initialization::initialize_device<forest_t>(device); } /* Set any required device options for optimizing FIL compute */ template <typename forest_t> void initialize_device(raft_proto::device_id_variant device) { std::visit( [](auto&& concrete_device) { device_initialization::initialize_device<forest_t>(concrete_device); }, device); } } // namespace detail } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/device_type.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace raft_proto { enum class device_type { cpu, gpu }; }
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/ceildiv.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> namespace raft_proto { template <typename T, typename U> HOST DEVICE auto constexpr ceildiv(T dividend, U divisor) { return (dividend + divisor - T{1}) / divisor; } } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #ifdef CUML_ENABLE_GPU #include <cuda_runtime_api.h> #endif namespace raft_proto { #ifdef CUML_ENABLE_GPU using cuda_stream = cudaStream_t; #else using cuda_stream = int; #endif inline void synchronize(cuda_stream stream) { #ifdef CUML_ENABLE_GPU cudaStreamSynchronize(stream); #endif } } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/cuda_check.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/detail/cuda_check/base.hpp> #ifdef CUML_ENABLE_GPU #include <cuml/experimental/fil/detail/raft_proto/detail/cuda_check/gpu.hpp> #endif #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> namespace raft_proto { template <typename error_t> void cuda_check(error_t const& err) noexcept(!GPU_ENABLED) { detail::cuda_check<device_type::gpu>(err); } } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/handle.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <algorithm> #include <cstddef> #include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp> #ifdef CUML_ENABLE_GPU #include <raft/core/handle.hpp> #endif namespace raft_proto { #ifdef CUML_ENABLE_GPU struct handle_t { handle_t(raft::handle_t const* handle_ptr = nullptr) : raft_handle_{handle_ptr} {} handle_t(raft::handle_t const& raft_handle) : raft_handle_{&raft_handle} {} auto get_next_usable_stream() const { return raft_proto::cuda_stream{raft_handle_->get_next_usable_stream().value()}; } auto get_stream_pool_size() const { return raft_handle_->get_stream_pool_size(); } auto get_usable_stream_count() const { return std::max(get_stream_pool_size(), std::size_t{1}); } void synchronize() const { raft_handle_->sync_stream_pool(); raft_handle_->sync_stream(); } private: // Have to store a pointer because handle is not movable raft::handle_t const* raft_handle_; }; #else struct handle_t { auto get_next_usable_stream() const { return raft_proto::cuda_stream{}; } auto get_stream_pool_size() const { return std::size_t{}; } auto get_usable_stream_count() const { return std::max(get_stream_pool_size(), std::size_t{1}); } void synchronize() const {} }; #endif } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/gpu_support.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #include <exception> #include <stdint.h> namespace raft_proto { #ifdef CUML_ENABLE_GPU auto constexpr static const GPU_ENABLED = true; #else auto constexpr static const GPU_ENABLED = false; #endif #ifdef __CUDACC__ #define HOST __host__ #define DEVICE __device__ auto constexpr static const GPU_COMPILATION = true; #else #define HOST #define DEVICE auto constexpr static const GPU_COMPILATION = false; #endif #ifndef DEBUG auto constexpr static const DEBUG_ENABLED = false; #elif DEBUG == 0 auto constexpr static const DEBUG_ENABLED = false; #else auto constexpr static const DEBUG_ENABLED = true; #endif struct gpu_unsupported : std::exception { gpu_unsupported() : gpu_unsupported("GPU functionality invoked in non-GPU build") {} gpu_unsupported(char const* msg) : msg_{msg} {} virtual char const* what() const noexcept { return msg_; } private: char const* msg_; }; } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/device_id.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/detail/device_id/base.hpp> #include <cuml/experimental/fil/detail/raft_proto/detail/device_id/cpu.hpp> #ifdef CUML_ENABLE_GPU #include <cuml/experimental/fil/detail/raft_proto/detail/device_id/gpu.hpp> #endif #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <variant> namespace raft_proto { template <device_type D> using device_id = detail::device_id<D>; using device_id_variant = std::variant<device_id<device_type::cpu>, device_id<device_type::gpu>>; } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/device_setter.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/detail/device_setter/base.hpp> #ifdef CUML_ENABLE_GPU #include <cuml/experimental/fil/detail/raft_proto/detail/device_setter/gpu.hpp> #endif #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> namespace raft_proto { using device_setter = detail::device_setter<device_type::gpu>; }
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/exceptions.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once
#include <exception>

namespace raft_proto {

/* Exception types used across raft_proto. Each stores a non-owning pointer
 * to its message (expected to be a string literal or other static storage)
 * and provides a default message via a delegating constructor. */

/* Raised when a CUDA API call reports failure */
struct bad_cuda_call : std::exception {
  bad_cuda_call(char const* msg) : msg_{msg} {}
  bad_cuda_call() : bad_cuda_call("CUDA API call failed") {}
  char const* what() const noexcept override { return msg_; }

 private:
  char const* msg_;
};

/* Raised when a bounds-checked buffer access or copy would go out of range */
struct out_of_bounds : std::exception {
  out_of_bounds(char const* msg) : msg_{msg} {}
  out_of_bounds() : out_of_bounds("Attempted out-of-bounds memory access") {}
  char const* what() const noexcept override { return msg_; }

 private:
  char const* msg_;
};

/* Raised when data located on one device type is used where the other is
 * required */
struct wrong_device_type : std::exception {
  wrong_device_type(char const* msg) : msg_{msg} {}
  wrong_device_type() : wrong_device_type("Attempted to use host data on GPU or device data on CPU")
  {
  }
  char const* what() const noexcept override { return msg_; }

 private:
  char const* msg_;
};

/* Raised when a memory type does not match the expected one */
struct mem_type_mismatch : std::exception {
  mem_type_mismatch(char const* msg) : msg_{msg} {}
  mem_type_mismatch() : mem_type_mismatch("Memory type does not match expected type") {}
  char const* what() const noexcept override { return msg_; }

 private:
  char const* msg_;
};

/* Raised when an operation targets a different device than the data's */
struct wrong_device : std::exception {
  wrong_device(char const* msg) : msg_{msg} {}
  wrong_device() : wrong_device("Attempted to use incorrect device") {}
  char const* what() const noexcept override { return msg_; }

 private:
  char const* msg_;
};

}  // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/buffer.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp> #include <cuml/experimental/fil/detail/raft_proto/detail/const_agnostic.hpp> #include <cuml/experimental/fil/detail/raft_proto/detail/copy.hpp> #include <cuml/experimental/fil/detail/raft_proto/detail/non_owning_buffer.hpp> #include <cuml/experimental/fil/detail/raft_proto/detail/owning_buffer.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/exceptions.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> #include <iterator> #include <memory> #include <stdint.h> #include <utility> #include <variant> namespace raft_proto { /** * @brief A container which may or may not own its own data on host or device * */ template <typename T> struct buffer { using index_type = std::size_t; using value_type = T; using data_store = std::variant<non_owning_buffer<device_type::cpu, T>, non_owning_buffer<device_type::gpu, T>, owning_buffer<device_type::cpu, T>, owning_buffer<device_type::gpu, T>>; buffer() : device_{}, data_{}, size_{}, cached_ptr{nullptr} {} /** Construct non-initialized owning buffer */ buffer(index_type size, device_type mem_type = device_type::cpu, int device = 0, cuda_stream stream = 0) : device_{[mem_type, &device]() { auto result = 
device_id_variant{}; switch (mem_type) { case device_type::cpu: result = device_id<device_type::cpu>{device}; break; case device_type::gpu: result = device_id<device_type::gpu>{device}; break; } return result; }()}, data_{[this, mem_type, size, stream]() { auto result = data_store{}; switch (mem_type) { case device_type::cpu: result = owning_buffer<device_type::cpu, T>{size}; break; case device_type::gpu: result = owning_buffer<device_type::gpu, T>{std::get<1>(device_), size, stream}; break; } return result; }()}, size_{size}, cached_ptr{[this]() { auto result = static_cast<T*>(nullptr); switch (data_.index()) { case 0: result = std::get<0>(data_).get(); break; case 1: result = std::get<1>(data_).get(); break; case 2: result = std::get<2>(data_).get(); break; case 3: result = std::get<3>(data_).get(); break; } return result; }()} { } /** Construct non-owning buffer */ buffer(T* input_data, index_type size, device_type mem_type = device_type::cpu, int device = 0) : device_{[mem_type, &device]() { auto result = device_id_variant{}; switch (mem_type) { case device_type::cpu: result = device_id<device_type::cpu>{device}; break; case device_type::gpu: result = device_id<device_type::gpu>{device}; break; } return result; }()}, data_{[input_data, mem_type]() { auto result = data_store{}; switch (mem_type) { case device_type::cpu: result = non_owning_buffer<device_type::cpu, T>{input_data}; break; case device_type::gpu: result = non_owning_buffer<device_type::gpu, T>{input_data}; break; } return result; }()}, size_{size}, cached_ptr{[this]() { auto result = static_cast<T*>(nullptr); switch (data_.index()) { case 0: result = std::get<0>(data_).get(); break; case 1: result = std::get<1>(data_).get(); break; case 2: result = std::get<2>(data_).get(); break; case 3: result = std::get<3>(data_).get(); break; } return result; }()} { } /** * @brief Construct one buffer from another in the given memory location * (either on host or on device) * A buffer constructed in this way is 
owning and will copy the data from * the original location */ buffer(buffer<T> const& other, device_type mem_type, int device = 0, cuda_stream stream = cuda_stream{}) : device_{[mem_type, &device]() { auto result = device_id_variant{}; switch (mem_type) { case device_type::cpu: result = device_id<device_type::cpu>{device}; break; case device_type::gpu: result = device_id<device_type::gpu>{device}; break; } return result; }()}, data_{[this, &other, mem_type, stream]() { auto result = data_store{}; auto result_data = static_cast<T*>(nullptr); if (mem_type == device_type::cpu) { auto buf = owning_buffer<device_type::cpu, T>(other.size()); result_data = buf.get(); result = std::move(buf); } else if (mem_type == device_type::gpu) { auto buf = owning_buffer<device_type::gpu, T>(std::get<1>(device_), other.size(), stream); result_data = buf.get(); result = std::move(buf); } copy(result_data, other.data(), other.size(), mem_type, other.memory_type(), stream); return result; }()}, size_{other.size()}, cached_ptr{[this]() { auto result = static_cast<T*>(nullptr); switch (data_.index()) { case 0: result = std::get<0>(data_).get(); break; case 1: result = std::get<1>(data_).get(); break; case 2: result = std::get<2>(data_).get(); break; case 3: result = std::get<3>(data_).get(); break; } return result; }()} { } /** * @brief Create owning copy of existing buffer with given stream * The memory type of this new buffer will be the same as the original */ buffer(buffer<T> const& other, cuda_stream stream = cuda_stream{}) : buffer(other, other.memory_type(), other.device_index(), stream) { } /** * @brief Create owning copy of existing buffer * The memory type of this new buffer will be the same as the original */ friend void swap(buffer<T>& first, buffer<T>& second) { using std::swap; swap(first.device_, second.device_); swap(first.data_, second.data_); swap(first.size_, second.size_); swap(first.cached_ptr, second.cached_ptr); } buffer<T>& operator=(buffer<T> const& other) { auto 
copy = other; swap(*this, copy); return *this; } /** * @brief Move from existing buffer unless a copy is necessary based on * memory location */ buffer(buffer<T>&& other, device_type mem_type, int device, cuda_stream stream) : device_{[mem_type, &device]() { auto result = device_id_variant{}; switch (mem_type) { case device_type::cpu: result = device_id<device_type::cpu>{device}; break; case device_type::gpu: result = device_id<device_type::gpu>{device}; break; } return result; }()}, data_{[&other, mem_type, device, stream]() { auto result = data_store{}; if (mem_type == other.memory_type() && device == other.device_index()) { result = std::move(other.data_); } else { auto* result_data = static_cast<T*>(nullptr); if (mem_type == device_type::cpu) { auto buf = owning_buffer<device_type::cpu, T>{other.size()}; result_data = buf.get(); result = std::move(buf); } else if (mem_type == device_type::gpu) { auto buf = owning_buffer<device_type::gpu, T>{device, other.size(), stream}; result_data = buf.get(); result = std::move(buf); } copy(result_data, other.data(), other.size(), mem_type, other.memory_type(), stream); } return result; }()}, size_{other.size()}, cached_ptr{[this]() { auto result = static_cast<T*>(nullptr); switch (data_.index()) { case 0: result = std::get<0>(data_).get(); break; case 1: result = std::get<1>(data_).get(); break; case 2: result = std::get<2>(data_).get(); break; case 3: result = std::get<3>(data_).get(); break; } return result; }()} { } buffer(buffer<T>&& other, device_type mem_type, int device) : buffer{std::move(other), mem_type, device, cuda_stream{}} { } buffer(buffer<T>&& other, device_type mem_type) : buffer{std::move(other), mem_type, 0, cuda_stream{}} { } buffer(buffer<T>&& other) noexcept : buffer{std::move(other), other.memory_type(), other.device_index(), cuda_stream{}} { } buffer<T>& operator=(buffer<T>&& other) noexcept { data_ = std::move(other.data_); device_ = std::move(other.device_); size_ = std::move(other.size_); 
cached_ptr = std::move(other.cached_ptr); return *this; } template < typename iter_t, typename = decltype(*std::declval<iter_t&>(), void(), ++std::declval<iter_t&>(), void())> buffer(iter_t const& begin, iter_t const& end) : buffer{static_cast<size_t>(std::distance(begin, end))} { auto index = std::size_t{}; std::for_each(begin, end, [&index, this](auto&& val) { data()[index++] = val; }); } template < typename iter_t, typename = decltype(*std::declval<iter_t&>(), void(), ++std::declval<iter_t&>(), void())> buffer(iter_t const& begin, iter_t const& end, device_type mem_type) : buffer{buffer{begin, end}, mem_type} { } template < typename iter_t, typename = decltype(*std::declval<iter_t&>(), void(), ++std::declval<iter_t&>(), void())> buffer(iter_t const& begin, iter_t const& end, device_type mem_type, int device, cuda_stream stream = cuda_stream{}) : buffer{buffer{begin, end}, mem_type, device, stream} { } auto size() const noexcept { return size_; } HOST DEVICE auto* data() const noexcept { return cached_ptr; } auto memory_type() const noexcept { auto result = device_type{}; if (device_.index() == 0) { result = device_type::cpu; } else { result = device_type::gpu; } return result; } auto device() const noexcept { return device_; } auto device_index() const noexcept { auto result = int{}; switch (device_.index()) { case 0: result = std::get<0>(device_).value(); break; case 1: result = std::get<1>(device_).value(); break; } return result; } ~buffer() = default; private: device_id_variant device_; data_store data_; index_type size_; T* cached_ptr; }; template <bool bounds_check, typename T, typename U> const_agnostic_same_t<T, U> copy(buffer<T>& dst, buffer<U> const& src, typename buffer<T>::index_type dst_offset, typename buffer<U>::index_type src_offset, typename buffer<T>::index_type size, cuda_stream stream) { if constexpr (bounds_check) { if (src.size() - src_offset < size || dst.size() - dst_offset < size) { throw out_of_bounds("Attempted copy to or from buffer 
of inadequate size"); } } copy(dst.data() + dst_offset, src.data() + src_offset, size, dst.memory_type(), src.memory_type(), stream); } template <bool bounds_check, typename T, typename U> const_agnostic_same_t<T, U> copy(buffer<T>& dst, buffer<U> const& src, cuda_stream stream) { copy<bounds_check>(dst, src, 0, 0, src.size(), stream); } template <bool bounds_check, typename T, typename U> const_agnostic_same_t<T, U> copy(buffer<T>& dst, buffer<U> const& src) { copy<bounds_check>(dst, src, 0, 0, src.size(), cuda_stream{}); } template <bool bounds_check, typename T, typename U> const_agnostic_same_t<T, U> copy(buffer<T>&& dst, buffer<U>&& src, typename buffer<T>::index_type dst_offset, typename buffer<U>::index_type src_offset, typename buffer<T>::index_type size, cuda_stream stream) { if constexpr (bounds_check) { if (src.size() - src_offset < size || dst.size() - dst_offset < size) { throw out_of_bounds("Attempted copy to or from buffer of inadequate size"); } } copy(dst.data() + dst_offset, src.data() + src_offset, size, dst.memory_type(), src.memory_type(), stream); } template <bool bounds_check, typename T, typename U> const_agnostic_same_t<T, U> copy(buffer<T>&& dst, buffer<U>&& src, typename buffer<T>::index_type dst_offset, cuda_stream stream) { copy<bounds_check>(dst, src, dst_offset, 0, src.size(), stream); } template <bool bounds_check, typename T, typename U> const_agnostic_same_t<T, U> copy(buffer<T>&& dst, buffer<U>&& src, cuda_stream stream) { copy<bounds_check>(dst, src, 0, 0, src.size(), stream); } template <bool bounds_check, typename T, typename U> const_agnostic_same_t<T, U> copy(buffer<T>&& dst, buffer<U>&& src) { copy<bounds_check>(dst, src, 0, 0, src.size(), cuda_stream{}); } } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/padding.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> namespace raft_proto { /* Return the value that must be added to val to equal the next multiple of * alignment greater than or equal to val */ template <typename T, typename U> HOST DEVICE auto padding_size(T val, U alignment) { auto result = val; if (alignment != 0) { auto remainder = val % alignment; result = alignment - remainder; result *= (remainder != 0); } return result; } /* Return the next multiple of alignment >= val */ template <typename T, typename U> HOST DEVICE auto padded_size(T val, U alignment) { return val + padding_size(val, alignment); } /* Return the value that must be added to val to equal the next multiple of * alignment less than or equal to val */ template <typename T, typename U> HOST DEVICE auto downpadding_size(T val, U alignment) { auto result = val; if (alignment != 0) { result = val % alignment; } return result; } /* Return the next multiple of alignment <= val */ template <typename T, typename U> HOST DEVICE auto downpadded_size(T val, U alignment) { return val - downpadding_size(val, alignment); } } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/host_only_throw.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/detail/host_only_throw/base.hpp> #include <cuml/experimental/fil/detail/raft_proto/detail/host_only_throw/cpu.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> namespace raft_proto { template <typename T, bool host = !GPU_COMPILATION> using host_only_throw = detail::host_only_throw<T, host>; }
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/const_agnostic.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <type_traits> namespace raft_proto { template <typename T, typename U, typename V = void> using const_agnostic_same_t = std::enable_if_t<std::is_same_v<std::remove_const_t<T>, std::remove_const_t<U>>, V>; template <typename T, typename U> inline constexpr auto const_agnostic_same_v = std::is_same_v<std::remove_const_t<T>, std::remove_const_t<U>>; } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/owning_buffer.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/detail/owning_buffer/cpu.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #ifdef CUML_ENABLE_GPU #include <cuml/experimental/fil/detail/raft_proto/detail/owning_buffer/gpu.hpp> #endif namespace raft_proto { template <device_type D, typename T> using owning_buffer = detail::owning_buffer<D, T>; }
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/copy.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp> #include <cuml/experimental/fil/detail/raft_proto/detail/copy/cpu.hpp> #include <stdint.h> #ifdef CUML_ENABLE_GPU #include <cuml/experimental/fil/detail/raft_proto/detail/copy/gpu.hpp> #endif #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> namespace raft_proto { template <device_type dst_type, device_type src_type, typename T> void copy(T* dst, T const* src, uint32_t size, uint32_t dst_offset, uint32_t src_offset) { detail::copy<dst_type, src_type, T>(dst + dst_offset, src + src_offset, size, cuda_stream{}); } template <device_type dst_type, device_type src_type, typename T> void copy( T* dst, T const* src, uint32_t size, uint32_t dst_offset, uint32_t src_offset, cuda_stream stream) { detail::copy<dst_type, src_type, T>(dst + dst_offset, src + src_offset, size, stream); } template <device_type dst_type, device_type src_type, typename T> void copy(T* dst, T const* src, uint32_t size) { detail::copy<dst_type, src_type, T>(dst, src, size, cuda_stream{}); } template <device_type dst_type, device_type src_type, typename T> void copy(T* dst, T const* src, uint32_t size, cuda_stream stream) { detail::copy<dst_type, src_type, T>(dst, src, size, stream); } template <typename T> void copy(T* dst, T const* src, uint32_t size, device_type dst_type, device_type src_type, uint32_t dst_offset, uint32_t 
src_offset, cuda_stream stream) { if (dst_type == device_type::gpu && src_type == device_type::gpu) { detail::copy<device_type::gpu, device_type::gpu, T>( dst + dst_offset, src + src_offset, size, stream); } else if (dst_type == device_type::cpu && src_type == device_type::cpu) { detail::copy<device_type::cpu, device_type::cpu, T>( dst + dst_offset, src + src_offset, size, stream); } else if (dst_type == device_type::gpu && src_type == device_type::cpu) { detail::copy<device_type::gpu, device_type::cpu, T>( dst + dst_offset, src + src_offset, size, stream); } else if (dst_type == device_type::cpu && src_type == device_type::gpu) { detail::copy<device_type::cpu, device_type::gpu, T>( dst + dst_offset, src + src_offset, size, stream); } } template <typename T> void copy(T* dst, T const* src, uint32_t size, device_type dst_type, device_type src_type) { copy<T>(dst, src, size, dst_type, src_type, 0, 0, cuda_stream{}); } template <typename T> void copy(T* dst, T const* src, uint32_t size, device_type dst_type, device_type src_type, cuda_stream stream) { copy<T>(dst, src, size, dst_type, src_type, 0, 0, stream); } } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/non_owning_buffer.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/detail/non_owning_buffer/base.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> namespace raft_proto { template <device_type D, typename T> using non_owning_buffer = detail::non_owning_buffer<D, T>; }
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/copy/gpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda_runtime_api.h> #include <cuml/experimental/fil/detail/raft_proto/cuda_check.hpp> #include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> #include <stdint.h> #include <type_traits> namespace raft_proto { namespace detail { template <device_type dst_type, device_type src_type, typename T> std::enable_if_t< std::conjunction_v<std::disjunction<std::bool_constant<dst_type == device_type::gpu>, std::bool_constant<src_type == device_type::gpu>>, std::bool_constant<GPU_ENABLED>>, void> copy(T* dst, T const* src, uint32_t size, cuda_stream stream) { raft_proto::cuda_check(cudaMemcpyAsync(dst, src, size * sizeof(T), cudaMemcpyDefault, stream)); } } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/copy/cpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <algorithm> #include <cstring> #include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> #include <stdint.h> namespace raft_proto { namespace detail { template <device_type dst_type, device_type src_type, typename T> std::enable_if_t<std::conjunction_v<std::bool_constant<dst_type == device_type::cpu>, std::bool_constant<src_type == device_type::cpu>>, void> copy(T* dst, T const* src, uint32_t size, cuda_stream stream) { std::copy(src, src + size, dst); } template <device_type dst_type, device_type src_type, typename T> std::enable_if_t< std::conjunction_v<std::disjunction<std::bool_constant<dst_type != device_type::cpu>, std::bool_constant<src_type != device_type::cpu>>, std::bool_constant<!GPU_ENABLED>>, void> copy(T* dst, T const* src, uint32_t size, cuda_stream stream) { throw gpu_unsupported("Copying from or to device in non-GPU build"); } } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/device_id/base.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> namespace raft_proto { namespace detail { template <device_type D> struct device_id { using value_type = int; device_id(value_type device_index) {} auto value() const { return value_type{}; } }; } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/device_id/gpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/cuda_check.hpp> #include <cuml/experimental/fil/detail/raft_proto/detail/device_id/base.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <rmm/cuda_device.hpp> namespace raft_proto { namespace detail { template <> struct device_id<device_type::gpu> { using value_type = typename rmm::cuda_device_id::value_type; device_id() noexcept(false) : id_{[]() { auto raw_id = value_type{}; raft_proto::cuda_check(cudaGetDevice(&raw_id)); return raw_id; }()} {}; device_id(value_type dev_id) noexcept : id_{dev_id} {}; auto value() const noexcept { return id_.value(); } private: rmm::cuda_device_id id_; }; } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/device_id/cpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/detail/device_id/base.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> namespace raft_proto { namespace detail { template <> struct device_id<device_type::cpu> { using value_type = int; device_id() : id_{value_type{}} {}; device_id(value_type dev_id) : id_{dev_id} {}; auto value() const noexcept { return id_; } private: value_type id_; }; } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/owning_buffer/base.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <type_traits> namespace raft_proto { namespace detail { template <device_type D, typename T> struct owning_buffer { owning_buffer() {} owning_buffer(device_id<D> device_id, std::size_t size, cuda_stream stream) {} auto* get() const { return static_cast<T*>(nullptr); } }; } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/owning_buffer/gpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda_runtime_api.h> #include <cuml/experimental/fil/detail/raft_proto/detail/owning_buffer/base.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_setter.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <rmm/device_buffer.hpp> #include <type_traits> namespace raft_proto { namespace detail { template <typename T> struct owning_buffer<device_type::gpu, T> { // TODO(wphicks): Assess need for buffers of const T using value_type = std::remove_const_t<T>; owning_buffer() : data_{} {} owning_buffer(device_id<device_type::gpu> device_id, std::size_t size, cudaStream_t stream) noexcept(false) : data_{[&device_id, &size, &stream]() { auto device_context = device_setter{device_id}; return rmm::device_buffer{size * sizeof(value_type), rmm::cuda_stream_view{stream}}; }()} { } auto* get() const { return reinterpret_cast<T*>(data_.data()); } private: mutable rmm::device_buffer data_; }; } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/owning_buffer/cpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/detail/owning_buffer/base.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <memory> #include <type_traits> namespace raft_proto { namespace detail { template <typename T> struct owning_buffer<device_type::cpu, T> { // TODO(wphicks): Assess need for buffers of const T using value_type = std::remove_const_t<T>; owning_buffer() : data_{std::unique_ptr<T[]>{nullptr}} {} owning_buffer(std::size_t size) : data_{std::make_unique<T[]>(size)} {} auto* get() const { return data_.get(); } private: // TODO(wphicks): Back this with RMM-allocated host memory std::unique_ptr<T[]> data_; }; } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/non_owning_buffer/base.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <memory> #include <type_traits> namespace raft_proto { namespace detail { template <device_type D, typename T> struct non_owning_buffer { // TODO(wphicks): Assess need for buffers of const T using value_type = std::remove_const_t<T>; non_owning_buffer() : data_{nullptr} {} non_owning_buffer(T* ptr) : data_{ptr} {} auto* get() const { return data_; } private: // TODO(wphicks): Back this with RMM-allocated host memory T* data_; }; } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/cuda_check/base.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> namespace raft_proto { namespace detail { template <device_type D, typename error_t> void cuda_check(error_t const& err) { } } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/cuda_check/gpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda_runtime_api.h> #include <cuml/experimental/fil/detail/raft_proto/detail/cuda_check/base.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/exceptions.hpp> namespace raft_proto { namespace detail { template <> inline void cuda_check<device_type::gpu, cudaError_t>(cudaError_t const& err) noexcept(false) { if (err != cudaSuccess) { cudaGetLastError(); throw bad_cuda_call(cudaGetErrorString(err)); } } } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/host_only_throw/base.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> namespace raft_proto { namespace detail { template <typename T, bool host> struct host_only_throw { template <typename... Args> host_only_throw(Args&&... args) { static_assert(host); // Do not allow constexpr branch to compile if !host } }; } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/host_only_throw/cpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/detail/host_only_throw/base.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> namespace raft_proto { namespace detail { template <typename T> struct host_only_throw<T, true> { template <typename... Args> host_only_throw(Args&&... args) noexcept(false) { throw T{std::forward<Args>(args)...}; } }; } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/device_setter/base.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> namespace raft_proto { namespace detail { /** Struct for setting current device within a code block */ template <device_type D> struct device_setter { device_setter(device_id<D> device) {} }; } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/raft_proto/detail/device_setter/gpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda_runtime_api.h> #include <cuml/experimental/fil/detail/raft_proto/cuda_check.hpp> #include <cuml/experimental/fil/detail/raft_proto/detail/device_setter/base.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <raft/util/cudart_utils.hpp> namespace raft_proto { namespace detail { /** Struct for setting current device within a code block */ template <> struct device_setter<device_type::gpu> { device_setter(raft_proto::device_id<device_type::gpu> device) noexcept(false) : prev_device_{[]() { auto result = int{}; raft_proto::cuda_check(cudaGetDevice(&result)); return result; }()} { raft_proto::cuda_check(cudaSetDevice(device.value())); } ~device_setter() { RAFT_CUDA_TRY_NO_THROW(cudaSetDevice(prev_device_.value())); } private: device_id<device_type::gpu> prev_device_; }; } // namespace detail } // namespace raft_proto
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/device_initialization/gpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_setter.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> #include <type_traits> namespace ML { namespace experimental { namespace fil { namespace detail { namespace device_initialization { /* Non-CUDA header declaration of the GPU specialization for device * initialization */ template <typename forest_t, raft_proto::device_type D> std::enable_if_t<std::conjunction_v<std::bool_constant<raft_proto::GPU_ENABLED>, std::bool_constant<D == raft_proto::device_type::gpu>>, void> initialize_device(raft_proto::device_id<D> device); } // namespace device_initialization } // namespace detail } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/device_initialization/cpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> #include <type_traits> namespace ML { namespace experimental { namespace fil { namespace detail { namespace device_initialization { /* Specialization for any initialization required for CPUs * * This specialization will also be used for non-GPU-enabled builds * (as a GPU no-op). */ template <typename forest_t, raft_proto::device_type D> std::enable_if_t<std::disjunction_v<std::bool_constant<!raft_proto::GPU_ENABLED>, std::bool_constant<D == raft_proto::device_type::cpu>>, void> initialize_device(raft_proto::device_id<D> device) { } } // namespace device_initialization } // namespace detail } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/device_initialization/gpu.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda_runtime_api.h> #include <cuml/experimental/fil/constants.hpp> #include <cuml/experimental/fil/detail/forest.hpp> #include <cuml/experimental/fil/detail/gpu_introspection.hpp> #include <cuml/experimental/fil/detail/infer_kernel/gpu.cuh> #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_setter.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> #include <cuml/experimental/fil/detail/specializations/device_initialization_macros.hpp> #include <type_traits> namespace ML { namespace experimental { namespace fil { namespace detail { namespace device_initialization { /* The implementation of the template used to initialize GPU device options * * On GPU-enabled builds, the GPU specialization of this template ensures that * the inference kernels have access to the maximum available dynamic shared * memory. 
*/ template <typename forest_t, raft_proto::device_type D> std::enable_if_t<std::conjunction_v<std::bool_constant<raft_proto::GPU_ENABLED>, std::bool_constant<D == raft_proto::device_type::gpu>>, void> initialize_device(raft_proto::device_id<D> device) { auto device_context = raft_proto::device_setter(device); auto max_shared_mem_per_block = get_max_shared_mem_per_block(device); // Run solely for side-effect of caching SM count get_sm_count(device); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<false, 1, forest_t, std::nullptr_t, std::nullptr_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute(infer_kernel<false, 2, forest_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute(infer_kernel<false, 4, forest_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute(infer_kernel<false, 8, forest_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute(infer_kernel<false, 16, forest_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute(infer_kernel<false, 32, forest_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<false, 1, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<false, 2, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<false, 4, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( 
cudaFuncSetAttribute(infer_kernel<false, 8, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<false, 16, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<false, 32, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute(infer_kernel<true, 1, forest_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute(infer_kernel<true, 2, forest_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute(infer_kernel<true, 4, forest_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute(infer_kernel<true, 8, forest_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute(infer_kernel<true, 16, forest_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute(infer_kernel<true, 32, forest_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<true, 1, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<true, 2, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<true, 4, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( 
cudaFuncSetAttribute(infer_kernel<true, 8, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<true, 16, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<true, 32, forest_t, typename forest_t::io_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 1, forest_t, std::nullptr_t, typename forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 2, forest_t, std::nullptr_t, typename forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 4, forest_t, std::nullptr_t, typename forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 8, forest_t, std::nullptr_t, typename forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 16, forest_t, std::nullptr_t, typename forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 32, forest_t, std::nullptr_t, typename forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 1, forest_t, typename forest_t::io_type*, std::nullptr_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); 
raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 2, forest_t, typename forest_t::io_type*, std::nullptr_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 4, forest_t, typename forest_t::io_type*, std::nullptr_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 8, forest_t, typename forest_t::io_type*, std::nullptr_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 16, forest_t, typename forest_t::io_type*, std::nullptr_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check(cudaFuncSetAttribute( infer_kernel<true, 32, forest_t, typename forest_t::io_type*, std::nullptr_t>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<true, 1, forest_t, typename forest_t::io_type*, typename forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<true, 2, forest_t, typename forest_t::io_type*, typename forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<true, 4, forest_t, typename forest_t::io_type*, typename forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<true, 8, forest_t, typename forest_t::io_type*, typename forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<true, 16, forest_t, typename forest_t::io_type*, typename 
forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); raft_proto::cuda_check( cudaFuncSetAttribute(infer_kernel<true, 32, forest_t, typename forest_t::io_type*, typename forest_t::node_type::index_type*>, cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem_per_block)); } CUML_FIL_INITIALIZE_DEVICE(extern template, 0) CUML_FIL_INITIALIZE_DEVICE(extern template, 1) CUML_FIL_INITIALIZE_DEVICE(extern template, 2) CUML_FIL_INITIALIZE_DEVICE(extern template, 3) CUML_FIL_INITIALIZE_DEVICE(extern template, 4) CUML_FIL_INITIALIZE_DEVICE(extern template, 5) CUML_FIL_INITIALIZE_DEVICE(extern template, 6) CUML_FIL_INITIALIZE_DEVICE(extern template, 7) } // namespace device_initialization } // namespace detail } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/infer/gpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstddef> #include <cuml/experimental/fil/detail/forest.hpp> #include <cuml/experimental/fil/detail/index_type.hpp> #include <cuml/experimental/fil/detail/postprocessor.hpp> #include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/infer_kind.hpp> #include <optional> namespace ML { namespace experimental { namespace fil { namespace detail { namespace inference { /* The CUDA-free header declaration of the GPU infer template */ template <raft_proto::device_type D, bool has_categorical_nodes, typename forest_t, typename vector_output_t = std::nullptr_t, typename categorical_data_t = std::nullptr_t> std::enable_if_t<D == raft_proto::device_type::gpu, void> infer( forest_t const& forest, postprocessor<typename forest_t::io_type> const& postproc, typename forest_t::io_type* output, typename forest_t::io_type* input, index_type row_count, index_type col_count, index_type class_count, vector_output_t vector_output = nullptr, categorical_data_t categorical_data = nullptr, infer_kind infer_type = infer_kind::default_kind, std::optional<index_type> specified_chunk_size = std::nullopt, raft_proto::device_id<D> device = raft_proto::device_id<D>{}, raft_proto::cuda_stream stream = raft_proto::cuda_stream{}); } // 
namespace inference } // namespace detail } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/infer/cpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #include <cuml/experimental/fil/constants.hpp> #include <cuml/experimental/fil/detail/cpu_introspection.hpp> #include <cuml/experimental/fil/detail/forest.hpp> #include <cuml/experimental/fil/detail/index_type.hpp> #include <cuml/experimental/fil/detail/infer_kernel/cpu.hpp> #include <cuml/experimental/fil/detail/postprocessor.hpp> #include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp> #include <cuml/experimental/fil/detail/specializations/infer_macros.hpp> #include <cuml/experimental/fil/infer_kind.hpp> #include <optional> namespace ML { namespace experimental { namespace fil { namespace detail { namespace inference { /* A wrapper around the underlying inference kernels to support dispatching to * the right kernel * * This specialization is used for CPU inference and for requests for GPU * inference on non-GPU-enabled builds. An exception will be thrown if a * request is made for GPU on inference on a non-GPU-enabled build. * * @tparam D The type of device (CPU/GPU) on which to perform inference. * @tparam has_categorical_nodes Whether or not any node in the model has * categorical splits. 
* @tparam vector_output_t If non-nullptr_t, the type of vector leaf output * @tparam categorical_data_t If non-nullptr_t, the type of non-local * categorical data storage * * @param forest The forest to be used for inference. * @param postproc The postprocessor object to be used for postprocessing raw * output from the forest. * @param row_count The number of rows in the input * @param col_count The number of columns per row in the input * @param output_count The number of output elements per row * @param vector_output If non-nullptr, a pointer to storage for vector leaf * outputs * @param categorical_data If non-nullptr, a pointer to non-local storage for * data on categorical splits. * @param infer_type Type of inference to perform. Defaults to summing the outputs of all trees * and produce an output per row. If set to "per_tree", we will instead output all outputs of * individual trees. If set to "leaf_id", we will output the integer ID of the leaf node * for each tree. * @param specified_chunk_size If non-nullopt, the mini-batch size used for * processing rows in a batch. For CPU inference, this essentially determines * the granularity of parallelism. A larger chunk size means that a single * thread will process more rows for its assigned trees before fetching a * new batch of rows. In general, so long as the chunk size remains much * smaller than the batch size (minimally less than the batch size divided by * the number of available cores), larger batches see improved performance with * larger chunk sizes. Unlike for GPU, any positive value is valid (up to * hardware constraints), but it is recommended to test powers of 2 from 1 * (for individual row inference) to 512 (for very large batch * inference). A value of 64 is a generally-useful default. 
*/ template <raft_proto::device_type D, bool has_categorical_nodes, typename forest_t, typename vector_output_t = std::nullptr_t, typename categorical_data_t = std::nullptr_t> std::enable_if_t<std::disjunction_v<std::bool_constant<D == raft_proto::device_type::cpu>, std::bool_constant<!raft_proto::GPU_ENABLED>>, void> infer(forest_t const& forest, postprocessor<typename forest_t::io_type> const& postproc, typename forest_t::io_type* output, typename forest_t::io_type* input, index_type row_count, index_type col_count, index_type output_count, vector_output_t vector_output = nullptr, categorical_data_t categorical_data = nullptr, infer_kind infer_type = infer_kind::default_kind, std::optional<index_type> specified_chunk_size = std::nullopt, raft_proto::device_id<D> device = raft_proto::device_id<D>{}, raft_proto::cuda_stream = raft_proto::cuda_stream{}) { if constexpr (D == raft_proto::device_type::gpu) { throw raft_proto::gpu_unsupported("Tried to use GPU inference in CPU-only build"); } else { if (infer_type == infer_kind::leaf_id) { infer_kernel_cpu<has_categorical_nodes, true>( forest, postproc, output, input, row_count, col_count, output_count, specified_chunk_size.value_or(hardware_constructive_interference_size), hardware_constructive_interference_size, vector_output, categorical_data, infer_type); } else { infer_kernel_cpu<has_categorical_nodes, false>( forest, postproc, output, input, row_count, col_count, output_count, specified_chunk_size.value_or(hardware_constructive_interference_size), hardware_constructive_interference_size, vector_output, categorical_data, infer_type); } } } /* This macro is invoked here to declare all standard specializations of this * template as extern. This ensures that this (relatively complex) code is * compiled as few times as possible. A macro is used because ever * specialization must be explicitly declared. The final argument to the macro * references the 8 specialization variants compiled in standard cuML FIL. 
*/ CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::cpu, 0) CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::cpu, 1) CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::cpu, 2) CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::cpu, 3) CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::cpu, 4) CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::cpu, 5) CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::cpu, 6) CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::cpu, 7) } // namespace inference } // namespace detail } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/infer/gpu.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once
#include <cstddef>
#include <cuml/experimental/fil/constants.hpp>
#include <cuml/experimental/fil/detail/forest.hpp>
#include <cuml/experimental/fil/detail/gpu_introspection.hpp>
#include <cuml/experimental/fil/detail/index_type.hpp>
#include <cuml/experimental/fil/detail/infer_kernel/gpu.cuh>
#include <cuml/experimental/fil/detail/postprocessor.hpp>
#include <cuml/experimental/fil/detail/raft_proto/buffer.hpp>
#include <cuml/experimental/fil/detail/raft_proto/ceildiv.hpp>
#include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp>
#include <cuml/experimental/fil/detail/raft_proto/device_id.hpp>
#include <cuml/experimental/fil/detail/raft_proto/device_type.hpp>
#include <cuml/experimental/fil/detail/raft_proto/gpu_support.hpp>
#include <cuml/experimental/fil/detail/raft_proto/padding.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
#include <cuml/experimental/fil/exceptions.hpp>
#include <cuml/experimental/fil/infer_kind.hpp>
#include <optional>
#include <type_traits>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {

/* Number of output-workspace elements required per block for the given
 * per-row output size, block size, and chunk size. In default (sum over
 * trees) mode, every group of threads assigned to a row needs its own slot
 * to accumulate partial results before the final reduction; in the other
 * inference modes one slot per row output suffices. */
inline auto compute_output_size(index_type row_output_size,
                                index_type threads_per_block,
                                index_type rows_per_block_iteration,
                                infer_kind infer_type = infer_kind::default_kind)
{
  auto result = row_output_size * rows_per_block_iteration;
  if (infer_type == infer_kind::default_kind) {
    result *= raft_proto::ceildiv(threads_per_block, rows_per_block_iteration);
  }
  return result;
}

/* A wrapper around the underlying inference kernels to support dispatching to
 * the right kernel
 *
 * This specialization is used for GPU inference. It performs any necessary
 * computation necessary prior to kernel launch and then launches the correct
 * inference kernel.
 *
 * @tparam D The type of device (CPU/GPU) on which to perform inference.
 * @tparam has_categorical_nodes Whether or not any node in the model has
 * categorical splits.
 * @tparam vector_output_t If non-nullptr_t, the type of vector leaf output
 * @tparam categorical_data_t If non-nullptr_t, the type of non-local
 * categorical data storage
 *
 * @param forest The forest to be used for inference.
 * @param postproc The postprocessor object to be used for postprocessing raw
 * output from the forest.
 * @param row_count The number of rows in the input
 * @param col_count The number of columns per row in the input
 * @param output_count The number of output elements per row
 * @param vector_output If non-nullptr, a pointer to storage for vector leaf
 * outputs
 * @param categorical_data If non-nullptr, a pointer to non-local storage for
 * data on categorical splits.
 * @param infer_type Type of inference to perform. Defaults to summing the outputs of all trees
 * and produce an output per row. If set to "per_tree", we will instead output all outputs of
 * individual trees. If set to "leaf_id", we will output the integer ID of the leaf node
 * for each tree.
 * @param specified_chunk_size If non-nullopt, the mini-batch size used for
 * processing rows in a batch. For GPU inference, this determines the number of
 * rows that are processed per iteration of inference in a single block. It
 * is difficult to predict the optimal value for this parameter, but tuning it
 * can result in a substantial improvement in performance. The optimal
 * value depends on hardware, model, and batch size. Valid values are any power
 * of 2 from 1 to 32.
 */
template <raft_proto::device_type D,
          bool has_categorical_nodes,
          typename forest_t,
          typename vector_output_t    = std::nullptr_t,
          typename categorical_data_t = std::nullptr_t>
std::enable_if_t<D == raft_proto::device_type::gpu, void> infer(
  forest_t const& forest,
  postprocessor<typename forest_t::io_type> const& postproc,
  typename forest_t::io_type* output,
  typename forest_t::io_type* input,
  index_type row_count,
  index_type col_count,
  index_type output_count,
  vector_output_t vector_output                  = nullptr,
  categorical_data_t categorical_data            = nullptr,
  infer_kind infer_type                          = infer_kind::default_kind,
  std::optional<index_type> specified_chunk_size = std::nullopt,
  raft_proto::device_id<D> device                = raft_proto::device_id<D>{},
  raft_proto::cuda_stream stream                 = raft_proto::cuda_stream{})
{
  using output_t = typename forest_t::template raw_output_type<vector_output_t>;
  auto sm_count  = get_sm_count(device);
  // Device shared-memory limits used throughout the sizing heuristics below
  auto const max_shared_mem_per_block = get_max_shared_mem_per_block(device);
  auto const max_shared_mem_per_sm    = get_max_shared_mem_per_sm(device);
  auto const max_overall_shared_mem   = std::min(max_shared_mem_per_block, max_shared_mem_per_sm);
  // Bytes needed to cache one input row (set to 0 later if rows are not
  // stored in shared memory)
  auto row_size_bytes = index_type(index_type(sizeof(typename forest_t::io_type) * col_count));
  // row_output_size is zeroed later if the output workspace must live in
  // global memory instead of shared memory
  auto row_output_size       = output_count;
  auto row_output_size_bytes = index_type(sizeof(typename forest_t::io_type) * row_output_size);
  // First determine the number of threads per block. This is the indicated
  // preferred value unless we cannot handle at least 1 row per block iteration
  // with available shared memory, in which case we must reduce the threads per
  // block.
  auto threads_per_block =
    min(MAX_THREADS_PER_BLOCK,
        raft_proto::downpadded_size(
          (max_shared_mem_per_block - row_size_bytes) / row_output_size_bytes, WARP_SIZE));
  // If we cannot do at least a warp per block when storing input rows in
  // shared mem, recalculate our threads per block without input storage
  if (threads_per_block < WARP_SIZE) {
    threads_per_block =
      min(MAX_THREADS_PER_BLOCK,
          raft_proto::downpadded_size(max_shared_mem_per_block / row_output_size_bytes, WARP_SIZE));
    if (threads_per_block >= WARP_SIZE) {
      row_size_bytes = index_type{};  // Do not store input rows in shared mem
    }
  }
  // If we cannot do at least a warp per block when storing output in
  // shared mem, recalculate our threads per block with ONLY input storage
  if (threads_per_block < WARP_SIZE) {
    threads_per_block =
      min(MAX_THREADS_PER_BLOCK,
          raft_proto::downpadded_size(max_shared_mem_per_block / row_size_bytes, WARP_SIZE));
  }
  // If we still cannot use at least a warp per block, give up on using
  // shared memory and just maximize occupancy
  if (threads_per_block < WARP_SIZE) { threads_per_block = MAX_THREADS_PER_BLOCK; }

  auto const max_resident_blocks = sm_count * (get_max_threads_per_sm(device) / threads_per_block);

  // Compute shared memory usage based on minimum or specified
  // rows_per_block_iteration
  auto rows_per_block_iteration          = specified_chunk_size.value_or(index_type{1});
  auto constexpr const output_item_bytes = index_type(sizeof(output_t));
  auto output_workspace_size =
    compute_output_size(row_output_size, threads_per_block, rows_per_block_iteration, infer_type);
  auto output_workspace_size_bytes = output_item_bytes * output_workspace_size;
  auto global_workspace            = raft_proto::buffer<output_t>{};
  // Workspace does not fit in shared memory at all: fall back to a
  // global-memory workspace (allocated further below once num_blocks is known)
  if (output_workspace_size_bytes > max_shared_mem_per_block) {
    output_workspace_size_bytes = 0;
    row_output_size             = 0;
  }
  auto shared_mem_per_block =
    min(rows_per_block_iteration * row_size_bytes + output_workspace_size_bytes,
        max_overall_shared_mem);
  auto resident_blocks_per_sm =
    min(raft_proto::ceildiv(max_shared_mem_per_sm, shared_mem_per_block), max_resident_blocks);

  // If caller has not specified the number of rows per block iteration, apply
  // the following heuristic to identify an approximately optimal value
  if (!specified_chunk_size.has_value() && resident_blocks_per_sm >= MIN_BLOCKS_PER_SM) {
    rows_per_block_iteration = index_type{32};
  }

  if (row_output_size != 0) {
    // Halve the chunk size until the per-block shared memory requirement fits
    // (or the chunk size bottoms out at 1)
    do {
      output_workspace_size = compute_output_size(
        row_output_size, threads_per_block, rows_per_block_iteration, infer_type);
      output_workspace_size_bytes = output_item_bytes * output_workspace_size;
      shared_mem_per_block =
        (rows_per_block_iteration * row_size_bytes + output_workspace_size_bytes);
      if (shared_mem_per_block > max_overall_shared_mem) {
        rows_per_block_iteration >>= index_type{1};
      }
    } while (shared_mem_per_block > max_overall_shared_mem && rows_per_block_iteration > 1);
  }

  shared_mem_per_block = std::min(shared_mem_per_block, max_overall_shared_mem);

  // Divide shared mem evenly
  shared_mem_per_block = std::min(
    max_overall_shared_mem, max_shared_mem_per_sm / (max_shared_mem_per_sm / shared_mem_per_block));

  auto num_blocks = std::min(raft_proto::ceildiv(row_count, rows_per_block_iteration), MAX_BLOCKS);

  // row_output_size == 0 signals that the output workspace did not fit in
  // shared memory; allocate it in global device memory instead
  if (row_output_size == 0) {
    global_workspace = raft_proto::buffer<output_t>{
      output_workspace_size * num_blocks, raft_proto::device_type::gpu, device.value(), stream};
  }

  // Dispatch to the kernel instantiation for the smallest supported chunk
  // size that can cover rows_per_block_iteration
  if (rows_per_block_iteration <= 1) {
    infer_kernel<has_categorical_nodes, 1>
      <<<num_blocks, threads_per_block, shared_mem_per_block, stream>>>(forest,
                                                                       postproc,
                                                                       output,
                                                                       input,
                                                                       row_count,
                                                                       col_count,
                                                                       output_count,
                                                                       shared_mem_per_block,
                                                                       output_workspace_size,
                                                                       vector_output,
                                                                       categorical_data,
                                                                       infer_type,
                                                                       global_workspace.data());
  } else if (rows_per_block_iteration <= 2) {
    infer_kernel<has_categorical_nodes, 2>
      <<<num_blocks, threads_per_block, shared_mem_per_block, stream>>>(forest,
                                                                       postproc,
                                                                       output,
                                                                       input,
                                                                       row_count,
                                                                       col_count,
                                                                       output_count,
                                                                       shared_mem_per_block,
                                                                       output_workspace_size,
                                                                       vector_output,
                                                                       categorical_data,
                                                                       infer_type,
                                                                       global_workspace.data());
  } else if (rows_per_block_iteration <= 4) {
    infer_kernel<has_categorical_nodes, 4>
      <<<num_blocks, threads_per_block, shared_mem_per_block, stream>>>(forest,
                                                                       postproc,
                                                                       output,
                                                                       input,
                                                                       row_count,
                                                                       col_count,
                                                                       output_count,
                                                                       shared_mem_per_block,
                                                                       output_workspace_size,
                                                                       vector_output,
                                                                       categorical_data,
                                                                       infer_type,
                                                                       global_workspace.data());
  } else if (rows_per_block_iteration <= 8) {
    infer_kernel<has_categorical_nodes, 8>
      <<<num_blocks, threads_per_block, shared_mem_per_block, stream>>>(forest,
                                                                       postproc,
                                                                       output,
                                                                       input,
                                                                       row_count,
                                                                       col_count,
                                                                       output_count,
                                                                       shared_mem_per_block,
                                                                       output_workspace_size,
                                                                       vector_output,
                                                                       categorical_data,
                                                                       infer_type,
                                                                       global_workspace.data());
  } else if (rows_per_block_iteration <= 16) {
    infer_kernel<has_categorical_nodes, 16>
      <<<num_blocks, threads_per_block, shared_mem_per_block, stream>>>(forest,
                                                                       postproc,
                                                                       output,
                                                                       input,
                                                                       row_count,
                                                                       col_count,
                                                                       output_count,
                                                                       shared_mem_per_block,
                                                                       output_workspace_size,
                                                                       vector_output,
                                                                       categorical_data,
                                                                       infer_type,
                                                                       global_workspace.data());
  } else {
    infer_kernel<has_categorical_nodes, 32>
      <<<num_blocks, threads_per_block, shared_mem_per_block, stream>>>(forest,
                                                                       postproc,
                                                                       output,
                                                                       input,
                                                                       row_count,
                                                                       col_count,
                                                                       output_count,
                                                                       shared_mem_per_block,
                                                                       output_workspace_size,
                                                                       vector_output,
                                                                       categorical_data,
                                                                       infer_type,
                                                                       global_workspace.data());
  }
  // Surface any launch error immediately
  raft_proto::cuda_check(cudaGetLastError());
}

/* This macro is invoked here to declare all standard specializations of this
 * template as extern. This ensures that this (relatively complex) code is
 * compiled as few times as possible. A macro is used because every
 * specialization must be explicitly declared. The final argument to the macro
 * references the 8 specialization variants compiled in standard cuML FIL.
 */
CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::gpu, 0)
CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::gpu, 1)
CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::gpu, 2)
CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::gpu, 3)
CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::gpu, 4)
CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::gpu, 5)
CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::gpu, 6)
CUML_FIL_INFER_ALL(extern template, raft_proto::device_type::gpu, 7)

}  // namespace inference
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/specializations/device_initialization_macros.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/detail/raft_proto/device_id.hpp> #include <cuml/experimental/fil/detail/raft_proto/device_type.hpp> #include <cuml/experimental/fil/detail/specializations/forest_macros.hpp> /* Declare device initialization function for the types specified by the given * variant index */ #define CUML_FIL_INITIALIZE_DEVICE(template_type, variant_index) \ template_type void \ initialize_device<CUML_FIL_FOREST(variant_index), raft_proto::device_type::gpu>( \ raft_proto::device_id<raft_proto::device_type::gpu>);
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/specializations/forest_macros.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/experimental/fil/constants.hpp> #include <cuml/experimental/fil/detail/forest.hpp> #include <cuml/experimental/fil/detail/specialization_types.hpp> #include <cuml/experimental/fil/tree_layout.hpp> #include <variant> /* Macro which, given a variant index, will extract the type of the * corresponding variant from the specialization_variant type. This allows us * to specify all forest variants we wish to support in one location and then * reference them by index elsewhere. */ #define CUML_FIL_SPEC(variant_index) \ std::variant_alternative_t<variant_index, fil::detail::specialization_variant> /* Macro which expands to a full declaration of a forest type corresponding to * the given variant index. */ #define CUML_FIL_FOREST(variant_index) \ forest<CUML_FIL_SPEC(variant_index)::layout, \ typename CUML_FIL_SPEC(variant_index)::threshold_type, \ typename CUML_FIL_SPEC(variant_index)::index_type, \ typename CUML_FIL_SPEC(variant_index)::metadata_type, \ typename CUML_FIL_SPEC(variant_index)::offset_type>
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/specializations/infer_macros.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once
#include <cstddef>
#include <cuml/experimental/fil/constants.hpp>
#include <cuml/experimental/fil/detail/forest.hpp>
#include <cuml/experimental/fil/detail/index_type.hpp>
#include <cuml/experimental/fil/detail/postprocessor.hpp>
#include <cuml/experimental/fil/detail/raft_proto/cuda_stream.hpp>
#include <cuml/experimental/fil/detail/raft_proto/device_id.hpp>
#include <cuml/experimental/fil/detail/raft_proto/device_type.hpp>
#include <cuml/experimental/fil/detail/specialization_types.hpp>
#include <cuml/experimental/fil/detail/specializations/forest_macros.hpp>
#include <cuml/experimental/fil/infer_kind.hpp>
#include <variant>
// NOTE(review): the argument macros below use std::optional but <optional> is
// not directly included here — it appears to be reached transitively; confirm
// and consider including it explicitly.

/* Macro which expands to the valid arguments to an inference call for a forest
 * model without vector leaves or non-local categorical data.
 * (vector_output and categorical_data slots are both std::nullptr_t.) */
#define CUML_FIL_SCALAR_LOCAL_ARGS(dev, variant_index)                            \
  (CUML_FIL_FOREST(variant_index) const&,                                         \
   postprocessor<CUML_FIL_SPEC(variant_index)::threshold_type> const&,            \
   CUML_FIL_SPEC(variant_index)::threshold_type*,                                 \
   CUML_FIL_SPEC(variant_index)::threshold_type*,                                 \
   index_type,                                                                    \
   index_type,                                                                    \
   index_type,                                                                    \
   std::nullptr_t,                                                                \
   std::nullptr_t,                                                                \
   infer_kind,                                                                    \
   std::optional<index_type>,                                                     \
   raft_proto::device_id<dev>,                                                    \
   raft_proto::cuda_stream stream)

/* Macro which expands to the valid arguments to an inference call for a forest
 * model with vector leaves but without non-local categorical data.
 * (vector_output slot is a threshold_type pointer.) */
#define CUML_FIL_VECTOR_LOCAL_ARGS(dev, variant_index)                            \
  (CUML_FIL_FOREST(variant_index) const&,                                         \
   postprocessor<CUML_FIL_SPEC(variant_index)::threshold_type> const&,            \
   CUML_FIL_SPEC(variant_index)::threshold_type*,                                 \
   CUML_FIL_SPEC(variant_index)::threshold_type*,                                 \
   index_type,                                                                    \
   index_type,                                                                    \
   index_type,                                                                    \
   CUML_FIL_SPEC(variant_index)::threshold_type*,                                 \
   std::nullptr_t,                                                                \
   infer_kind,                                                                    \
   std::optional<index_type>,                                                     \
   raft_proto::device_id<dev>,                                                    \
   raft_proto::cuda_stream stream)

/* Macro which expands to the valid arguments to an inference call for a forest
 * model without vector leaves but with non-local categorical data.
 * (categorical_data slot is an index_type pointer.) */
#define CUML_FIL_SCALAR_NONLOCAL_ARGS(dev, variant_index)                         \
  (CUML_FIL_FOREST(variant_index) const&,                                         \
   postprocessor<CUML_FIL_SPEC(variant_index)::threshold_type> const&,            \
   CUML_FIL_SPEC(variant_index)::threshold_type*,                                 \
   CUML_FIL_SPEC(variant_index)::threshold_type*,                                 \
   index_type,                                                                    \
   index_type,                                                                    \
   index_type,                                                                    \
   std::nullptr_t,                                                                \
   CUML_FIL_SPEC(variant_index)::index_type*,                                     \
   infer_kind,                                                                    \
   std::optional<index_type>,                                                     \
   raft_proto::device_id<dev>,                                                    \
   raft_proto::cuda_stream stream)

/* Macro which expands to the valid arguments to an inference call for a forest
 * model with vector leaves and with non-local categorical data. */
#define CUML_FIL_VECTOR_NONLOCAL_ARGS(dev, variant_index)                         \
  (CUML_FIL_FOREST(variant_index) const&,                                         \
   postprocessor<CUML_FIL_SPEC(variant_index)::threshold_type> const&,            \
   CUML_FIL_SPEC(variant_index)::threshold_type*,                                 \
   CUML_FIL_SPEC(variant_index)::threshold_type*,                                 \
   index_type,                                                                    \
   index_type,                                                                    \
   index_type,                                                                    \
   CUML_FIL_SPEC(variant_index)::threshold_type*,                                 \
   CUML_FIL_SPEC(variant_index)::index_type*,                                     \
   infer_kind,                                                                    \
   std::optional<index_type>,                                                     \
   raft_proto::device_id<dev>,                                                    \
   raft_proto::cuda_stream stream)

/* Macro which expands to the declaration of an inference template for a forest
 * of the type indicated by the variant index. `categorical` maps to the
 * has_categorical_nodes template parameter of infer. */
#define CUML_FIL_INFER_TEMPLATE(template_type, dev, variant_index, categorical) \
  template_type void infer<dev, categorical, CUML_FIL_FOREST(variant_index)>

/* Macro which expands to the declaration of an inference template for a forest
 * of the type indicated by the variant index on the given device type without
 * vector leaves or categorical nodes*/
#define CUML_FIL_INFER_DEV_SCALAR_LEAF_NO_CAT(template_type, dev, variant_index) \
  CUML_FIL_INFER_TEMPLATE(template_type, dev, variant_index, false)              \
  CUML_FIL_SCALAR_LOCAL_ARGS(dev, variant_index);

/* Macro which expands to the declaration of an inference template for a forest
 * of the type indicated by the variant index on the given device type without
 * vector leaves and with only local categorical nodes*/
#define CUML_FIL_INFER_DEV_SCALAR_LEAF_LOCAL_CAT(template_type, dev, variant_index) \
  CUML_FIL_INFER_TEMPLATE(template_type, dev, variant_index, true)                  \
  CUML_FIL_SCALAR_LOCAL_ARGS(dev, variant_index);

/* Macro which expands to the declaration of an inference template for a forest
 * of the type indicated by the variant index on the given device type without
 * vector leaves and with non-local categorical nodes*/
#define CUML_FIL_INFER_DEV_SCALAR_LEAF_NONLOCAL_CAT(template_type, dev, variant_index) \
  CUML_FIL_INFER_TEMPLATE(template_type, dev, variant_index, true)                     \
  CUML_FIL_SCALAR_NONLOCAL_ARGS(dev, variant_index);

/* Macro which expands to the declaration of an inference template for a forest
 * of the type indicated by the variant index on the given device type with
 * vector leaves and without categorical nodes*/
#define CUML_FIL_INFER_DEV_VECTOR_LEAF_NO_CAT(template_type, dev, variant_index) \
  CUML_FIL_INFER_TEMPLATE(template_type, dev, variant_index, false)              \
  CUML_FIL_VECTOR_LOCAL_ARGS(dev, variant_index);

/* Macro which expands to the declaration of an inference template for a forest
 * of the type indicated by the variant index on the given device type with
 * vector leaves and with only local categorical nodes*/
#define CUML_FIL_INFER_DEV_VECTOR_LEAF_LOCAL_CAT(template_type, dev, variant_index) \
  CUML_FIL_INFER_TEMPLATE(template_type, dev, variant_index, true)                  \
  CUML_FIL_VECTOR_LOCAL_ARGS(dev, variant_index);

/* Macro which expands to the declaration of an inference template for a forest
 * of the type indicated by the variant index on the given device type with
 * vector leaves and with non-local categorical nodes*/
#define CUML_FIL_INFER_DEV_VECTOR_LEAF_NONLOCAL_CAT(template_type, dev, variant_index) \
  CUML_FIL_INFER_TEMPLATE(template_type, dev, variant_index, true)                     \
  CUML_FIL_VECTOR_NONLOCAL_ARGS(dev, variant_index);

/* Macro which expands to the declaration of all valid inference templates for
 * the given device on the forest type specified by the given variant index
 * (the six leaf/categorical combinations above). */
#define CUML_FIL_INFER_ALL(template_type, dev, variant_index)                   \
  CUML_FIL_INFER_DEV_SCALAR_LEAF_NO_CAT(template_type, dev, variant_index)      \
  CUML_FIL_INFER_DEV_SCALAR_LEAF_LOCAL_CAT(template_type, dev, variant_index)   \
  CUML_FIL_INFER_DEV_SCALAR_LEAF_NONLOCAL_CAT(template_type, dev, variant_index) \
  CUML_FIL_INFER_DEV_VECTOR_LEAF_NO_CAT(template_type, dev, variant_index)      \
  CUML_FIL_INFER_DEV_VECTOR_LEAF_LOCAL_CAT(template_type, dev, variant_index)   \
  CUML_FIL_INFER_DEV_VECTOR_LEAF_NONLOCAL_CAT(template_type, dev, variant_index)
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/infer_kernel/cpu.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #include <cuml/experimental/fil/detail/cpu_introspection.hpp> #include <cuml/experimental/fil/detail/evaluate_tree.hpp> #include <cuml/experimental/fil/detail/index_type.hpp> #include <cuml/experimental/fil/detail/postprocessor.hpp> #include <cuml/experimental/fil/detail/raft_proto/ceildiv.hpp> #include <cuml/experimental/fil/infer_kind.hpp> #include <iostream> #include <new> #include <numeric> #include <vector> namespace ML { namespace experimental { namespace fil { namespace detail { /** * The CPU "kernel" used to actually perform forest inference * * @tparam has_categorical_nodes Whether or not this kernel should be * compiled to operate on trees with categorical nodes. * @tparam forest_t The type of the forest object which will be used for * inference. * @tparam vector_output_t If non-nullptr_t, this indicates the type we expect * for outputs from vector leaves. * @tparam categorical_data_t If non-nullptr_t, this indicates the type we * expect for non-local categorical data storage. 
* @param forest The forest used to perform inference * @param postproc The postprocessor object used to store all necessary * data for postprocessing * @param output Pointer to the host-accessible buffer where output * should be written * @param input Pointer to the host-accessible buffer where input should be * read from * @param row_count The number of rows in the input * @param col_count The number of columns per row in the input * @param num_outputs The expected number of output elements per row * @param chunk_size The number of rows for each thread to process with its * assigned trees before fetching a new set of trees/rows. * @param grove_size The number of trees to assign to a thread for each chunk * of rows it processes. * @param vector_output_p If non-nullptr, a pointer to the stored leaf * vector outputs for all leaf nodes * @param categorical_data If non-nullptr, a pointer to where non-local * data on categorical splits are stored. * @param infer_type Type of inference to perform. Defaults to summing the outputs of all trees * and produce an output per row. If set to "per_tree", we will instead output all outputs of * individual trees. If set to "leaf_id", we will output the integer ID of the leaf node * for each tree. 
 */
template <bool has_categorical_nodes,
          bool predict_leaf,
          typename forest_t,
          typename vector_output_t    = std::nullptr_t,
          typename categorical_data_t = std::nullptr_t>
void infer_kernel_cpu(forest_t const& forest,
                      postprocessor<typename forest_t::io_type> const& postproc,
                      typename forest_t::io_type* output,
                      typename forest_t::io_type const* input,
                      index_type row_count,
                      index_type col_count,
                      index_type num_outputs,
                      index_type chunk_size  = hardware_constructive_interference_size,
                      index_type grove_size  = hardware_constructive_interference_size,
                      vector_output_t vector_output_p     = nullptr,
                      categorical_data_t categorical_data = nullptr,
                      infer_kind infer_type               = infer_kind::default_kind)
{
  // Vector leaves / non-local categorical support are signalled by passing a
  // real pointer type instead of std::nullptr_t for the corresponding
  // template parameter.
  auto constexpr has_vector_leaves        = !std::is_same_v<vector_output_t, std::nullptr_t>;
  auto constexpr has_nonlocal_categories  = !std::is_same_v<categorical_data_t, std::nullptr_t>;

  using node_t   = typename forest_t::node_type;
  using output_t = typename forest_t::template raw_output_type<vector_output_t>;

  // The trees are partitioned into "groves" of grove_size trees and the rows
  // into chunks of chunk_size rows; each (grove, chunk) pair is one
  // independent task for the OpenMP loop below.
  auto const num_tree  = forest.tree_count();
  auto const num_grove = raft_proto::ceildiv(num_tree, grove_size);
  auto const num_chunk = raft_proto::ceildiv(row_count, chunk_size);

  // Intermediate accumulator laid out as [row][output][grove] so that each
  // task writes only to its own grove slot and no synchronization is needed.
  auto output_workspace = std::vector<output_t>(row_count * num_outputs * num_grove, output_t{});
  auto const task_count = num_grove * num_chunk;

  // Infer on each grove and chunk
#pragma omp parallel for
  for (auto task_index = index_type{}; task_index < task_count; ++task_index) {
    auto const grove_index = task_index / num_chunk;
    auto const chunk_index = task_index % num_chunk;
    auto const start_row   = chunk_index * chunk_size;
    auto const end_row     = std::min(start_row + chunk_size, row_count);
    auto const start_tree  = grove_index * grove_size;
    auto const end_tree    = std::min(start_tree + grove_size, num_tree);
    for (auto row_index = start_row; row_index < end_row; ++row_index) {
      for (auto tree_index = start_tree; tree_index < end_tree; ++tree_index) {
        // For leaf-id prediction the tree yields a node index; otherwise it
        // yields either a vector-leaf index or a raw threshold-typed value.
        auto tree_output = std::conditional_t<
          predict_leaf,
          index_type,
          std::conditional_t<has_vector_leaves,
                             typename node_t::index_type,
                             typename node_t::threshold_type>>{};
        tree_output = evaluate_tree<has_vector_leaves,
                                    has_categorical_nodes,
                                    has_nonlocal_categories,
                                    predict_leaf>(
          forest, tree_index, input + row_index * col_count, categorical_data);
        if constexpr (predict_leaf) {
          // One output slot per tree: store the leaf node id directly.
          output_workspace[row_index * num_outputs * num_grove + tree_index * num_grove +
                           grove_index] = static_cast<typename forest_t::io_type>(tree_output);
        } else {
          auto const default_num_outputs = forest.num_outputs();
          if constexpr (has_vector_leaves) {
            // The boolean factor below enables the per-tree stride only for
            // infer_kind::per_tree; for default_kind all trees accumulate
            // into the same row/output slots.
            auto output_offset =
              (row_index * num_outputs * num_grove +
               tree_index * default_num_outputs * num_grove * (infer_type == infer_kind::per_tree) +
               grove_index);
            for (auto output_index = index_type{}; output_index < default_num_outputs;
                 ++output_index) {
              output_workspace[output_offset + output_index * num_grove] +=
                vector_output_p[tree_output * default_num_outputs + output_index];
            }
          } else {
            // Scalar leaves: for default_kind, tree i contributes to output
            // (i % default_num_outputs); for per_tree, each tree gets its own
            // output slot.
            auto output_offset =
              (row_index * num_outputs * num_grove +
               (tree_index % default_num_outputs) * num_grove *
                 (infer_type == infer_kind::default_kind) +
               tree_index * num_grove * (infer_type == infer_kind::per_tree) +
               grove_index);
            output_workspace[output_offset] += tree_output;
          }
        }
      }  // Trees
    }    // Rows
  }      // Tasks

  // Sum over grove and postprocess
#pragma omp parallel for
  for (auto row_index = index_type{}; row_index < row_count; ++row_index) {
    for (auto output_index = index_type{}; output_index < num_outputs; ++output_index) {
      auto grove_offset = (row_index * num_outputs * num_grove + output_index * num_grove);
      // Reduce the per-grove partial results into the first grove slot.
      output_workspace[grove_offset] =
        std::accumulate(std::begin(output_workspace) + grove_offset,
                        std::begin(output_workspace) + grove_offset + num_grove,
                        output_t{});
    }

    // The postprocessor reads with stride num_grove and writes the final
    // num_outputs values for this row.
    postproc(output_workspace.data() + row_index * num_outputs * num_grove,
             num_outputs,
             output + row_index * num_outputs,
             num_grove);
  }
}

}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/infer_kernel/shared_memory_buffer.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #include <cuml/experimental/fil/detail/index_type.hpp> #include <stddef.h> #include <type_traits> namespace ML { namespace experimental { namespace fil { /* A struct used to simplify complex access to a buffer of shared memory * * @param buffer A pointer to the shared memory allocation * @param size The size in bytes of the shared memory allocation */ struct shared_memory_buffer { __device__ shared_memory_buffer(std::byte* buffer = nullptr, index_type size = index_type{}) : data{buffer}, total_size{size}, remaining_data{buffer}, remaining_size{size} { } /* If possible, copy the given number of rows with the given number of columns from source * to the end of this buffer, padding each row by the given number of * elements (usually to reduce memory bank conflicts). If there is not enough * room, no copy is performed. Return a pointer to the desired data, whether * that is in the original location or copied to shared memory. 
*/ template <typename T> __device__ auto* copy(T* source, index_type row_count, index_type col_count, index_type row_pad = index_type{}) { auto* dest = reinterpret_cast<std::remove_const_t<T>*>(remaining_data); auto source_count = row_count * col_count; auto dest_count = row_count * (col_count + row_pad); auto copy_data = (dest_count * sizeof(T) <= remaining_size); source_count *= copy_data; for (auto i = threadIdx.x; i < source_count; i += blockDim.x) { dest[i + row_pad * (i / col_count)] = source[i]; } auto* result = copy_data ? static_cast<T*>(dest) : source; requires_sync = requires_sync || copy_data; auto offset = dest_count * index_type(sizeof(T)); remaining_data += offset; remaining_size -= offset; return result; } /* If possible, copy the given number of elements from source to the end of this buffer * If there is not enough room, no copy is performed. Return a pointer to the * desired data, whether that is in the original location or copied to shared * memory. */ template <typename T> __device__ auto* copy(T* source, index_type element_count) { auto* dest = reinterpret_cast<std::remove_const_t<T>*>(remaining_data); auto copy_data = (element_count * index_type(sizeof(T)) <= remaining_size); element_count *= copy_data; for (auto i = threadIdx.x; i < element_count; i += blockDim.x) { dest[i] = source[i]; } auto* result = copy_data ? static_cast<T*>(dest) : source; requires_sync = requires_sync || copy_data; auto offset = element_count * index_type(sizeof(T)); remaining_data += offset; remaining_size -= offset; return result; } /* If possible, fill the next element_count elements with given value. If * there is not enough room, the fill is not performed. Return a pointer to * the start of the desired data if the fill was possible or else nullptr. 
*/ template <typename T> __device__ auto* fill(index_type element_count, T value = T{}, T* fallback_buffer = nullptr) { auto* dest = reinterpret_cast<std::remove_const_t<T>*>(remaining_data); auto copy_data = (element_count * index_type(sizeof(T)) <= remaining_size); element_count *= copy_data; for (auto i = threadIdx.x; i < element_count; i += blockDim.x) { dest[i] = value; } auto* result = copy_data ? static_cast<T*>(dest) : fallback_buffer; requires_sync = requires_sync || copy_data; auto offset = element_count * index_type(sizeof(T)); remaining_data += offset; remaining_size -= offset; return result; } /* Clear all stored data and return a pointer to the beginning of available * shared memory */ __device__ auto* clear() { remaining_size = total_size; remaining_data = data; return remaining_data; } /* Pad stored data to ensure correct alignment for given type */ template <typename T> __device__ void align() { auto pad_required = (total_size - remaining_size) % index_type(sizeof(T)); remaining_data += pad_required; remaining_size -= pad_required; } /* If necessary, sync threads. Note that this can cause a deadlock if not all * threads call this method. */ __device__ void sync() { if (requires_sync) { __syncthreads(); } requires_sync = false; } /* Return the remaining size in bytes left in this buffer */ __device__ auto remaining() { return remaining_size; } private: std::byte* data; index_type total_size; std::byte* remaining_data; index_type remaining_size; bool requires_sync; }; } // namespace fil } // namespace experimental } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail
rapidsai_public_repos/cuml/cpp/include/cuml/experimental/fil/detail/infer_kernel/gpu.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #include <cuml/experimental/fil/detail/evaluate_tree.hpp> #include <cuml/experimental/fil/detail/gpu_introspection.hpp> #include <cuml/experimental/fil/detail/index_type.hpp> #include <cuml/experimental/fil/detail/infer_kernel/shared_memory_buffer.cuh> #include <cuml/experimental/fil/detail/postprocessor.hpp> #include <cuml/experimental/fil/detail/raft_proto/ceildiv.hpp> #include <cuml/experimental/fil/detail/raft_proto/padding.hpp> #include <cuml/experimental/fil/infer_kind.hpp> #include <stddef.h> namespace ML { namespace experimental { namespace fil { namespace detail { /** * The GPU kernel used to actually perform forest inference * * @tparam has_categorical_nodes Whether or not this kernel should be * compiled to operate on trees with categorical nodes. * @tparam chunk_size The number of rows to be simultaneously processed * in each iteration of inference within a single block. This is a * performance tuning parameter, and having it fixed at compile-time offers a * measurable performance benefit. In standard cuML FIL, we compile for all * powers of 2 from 1 to 32. A power of 2 is not guaranteed to optimize * performance for all batch sizes and models, but it is far more likely to * than other values. * @tparam forest_t The type of the forest object which will be used for * inference. 
* @tparam vector_output_t If non-nullptr_t, this indicates the type we expect * for outputs from vector leaves. * @tparam categorical_data_t If non-nullptr_t, this indicates the type we * expect for non-local categorical data storage. * @param forest The forest used to perform inference * @param postproc The postprocessor object used to store all necessary * data for postprocessing * @param output Pointer to the device-accessible buffer where output * should be written * @param input Pointer to the device-accessible buffer where input should be * read from * @param row_count The number of rows in the input * @param col_count The number of columns per row in the input * @param num_outputs The expected number of output elements per row * @param shared_mem_byte_size The number of bytes of shared memory allocated * to this kernel. * @param output_workspace_size The total number of temporary elements required * to be stored as an intermediate output during inference * @param vector_output_p If non-nullptr, a pointer to the stored leaf * vector outputs for all leaf nodes * @param categorical_data If non-nullptr, a pointer to where non-local * data on categorical splits are stored. * @param infer_type Type of inference to perform. Defaults to summing the outputs of all trees * and produce an output per row. If set to "per_tree", we will instead output all outputs of * individual trees. If set to "leaf_id", we will instead output the integer ID of the leaf node * for each tree. * @param global_mem_fallback_buffer Buffer to use as a fallback, when there isn't enough shared * memory. 
 * Set it to nullptr to disable. */
template <bool has_categorical_nodes,
          index_type chunk_size,
          typename forest_t,
          typename vector_output_t    = std::nullptr_t,
          typename categorical_data_t = std::nullptr_t>
__global__ void __launch_bounds__(MAX_THREADS_PER_BLOCK, MIN_BLOCKS_PER_SM)
  infer_kernel(forest_t forest,
               postprocessor<typename forest_t::io_type> postproc,
               typename forest_t::io_type* output,
               typename forest_t::io_type const* input,
               index_type row_count,
               index_type col_count,
               index_type num_outputs,
               index_type shared_mem_byte_size,
               index_type output_workspace_size,
               vector_output_t vector_output_p     = nullptr,
               categorical_data_t categorical_data = nullptr,
               infer_kind infer_type               = infer_kind::default_kind,
               typename forest_t::template raw_output_type<vector_output_t>* workspace_fallback =
                 nullptr)
{
  auto const default_num_outputs = forest.num_outputs();
  // Vector leaves / non-local categorical support are signalled by a real
  // pointer type in place of std::nullptr_t.
  auto constexpr has_vector_leaves       = !std::is_same_v<vector_output_t, std::nullptr_t>;
  auto constexpr has_nonlocal_categories = !std::is_same_v<categorical_data_t, std::nullptr_t>;
  using output_t = typename forest_t::template raw_output_type<vector_output_t>;
  extern __shared__ std::byte shared_mem_raw[];
  auto shared_mem = shared_memory_buffer(shared_mem_raw, shared_mem_byte_size);

  using node_t = typename forest_t::node_type;
  using io_t   = typename forest_t::io_type;

  // Each block strides across the input in chunks of chunk_size rows.
  for (auto i = blockIdx.x * chunk_size; i < row_count; i += chunk_size * gridDim.x) {
    // i: the ID of the first row in the current chunk

    shared_mem.clear();
    // Per-block accumulator; falls back to the (per-block slice of the)
    // global-memory buffer if it does not fit in shared memory.
    auto* output_workspace = shared_mem.fill<output_t>(
      output_workspace_size, output_t{}, (workspace_fallback + blockIdx.x * output_workspace_size));

    // Handle as many rows as requested per loop or as many rows as are left to
    // process
    auto rows_in_this_iteration = min(chunk_size, row_count - i);

    // Stage the chunk's input rows in shared memory when they fit; otherwise
    // input_data points back at global memory.
    auto* input_data = shared_mem.copy(input + i * col_count, rows_in_this_iteration, col_count);

    auto task_count = chunk_size * forest.tree_count();

    // Groves (independent accumulator slots per output) are only used for
    // default_kind inference; other kinds use a single grove.
    auto num_grove = raft_proto::ceildiv(min(index_type(blockDim.x), task_count), chunk_size) *
                       (infer_type == infer_kind::default_kind) +
                     (infer_type != infer_kind::default_kind);

    // Note that this sync is safe because every thread in the block will agree
    // on whether or not a sync is required
    shared_mem.sync();

    // Every thread must iterate the same number of times in order to avoid a
    // deadlock on __syncthreads, so we round the task_count up to the next
    // multiple of the number of threads in this block. We then only perform
    // work within the loop if the task_index is below the actual task_count.
    auto const task_count_rounded_up = blockDim.x * raft_proto::ceildiv(task_count, blockDim.x);

    // Infer on each tree and row
    for (auto task_index = threadIdx.x; task_index < task_count_rounded_up;
         task_index += blockDim.x) {
      auto row_index = task_index % chunk_size;
      // real_task is false for padding iterations; all indexing below is
      // multiplied by it so out-of-range tasks read/write slot 0 harmlessly
      // (writes are additionally guarded by `if (real_task)`).
      auto real_task = task_index < task_count && row_index < rows_in_this_iteration;
      row_index *= real_task;
      auto tree_index  = task_index * real_task / chunk_size;
      auto grove_index = (threadIdx.x / chunk_size) * (infer_type == infer_kind::default_kind);

      auto tree_output = std::conditional_t<has_vector_leaves,
                                            typename node_t::index_type,
                                            typename node_t::threshold_type>{};
      auto leaf_node_id = index_type{};
      if (infer_type == infer_kind::leaf_id) {
        leaf_node_id = evaluate_tree<has_vector_leaves,
                                     has_categorical_nodes,
                                     has_nonlocal_categories,
                                     true>(
          forest, tree_index, input_data + row_index * col_count, categorical_data);
      } else {
        tree_output = evaluate_tree<has_vector_leaves,
                                    has_categorical_nodes,
                                    has_nonlocal_categories,
                                    false>(
          forest, tree_index, input_data + row_index * col_count, categorical_data);
      }

      if (infer_type == infer_kind::leaf_id) {
        // One output slot per tree: store the leaf node id directly.
        output_workspace[row_index * num_outputs * num_grove + tree_index * num_grove +
                         grove_index] = static_cast<typename forest_t::io_type>(leaf_node_id);
      } else {
        if constexpr (has_vector_leaves) {
          // Per-tree output stride is enabled only for infer_kind::per_tree.
          auto output_offset =
            (row_index * num_outputs * num_grove +
             tree_index * default_num_outputs * num_grove * (infer_type == infer_kind::per_tree) +
             grove_index);
          for (auto output_index = index_type{}; output_index < default_num_outputs;
               ++output_index) {
            if (real_task) {
              output_workspace[output_offset + output_index * num_grove] +=
                vector_output_p[tree_output * default_num_outputs + output_index];
            }
          }
        } else {
          // Scalar leaves: for default_kind, tree i contributes to output
          // (i % default_num_outputs); for per_tree, each tree gets its own slot.
          auto output_offset =
            (row_index * num_outputs * num_grove +
             (tree_index % default_num_outputs) * num_grove *
               (infer_type == infer_kind::default_kind) +
             tree_index * num_grove * (infer_type == infer_kind::per_tree) + grove_index);
          if (real_task) { output_workspace[output_offset] += tree_output; }
        }
      }
      // All threads reach this barrier each iteration because the loop bound
      // was rounded up to a multiple of blockDim.x.
      __syncthreads();
    }

    // Reduce the per-grove partial sums with one warp per row; padding the
    // grove count to a WARP_SIZE multiple keeps every lane in the
    // __shfl_down_sync reduction.
    auto padded_num_groves = raft_proto::padded_size(num_grove, WARP_SIZE);
    for (auto row_index = threadIdx.x / WARP_SIZE; row_index < rows_in_this_iteration;
         row_index += blockDim.x / WARP_SIZE) {
      for (auto class_index = index_type{}; class_index < num_outputs; ++class_index) {
        auto grove_offset = (row_index * num_outputs * num_grove + class_index * num_grove);

        auto class_sum = output_t{};
        for (auto grove_index = threadIdx.x % WARP_SIZE; grove_index < padded_num_groves;
             grove_index += WARP_SIZE) {
          auto real_thread = grove_index < num_grove;
          auto out_index   = grove_offset + grove_index * real_thread;
          // Lane 0 keeps its running total across outer iterations; other
          // lanes restart from zero each pass.
          class_sum *= (threadIdx.x % WARP_SIZE == 0);
          class_sum += output_workspace[out_index] * real_thread;
          // Standard warp tree reduction into lane 0.
          for (auto thread_offset = (WARP_SIZE >> 1); thread_offset > 0; thread_offset >>= 1) {
            class_sum += __shfl_down_sync(0xFFFFFFFF, class_sum, thread_offset);
          }
        }
        if (threadIdx.x % WARP_SIZE == 0) { output_workspace[grove_offset] = class_sum; }
      }

      if (threadIdx.x % WARP_SIZE == 0) {
        postproc(output_workspace + row_index * num_outputs * num_grove,
                 num_outputs,
                 output + ((i + row_index) * num_outputs),
                 num_grove);
      }
    }

    __syncthreads();
  }
}

}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/ensemble/randomforest.hpp
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuml/common/logger.hpp>
#include <cuml/ensemble/treelite_defs.hpp>
#include <cuml/tree/decisiontree.hpp>

#include <map>
#include <memory>

namespace raft {
class handle_t;  // forward decl
}

namespace ML {

enum RF_type {
  CLASSIFICATION,
  REGRESSION,
};

enum task_category { REGRESSION_MODEL = 1, CLASSIFICATION_MODEL = 2 };

/* Evaluation metrics for a random forest; which fields are meaningful depends
 * on rf_type. */
struct RF_metrics {
  RF_type rf_type;

  // Classification metrics
  float accuracy;

  // Regression metrics
  double mean_abs_error;
  double mean_squared_error;
  double median_abs_error;
};

RF_metrics set_all_rf_metrics(RF_type rf_type,
                              float accuracy,
                              double mean_abs_error,
                              double mean_squared_error,
                              double median_abs_error);
RF_metrics set_rf_metrics_classification(float accuracy);
RF_metrics set_rf_metrics_regression(double mean_abs_error,
                                     double mean_squared_error,
                                     double median_abs_error);
void print(const RF_metrics rf_metrics);

struct RF_params {
  /**
   * Number of decision trees in the random forest.
   */
  int n_trees;
  /**
   * Control bootstrapping.
   * If bootstrapping is set to true, bootstrapped samples are used for building
   * each tree. Bootstrapped sampling is done by randomly drawing
   * round(max_samples * n_samples) number of samples with replacement. More on
   * bootstrapping:
   *     https://en.wikipedia.org/wiki/Bootstrap_aggregating
   * If bootstrapping is set to false, whole dataset is used to build each
   * tree.
   */
  bool bootstrap;
  /**
   * Ratio of dataset rows used while fitting each tree.
   */
  float max_samples;
  /**
   * random seed
   */
  uint64_t seed;
  /**
   * Number of concurrent GPU streams for parallel tree building.
   * Each stream is independently managed by CPU thread.
   * N streams need N times RF workspace.
   */
  int n_streams;
  /**
   * Decision tree training hyper parameter struct.
   */
  DT::DecisionTreeParams tree_params;
};

/* Update labels so they are unique from 0 to n_unique_vals.
   Create an old_label to new_label map per random forest.
*/
void preprocess_labels(int n_rows,
                       std::vector<int>& labels,
                       std::map<int, int>& labels_map,
                       int verbosity = CUML_LEVEL_INFO);

/* Revert preprocessing effect, if needed. */
void postprocess_labels(int n_rows,
                        std::vector<int>& labels,
                        std::map<int, int>& labels_map,
                        int verbosity = CUML_LEVEL_INFO);

/* A trained forest: one tree-metadata node per tree plus the parameters used
 * to train it. T is the input data type, L the label/prediction type. */
template <class T, class L>
struct RandomForestMetaData {
  std::vector<std::shared_ptr<DT::TreeMetaDataNode<T, L>>> trees;
  RF_params rf_params;
};

template <class T, class L>
void delete_rf_metadata(RandomForestMetaData<T, L>* forest);

template <class T, class L>
std::string get_rf_summary_text(const RandomForestMetaData<T, L>* forest);

template <class T, class L>
std::string get_rf_detailed_text(const RandomForestMetaData<T, L>* forest);

template <class T, class L>
std::string get_rf_json(const RandomForestMetaData<T, L>* forest);

/* Convert a trained forest to a treelite model handle (see treelite_defs.hpp). */
template <class T, class L>
void build_treelite_forest(ModelHandle* model,
                           const RandomForestMetaData<T, L>* forest,
                           int num_features);

ModelHandle concatenate_trees(std::vector<ModelHandle> treelite_handles);

void compare_concat_forest_to_subforests(ModelHandle concat_tree_handle,
                                         std::vector<ModelHandle> treelite_handles);

// ----------------------------- Classification ----------------------------------- //

typedef RandomForestMetaData<float, int> RandomForestClassifierF;
typedef RandomForestMetaData<double, int> RandomForestClassifierD;

void fit(const raft::handle_t& user_handle,
         RandomForestClassifierF*& forest,
         float* input,
         int n_rows,
         int n_cols,
         int* labels,
         int n_unique_labels,
         RF_params rf_params,
         int verbosity = CUML_LEVEL_INFO);
void fit(const raft::handle_t& user_handle,
         RandomForestClassifierD*& forest,
         double* input,
         int n_rows,
         int n_cols,
         int* labels,
         int n_unique_labels,
         RF_params rf_params,
         int verbosity = CUML_LEVEL_INFO);

void predict(const raft::handle_t& user_handle,
             const RandomForestClassifierF* forest,
             const float* input,
             int n_rows,
             int n_cols,
             int* predictions,
             int verbosity = CUML_LEVEL_INFO);
void predict(const raft::handle_t& user_handle,
             const RandomForestClassifierD* forest,
             const double* input,
             int n_rows,
             int n_cols,
             int* predictions,
             int verbosity = CUML_LEVEL_INFO);

RF_metrics score(const raft::handle_t& user_handle,
                 const RandomForestClassifierF* forest,
                 const int* ref_labels,
                 int n_rows,
                 const int* predictions,
                 int verbosity = CUML_LEVEL_INFO);
RF_metrics score(const raft::handle_t& user_handle,
                 const RandomForestClassifierD* forest,
                 const int* ref_labels,
                 int n_rows,
                 const int* predictions,
                 int verbosity = CUML_LEVEL_INFO);

RF_params set_rf_params(int max_depth,
                        int max_leaves,
                        float max_features,
                        int max_n_bins,
                        int min_samples_leaf,
                        int min_samples_split,
                        float min_impurity_decrease,
                        bool bootstrap,
                        int n_trees,
                        float max_samples,
                        uint64_t seed,
                        CRITERION split_criterion,
                        int cfg_n_streams,
                        int max_batch_size);

// ----------------------------- Regression ----------------------------------- //

typedef RandomForestMetaData<float, float> RandomForestRegressorF;
typedef RandomForestMetaData<double, double> RandomForestRegressorD;

void fit(const raft::handle_t& user_handle,
         RandomForestRegressorF*& forest,
         float* input,
         int n_rows,
         int n_cols,
         float* labels,
         RF_params rf_params,
         int verbosity = CUML_LEVEL_INFO);
void fit(const raft::handle_t& user_handle,
         RandomForestRegressorD*& forest,
         double* input,
         int n_rows,
         int n_cols,
         double* labels,
         RF_params rf_params,
         int verbosity = CUML_LEVEL_INFO);

void predict(const raft::handle_t& user_handle,
             const RandomForestRegressorF* forest,
             const float* input,
             int n_rows,
             int n_cols,
             float* predictions,
             int verbosity = CUML_LEVEL_INFO);
void predict(const raft::handle_t& user_handle,
             const RandomForestRegressorD* forest,
             const double* input,
             int n_rows,
             int n_cols,
             double* predictions,
             int verbosity = CUML_LEVEL_INFO);

RF_metrics score(const raft::handle_t& user_handle,
                 const RandomForestRegressorF* forest,
                 const float* ref_labels,
                 int n_rows,
                 const float* predictions,
                 int verbosity = CUML_LEVEL_INFO);
RF_metrics score(const raft::handle_t& user_handle,
                 const RandomForestRegressorD* forest,
                 const double* ref_labels,
                 int n_rows,
                 const double* predictions,
                 int verbosity = CUML_LEVEL_INFO);
};  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/ensemble/treelite_defs.hpp
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once // Same definition as ModelHandle in treelite, to avoid dependencies // of cuML C++ headers on treelite headers. // Original definition here: // https://github.com/dmlc/treelite/blob/fca738770d2b09be1c0842fac9c0f5e3f6126c40/include/treelite/c_api.h#L25 typedef void* ModelHandle;
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/solvers/params.hpp
/*
 * Copyright (c) 2018, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

namespace ML {

/**
 * Learning-rate schedule selector for the SGD solver (passed to sgdFit as int).
 * NOTE(review): enumerator semantics appear to mirror scikit-learn's SGD
 * `learning_rate` options (eta0 / power_t parameters of sgdFit) — confirm
 * against the solver implementation.
 */
enum lr_type {
  OPTIMAL,
  CONSTANT,
  INVSCALING,
  ADAPTIVE,
};

/** Loss-function selector for the solvers (passed as int). */
enum loss_funct {
  SQRD_LOSS,  // squared (least-squares) loss
  HINGE,      // hinge loss
  LOG,        // logistic loss
};

/** Regularization-penalty selector for the solvers (passed as int). */
enum penalty {
  NONE,
  L1,
  L2,
  ELASTICNET,
};

}  // namespace ML (fixed: removed stray ';' after namespace close)
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/solvers/solver.hpp
/* * Copyright (c) 2018-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace raft { class handle_t; } namespace ML { namespace Solver { void sgdFit(raft::handle_t& handle, float* input, int n_rows, int n_cols, float* labels, float* coef, float* intercept, bool fit_intercept, int batch_size, int epochs, int lr_type, float eta0, float power_t, int loss, int penalty, float alpha, float l1_ratio, bool shuffle, float tol, int n_iter_no_change); void sgdFit(raft::handle_t& handle, double* input, int n_rows, int n_cols, double* labels, double* coef, double* intercept, bool fit_intercept, int batch_size, int epochs, int lr_type, double eta0, double power_t, int loss, int penalty, double alpha, double l1_ratio, bool shuffle, double tol, int n_iter_no_change); void sgdPredict(raft::handle_t& handle, const float* input, int n_rows, int n_cols, const float* coef, float intercept, float* preds, int loss); void sgdPredict(raft::handle_t& handle, const double* input, int n_rows, int n_cols, const double* coef, double intercept, double* preds, int loss); void sgdPredictBinaryClass(raft::handle_t& handle, const float* input, int n_rows, int n_cols, const float* coef, float intercept, float* preds, int loss); void sgdPredictBinaryClass(raft::handle_t& handle, const double* input, int n_rows, int n_cols, const double* coef, double intercept, double* preds, int loss); /** * Fits a linear, lasso, and elastic-net regression model using Coordinate Descent 
solver. * * i.e. finds coefficients that minimize the following loss function: * * f(coef) = 1/2 * || labels - input * coef ||^2 * + 1/2 * alpha * (1 - l1_ratio) * ||coef||^2 * + alpha * l1_ratio * ||coef||_1 * * * @param handle * Reference of raft::handle_t * @param input * pointer to an array in column-major format (size of n_rows, n_cols) * @param n_rows * n_samples or rows in input * @param n_cols * n_features or columns in X * @param labels * pointer to an array for labels (size of n_rows) * @param coef * pointer to an array for coefficients (size of n_cols). This will be filled with * coefficients once the function is executed. * @param intercept * pointer to a scalar for intercept. This will be filled * once the function is executed * @param fit_intercept * boolean parameter to control if the intercept will be fitted or not * @param normalize * boolean parameter to control if the data will be normalized or not; * NB: the input is scaled by the column-wise biased sample standard deviation estimator. * @param epochs * Maximum number of iterations that solver will run * @param loss * enum to use different loss functions. Only linear regression loss functions is supported * right now * @param alpha * L1 parameter * @param l1_ratio * ratio of alpha will be used for L1. 
(1 - l1_ratio) * alpha will be used for L2 * @param shuffle * boolean parameter to control whether coordinates will be picked randomly or not * @param tol * tolerance to stop the solver * @param sample_weight * device pointer to sample weight vector of length n_rows (nullptr or uniform weights) * This vector is modified during the computation */ void cdFit(raft::handle_t& handle, float* input, int n_rows, int n_cols, float* labels, float* coef, float* intercept, bool fit_intercept, bool normalize, int epochs, int loss, float alpha, float l1_ratio, bool shuffle, float tol, float* sample_weight = nullptr); void cdFit(raft::handle_t& handle, double* input, int n_rows, int n_cols, double* labels, double* coef, double* intercept, bool fit_intercept, bool normalize, int epochs, int loss, double alpha, double l1_ratio, bool shuffle, double tol, double* sample_weight = nullptr); void cdPredict(raft::handle_t& handle, const float* input, int n_rows, int n_cols, const float* coef, float intercept, float* preds, int loss); void cdPredict(raft::handle_t& handle, const double* input, int n_rows, int n_cols, const double* coef, double intercept, double* preds, int loss); }; // namespace Solver }; // end namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/solvers/lars.hpp
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/handle.hpp> namespace ML { namespace Solver { namespace Lars { /** * @brief Train a regressor using LARS method. * * @param handle RAFT handle * @param X device array of training vectors in column major format, * size [n_rows * n_cols]. Note that the columns of X will be permuted if * the Gram matrix is not specified. It is expected that X is normalized so * that each column has zero mean and unit variance. * @param n_rows number of training samples * @param n_cols number of feature columns * @param y device array of the regression targets, size [n_rows]. y should * be normalized to have zero mean. * @param beta device array of regression coefficients, has to be allocated on * entry, size [max_iter] * @param active_idx device array containing the indices of active variables. * Must be allocated on entry. Size [max_iter] * @param alphas the maximum correlation along the regularization path are * returned here. Must be a device array allocated on entry Size [max_iter]. * @param n_active host pointer to return the number of active elements, scalar. * @param Gram device array containing Gram matrix (X.T * X). Can be nullptr. * Size [n_cols * ld_G] * @param max_iter maximum number of iterations, this equals with the maximum * number of coefficients returned. max_iter <= n_cols. 
* @param coef_path coefficients along the regularization path are returned * here. Must be nullptr, or a device array already allocated on entry. * Size [max_iter * max_iter]. * @param verbosity verbosity level * @param ld_X leading dimension of X (stride of columns, ld_X >= n_rows). * @param ld_G leading dimesion of G (ld_G >= n_cols) * @param eps numeric parameter for Cholesky rank one update */ template <typename math_t, typename idx_t> void larsFit(const raft::handle_t& handle, math_t* X, idx_t n_rows, idx_t n_cols, const math_t* y, math_t* beta, idx_t* active_idx, math_t* alphas, idx_t* n_active, math_t* Gram, int max_iter, math_t* coef_path, int verbosity, idx_t ld_X, idx_t ld_G, math_t eps); /** * @brief Predict with LARS regressor. * * @param handle RAFT handle * @param X device array of training vectors in column major format, * size [n_rows * n_cols]. * @param n_rows number of training samples * @param n_cols number of feature columns * @param ld_X leading dimension of X (stride of columns) * @param beta device array of regression coefficients, size [n_active] * @param n_active the number of regression coefficients * @param active_idx device array containing the indices of active variables. * Only these columns of X will be used for prediction, size [n_active]. * @param intercept * @param preds device array to store the predictions, size [n_rows]. Must be * allocated on entry. */ template <typename math_t, typename idx_t> void larsPredict(const raft::handle_t& handle, const math_t* X, idx_t n_rows, idx_t n_cols, idx_t ld_X, const math_t* beta, idx_t n_active, idx_t* active_idx, math_t intercept, math_t* preds); }; // namespace Lars }; // namespace Solver }; // end namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/solvers/cd_mg.hpp
/*
 * Copyright (c) 2020-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuml/linear_model/glm.hpp>
#include <cumlprims/opg/matrix/data.hpp>
#include <cumlprims/opg/matrix/part_descriptor.hpp>

namespace ML {
namespace CD {
namespace opg {

/**
 * @brief performs MNMG fit operation for coordinate-descent linear regression
 *        (Lasso / Elastic-Net, given the alpha and l1_ratio parameters)
 * @param[in] handle: the internal cuml handle object
 * @param[in] input_data: vector holding all partitions for that rank
 * @param[in] input_desc: PartDescriptor object for the input
 * @param[in] labels: labels data
 * @param[out] coef: learned regression coefficients
 * @param[out] intercept: intercept value
 * @param[in] fit_intercept: fit intercept or not
 * @param[in] normalize: normalize the data or not
 * @param[in] epochs: number of epochs
 * @param[in] alpha: regularization strength of the elastic-net penalty
 * @param[in] l1_ratio: l1 ratio
 * @param[in] shuffle: whether to shuffle the data
 * @param[in] tol: tolerance for early stopping during fitting
 * @param[in] verbose: verbosity flag
 */
void fit(raft::handle_t& handle,
         std::vector<MLCommon::Matrix::Data<float>*>& input_data,
         MLCommon::Matrix::PartDescriptor& input_desc,
         std::vector<MLCommon::Matrix::Data<float>*>& labels,
         float* coef,
         float* intercept,
         bool fit_intercept,
         bool normalize,
         int epochs,
         float alpha,
         float l1_ratio,
         bool shuffle,
         float tol,
         bool verbose);
/** Double-precision overload of fit() above. */
void fit(raft::handle_t& handle,
         std::vector<MLCommon::Matrix::Data<double>*>& input_data,
         MLCommon::Matrix::PartDescriptor& input_desc,
         std::vector<MLCommon::Matrix::Data<double>*>& labels,
         double* coef,
         double* intercept,
         bool fit_intercept,
         bool normalize,
         int epochs,
         double alpha,
         double l1_ratio,
         bool shuffle,
         double tol,
         bool verbose);

/**
 * @brief performs MNMG prediction with the linear model trained by the
 *        coordinate-descent fit() above
 * @param[in] handle: the internal cuml handle object
 * @param[in] rank_sizes: includes all the partition size information for the rank
 * @param[in] n_parts: number of partitions
 * @param[in] input: input data
 * @param[in] n_rows: number of rows of input data
 * @param[in] n_cols: number of cols of input data
 * @param[in] coef: learned regression coefficients
 * @param[in] intercept: the fit intercept
 * @param[out] preds: predictions
 * @param[in] verbose: verbosity flag
 */
void predict(raft::handle_t& handle,
             MLCommon::Matrix::RankSizePair** rank_sizes,
             size_t n_parts,
             MLCommon::Matrix::Data<float>** input,
             size_t n_rows,
             size_t n_cols,
             float* coef,
             float intercept,
             MLCommon::Matrix::Data<float>** preds,
             bool verbose);
/** Double-precision overload of predict() above. */
void predict(raft::handle_t& handle,
             MLCommon::Matrix::RankSizePair** rank_sizes,
             size_t n_parts,
             MLCommon::Matrix::Data<double>** input,
             size_t n_rows,
             size_t n_cols,
             double* coef,
             double intercept,
             MLCommon::Matrix::Data<double>** preds,
             bool verbose);

};  // end namespace opg
};  // namespace CD
};  // end namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/tsa/holtwinters_api.h
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/cuml_api.h> #include <stdio.h> #ifdef __cplusplus extern "C" { #endif typedef enum cumlHoltWintersSeasonal_t { ADDITIVE, MULTIPLICATIVE } cumlHoltWintersSeasonal_t; /** * @brief Provides buffer sizes for HoltWinters algorithm * @param[in] n * n_samples in time-series * @param[in] batch_size * number of time-series in X * @param[in] frequency * number of periods in a season of the time-series * @param[out] start_leveltrend_len * pointer which will hold the length of the level/trend array buffers * @param[out] start_season_len * pointer which will hold the length of the seasonal array buffer * @param[out] components_len * pointer which will hold the length of all three components * @param[out] error_len * pointer which will hold the length of the SSE Error * @param[out] leveltrend_coef_shift * pointer which will hold the offset to level/trend arrays * @param[out] season_coef_shift * pointer which will hold the offset to season array * @return CUML_SUCCESS on success and other corresponding flags upon any failures. 
*/ cumlError_t cumlHoltWinters_buffer_size(int n, int batch_size, int frequency, int* start_leveltrend_len, int* start_season_len, int* components_len, int* error_len, int* leveltrend_coef_shift, int* season_coef_shift); /** * @defgroup HoltWinterFit Training methods * @brief Fits a HoltWinters model * @param[in] handle * cuml handle to use across the algorithm * @param[in] n * n_samples in time-series * @param[in] batch_size * number of time-series in X * @param[in] frequency * number of periods in a season of the time-series * @param[in] start_periods * number of seasons to be used for seasonal seed values * @param[in] seasonal * type of seasonal component (ADDITIVE or MULTIPLICATIVE) * @param[in] epsilon * the error tolerance value for optimization * @param[in] data * device pointer to the data to fit on * @param[out] level_ptr * device pointer to array which will hold level components * @param[out] trend_ptr * device pointer to array which will hold trend components * @param[out] season_ptr * device pointer to array which will hold season components * @param[out] SSE_error_ptr * device pointer to array which will hold training SSE error * @return CUML_SUCCESS on success and other corresponding flags upon any failures. 
* @{ */ cumlError_t cumlHoltWintersSp_fit(cumlHandle_t handle, int n, int batch_size, int frequency, int start_periods, cumlHoltWintersSeasonal_t seasonal, float epsilon, float* data, float* level_ptr, float* trend_ptr, float* season_ptr, float* SSE_error_ptr); cumlError_t cumlHoltWintersDp_fit(cumlHandle_t handle, int n, int batch_size, int frequency, int start_periods, cumlHoltWintersSeasonal_t seasonal, double epsilon, double* data, double* level_ptr, double* trend_ptr, double* season_ptr, double* SSE_error_ptr); /** @} */ /** * @defgroup HoltWinterForecast Forecast methods * @brief Forecasts future points from fitted HoltWinters model * @param[in] handle * cuml handle to use across the algorithm * @param[in] n * n_samples in time-series * @param[in] batch_size * number of time-series in X * @param[in] frequency * number of periods in a season of the time-series * @param[in] h * number of future points to predict in the time-series * @param[in] seasonal * type of seasonal component (ADDITIVE or MULTIPLICATIVE) * @param[out] level_d * device pointer to array which holds level components * @param[out] trend_d * device pointer to array which holds trend components * @param[out] season_d * device pointer to array which holds season components * @param[out] forecast_d * device pointer to array which will hold the forecast points * @return CUML_SUCCESS on success and other corresponding flags upon any failures. * @{ */ cumlError_t cumlHoltWintersSp_forecast(cumlHandle_t handle, int n, int batch_size, int frequency, int h, cumlHoltWintersSeasonal_t seasonal, float* level_ptr, float* trend_ptr, float* season_ptr, float* forecast_ptr); cumlError_t cumlHoltWintersDp_forecast(cumlHandle_t handle, int n, int batch_size, int frequency, int h, cumlHoltWintersSeasonal_t seasonal, double* level_ptr, double* trend_ptr, double* season_ptr, double* forecast_ptr); /** @} */ #ifdef __cplusplus } #endif
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/tsa/stationarity.h
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace raft { class handle_t; } namespace ML { namespace Stationarity { /** * @brief Perform the KPSS stationarity test on the data differenced according * to the given order * * @param[in] handle cuML handle * @param[in] d_y Input data (column-major, series in columns) * @param[out] results Boolean device array to store the results * @param[in] batch_size Batch size * @param[in] n_obs Number of observations * @param[in] d Order of simple differencing * @param[out] D Order of seasonal differencing * @param[in] s Seasonal period if D > 0 (else unused) * @param[in] pval_threshold P-value threshold above which a series is * considered stationary */ void kpss_test(const raft::handle_t& handle, const float* d_y, bool* results, int batch_size, int n_obs, int d, int D, int s, float pval_threshold); void kpss_test(const raft::handle_t& handle, const double* d_y, bool* results, int batch_size, int n_obs, int d, int D, int s, double pval_threshold); } // namespace Stationarity } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/tsa/holtwinters.h
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "holtwinters_params.h"

namespace raft {
class handle_t;
}

namespace ML {
namespace HoltWinters {

/**
 * @brief Provides buffer sizes for HoltWinters algorithm
 * @param[in] n
 *   n_samples in time-series
 * @param[in] batch_size
 *   number of time-series in X
 * @param[in] frequency
 *   number of periods in a season of the time-series
 * @param[out] start_leveltrend_len
 *   pointer which will hold the length of the level/trend array buffers
 * @param[out] start_season_len
 *   pointer which will hold the length of the seasonal array buffer
 * @param[out] components_len
 *   pointer which will hold the length of all three components
 * @param[out] error_len
 *   pointer which will hold the length of the SSE Error
 * @param[out] leveltrend_coef_shift
 *   pointer which will hold the offset to level/trend arrays
 * @param[out] season_coef_shift
 *   pointer which will hold the offset to season array
 */
void buffer_size(int n,
                 int batch_size,
                 int frequency,
                 int* start_leveltrend_len,
                 int* start_season_len,
                 int* components_len,
                 int* error_len,
                 int* leveltrend_coef_shift,
                 int* season_coef_shift);

/**
 * @brief Fits a HoltWinters model
 * @param[in] handle
 *   cuml handle to use across the algorithm
 * @param[in] n
 *   n_samples in time-series
 * @param[in] batch_size
 *   number of time-series in X
 * @param[in] frequency
 *   number of periods in a season of the time-series
 * @param[in] start_periods
 *   number of seasons to be used for seasonal seed values
 * @param[in] seasonal
 *   type of seasonal component (ADDITIVE or MULTIPLICATIVE)
 * @param[in] epsilon
 *   the error tolerance value for optimization
 * @param[in] data
 *   device pointer to the data to fit on
 * @param[out] level_d
 *   device pointer to array which will hold level components
 * @param[out] trend_d
 *   device pointer to array which will hold trend components
 * @param[out] season_d
 *   device pointer to array which will hold season components
 * @param[out] error_d
 *   device pointer to array which will hold training SSE error
 */
void fit(const raft::handle_t& handle,
         int n,
         int batch_size,
         int frequency,
         int start_periods,
         ML::SeasonalType seasonal,
         float epsilon,
         float* data,
         float* level_d,
         float* trend_d,
         float* season_d,
         float* error_d);
/** Double-precision overload of fit() above. */
void fit(const raft::handle_t& handle,
         int n,
         int batch_size,
         int frequency,
         int start_periods,
         ML::SeasonalType seasonal,
         double epsilon,
         double* data,
         double* level_d,
         double* trend_d,
         double* season_d,
         double* error_d);

/**
 * @brief Forecasts future points from fitted HoltWinters model
 * @param[in] handle
 *   cuml handle to use across the algorithm
 * @param[in] n
 *   n_samples in time-series
 * @param[in] batch_size
 *   number of time-series in X
 * @param[in] frequency
 *   number of periods in a season of the time-series
 * @param[in] h
 *   number of future points to predict in the time-series
 * @param[in] seasonal
 *   type of seasonal component (ADDITIVE or MULTIPLICATIVE)
 * @param[in] level_d
 *   device pointer to array which holds level components
 *   (fixed: these three component arrays are consumed by the forecast — the
 *   previous [out] annotations were wrong)
 * @param[in] trend_d
 *   device pointer to array which holds trend components
 * @param[in] season_d
 *   device pointer to array which holds season components
 * @param[out] forecast_d
 *   device pointer to array which will hold the forecast points
 */
void forecast(const raft::handle_t& handle,
              int n,
              int batch_size,
              int frequency,
              int h,
              ML::SeasonalType seasonal,
              float* level_d,
              float* trend_d,
              float* season_d,
              float* forecast_d);
/** Double-precision overload of forecast() above. */
void forecast(const raft::handle_t& handle,
              int n,
              int batch_size,
              int frequency,
              int h,
              ML::SeasonalType seasonal,
              double* level_d,
              double* trend_d,
              double* season_d,
              double* forecast_d);

}  // namespace HoltWinters
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/tsa/batched_arima.hpp
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/tsa/arima_common.h> namespace raft { class handle_t; } namespace ML { enum LoglikeMethod { CSS, MLE }; /** * Pack separate parameter arrays into a compact array * * @param[in] handle cuML handle * @param[in] params Parameter structure * @param[in] order ARIMA order * @param[in] batch_size Batch size * @param[out] param_vec Compact parameter array */ void pack(raft::handle_t& handle, const ARIMAParams<double>& params, const ARIMAOrder& order, int batch_size, double* param_vec); /** * Unpack a compact array into separate parameter arrays * * @param[in] handle cuML handle * @param[out] params Parameter structure * @param[in] order ARIMA order * @param[in] batch_size Batch size * @param[in] param_vec Compact parameter array */ void unpack(raft::handle_t& handle, ARIMAParams<double>& params, const ARIMAOrder& order, int batch_size, const double* param_vec); /** * Detect missing observations in a time series * * @param[in] handle cuML handle * @param[in] d_y Time series * @param[in] n_elem Total number of elements in the dataset */ bool detect_missing(raft::handle_t& handle, const double* d_y, int n_elem); /** * Compute the differenced series (seasonal and/or non-seasonal differences) * * @param[in] handle cuML handle * @param[out] d_y_diff Differenced series * @param[in] d_y Original series * @param[in] batch_size Batch size * @param[in] n_obs Number of 
observations * @param[in] order ARIMA order */ void batched_diff(raft::handle_t& handle, double* d_y_diff, const double* d_y, int batch_size, int n_obs, const ARIMAOrder& order); /** * Compute the loglikelihood of the given parameter on the given time series * in a batched context. * * @param[in] handle cuML handle * @param[in] arima_mem Pre-allocated temporary memory * @param[in] d_y Series to fit: shape = (n_obs, batch_size) and * expects column major data layout. (device) * @param[in] d_exog Exogenous variables: shape = (n_obs, n_exog * batch_size) and * expects column major data layout. (device) * @param[in] batch_size Number of time series * @param[in] n_obs Number of observations in a time series * @param[in] order ARIMA hyper-parameters * @param[in] d_params Parameters to evaluate grouped by series: * [mu0, ar.., ma.., mu1, ..] (device) * @param[out] loglike Log-Likelihood of the model per series * @param[in] trans Run `jones_transform` on params. * @param[in] host_loglike Whether loglike is a host pointer * @param[in] method Whether to use sum-of-squares or Kalman filter * @param[in] truncate For CSS, start the sum-of-squares after a given * number of observations */ void batched_loglike(raft::handle_t& handle, const ARIMAMemory<double>& arima_mem, const double* d_y, const double* d_exog, int batch_size, int n_obs, const ARIMAOrder& order, const double* d_params, double* loglike, bool trans = true, bool host_loglike = true, LoglikeMethod method = MLE, int truncate = 0); /** * Compute the loglikelihood of the given parameter on the given time series * in a batched context. * * @note: this overload should be used when the parameters are already unpacked * to avoid useless packing / unpacking * * @param[in] handle cuML handle * @param[in] arima_mem Pre-allocated temporary memory * @param[in] d_y Series to fit: shape = (n_obs, batch_size) and * expects column major data layout. 
(device) * @param[in] d_exog Exogenous variables: shape = (n_obs, n_exog * batch_size) and * expects column major data layout. (device) * @param[in] batch_size Number of time series * @param[in] n_obs Number of observations in a time series * @param[in] order ARIMA hyper-parameters * @param[in] params ARIMA parameters (device) * @param[out] loglike Log-Likelihood of the model per series * @param[in] trans Run `jones_transform` on params. * @param[in] host_loglike Whether loglike is a host pointer * @param[in] method Whether to use sum-of-squares or Kalman filter * @param[in] truncate For CSS, start the sum-of-squares after a given * number of observations * @param[in] fc_steps Number of steps to forecast * @param[in] d_fc Array to store the forecast * @param[in] d_exog_fut Future values of exogenous variables * Shape (fc_steps, n_exog * batch_size) (col-major, device) * @param[in] level Confidence level for prediction intervals. 0 to * skip the computation. Else 0 < level < 1 * @param[out] d_lower Lower limit of the prediction interval * @param[out] d_upper Upper limit of the prediction interval */ void batched_loglike(raft::handle_t& handle, const ARIMAMemory<double>& arima_mem, const double* d_y, const double* d_exog, int batch_size, int n_obs, const ARIMAOrder& order, const ARIMAParams<double>& params, double* loglike, bool trans = true, bool host_loglike = true, LoglikeMethod method = MLE, int truncate = 0, int fc_steps = 0, double* d_fc = nullptr, const double* d_exog_fut = nullptr, double level = 0, double* d_lower = nullptr, double* d_upper = nullptr); /** * Compute the gradient of the log-likelihood * * @param[in] handle cuML handle * @param[in] arima_mem Pre-allocated temporary memory * @param[in] d_y Series to fit: shape = (n_obs, batch_size) and * expects column major data layout. (device) * @param[in] d_exog Exogenous variables: shape = (n_obs, n_exog * batch_size) and * expects column major data layout. 
(device) * @param[in] batch_size Number of time series * @param[in] n_obs Number of observations in a time series * @param[in] order ARIMA hyper-parameters * @param[in] d_x Parameters grouped by series * @param[out] d_grad Gradient to compute * @param[in] h Finite-differencing step size * @param[in] trans Run `jones_transform` on params * @param[in] method Whether to use sum-of-squares or Kalman filter * @param[in] truncate For CSS, start the sum-of-squares after a given * number of observations */ void batched_loglike_grad(raft::handle_t& handle, const ARIMAMemory<double>& arima_mem, const double* d_y, const double* d_exog, int batch_size, int n_obs, const ARIMAOrder& order, const double* d_x, double* d_grad, double h, bool trans = true, LoglikeMethod method = MLE, int truncate = 0); /** * Batched in-sample and out-of-sample prediction of a time-series given all * the model parameters * * @param[in] handle cuML handle * @param[in] arima_mem Pre-allocated temporary memory * @param[in] d_y Batched Time series to predict. * Shape: (num_samples, batch size) (device) * @param[in] d_exog Exogenous variables. * Shape = (n_obs, n_exog * batch_size) (device) * @param[in] d_exog_fut Future values of exogenous variables * Shape: (end - n_obs, batch_size) (device) * @param[in] batch_size Total number of batched time series * @param[in] n_obs Number of samples per time series * (all series must be identical) * @param[in] start Index to start the prediction * @param[in] end Index to end the prediction (excluded) * @param[in] order ARIMA hyper-parameters * @param[in] params ARIMA parameters (device) * @param[out] d_y_p Prediction output (device) * @param[in] pre_diff Whether to use pre-differencing * @param[in] level Confidence level for prediction intervals. 0 to * skip the computation. 
Else 0 < level < 1 * @param[out] d_lower Lower limit of the prediction interval * @param[out] d_upper Upper limit of the prediction interval */ void predict(raft::handle_t& handle, const ARIMAMemory<double>& arima_mem, const double* d_y, const double* d_exog, const double* d_exog_fut, int batch_size, int n_obs, int start, int end, const ARIMAOrder& order, const ARIMAParams<double>& params, double* d_y_p, bool pre_diff = true, double level = 0, double* d_lower = nullptr, double* d_upper = nullptr); /** * Compute an information criterion (AIC, AICc, BIC) * * @param[in] handle cuML handle * @param[in] arima_mem Pre-allocated temporary memory * @param[in] d_y Series to fit: shape = (n_obs, batch_size) and * expects column major data layout. (device) * @param[in] d_exog Exogenous variables. * Shape = (n_obs, n_exog * batch_size) (device) * @param[in] batch_size Total number of batched time series * @param[in] n_obs Number of samples per time series * (all series must be identical) * @param[in] order ARIMA hyper-parameters * @param[in] params ARIMA parameters (device) * @param[out] ic Array where to write the information criteria * Shape: (batch_size) (device) * @param[in] ic_type Type of information criterion wanted. * 0: AIC, 1: AICc, 2: BIC */ void information_criterion(raft::handle_t& handle, const ARIMAMemory<double>& arima_mem, const double* d_y, const double* d_exog, int batch_size, int n_obs, const ARIMAOrder& order, const ARIMAParams<double>& params, double* ic, int ic_type); /** * Provide initial estimates to ARIMA parameters mu, AR, and MA * * @param[in] handle cuML handle * @param[in] params ARIMA parameters (device) * @param[in] d_y Series to fit: shape = (n_obs, batch_size) and * expects column major data layout. (device) * @param[in] d_exog Exogenous variables. 
* Shape = (n_obs, n_exog * batch_size) (device) * @param[in] batch_size Total number of batched time series * @param[in] n_obs Number of samples per time series * (all series must be identical) * @param[in] order ARIMA hyper-parameters * @param[in] missing Are there missing observations? */ void estimate_x0(raft::handle_t& handle, ARIMAParams<double>& params, const double* d_y, const double* d_exog, int batch_size, int n_obs, const ARIMAOrder& order, bool missing); } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/tsa/batched_kalman.hpp
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/tsa/arima_common.h> namespace raft { class handle_t; } namespace ML { /** * An ARIMA specialized batched kalman filter to evaluate ARMA parameters and * provide the resulting prediction as well as loglikelihood fit. * * @param[in] handle cuML handle * @param[in] arima_mem Pre-allocated temporary memory * @param[in] d_ys Batched time series * Shape (nobs, batch_size) (col-major, device) * @param[in] d_exog Batched exogenous variables * Shape (nobs, n_exog * batch_size) (col-major, device) * @param[in] nobs Number of samples per time series * @param[in] params ARIMA parameters (device) * @param[in] order ARIMA hyper-parameters * @param[in] batch_size Number of series making up the batch * @param[out] d_loglike Resulting log-likelihood (per series) (device) * @param[out] d_pred Predictions * shape=(nobs-d-s*D, batch_size) (device) * @param[in] fc_steps Number of steps to forecast * @param[in] d_fc Array to store the forecast * @param[in] d_exog_fut Future values of exogenous variables * Shape (fc_steps, n_exog * batch_size) (col-major, device) * @param[in] level Confidence level for prediction intervals. 0 to * skip the computation. 
Else 0 < level < 1 * @param[out] d_lower Lower limit of the prediction interval * @param[out] d_upper Upper limit of the prediction interval */ void batched_kalman_filter(raft::handle_t& handle, const ARIMAMemory<double>& arima_mem, const double* d_ys, const double* d_exog, int nobs, const ARIMAParams<double>& params, const ARIMAOrder& order, int batch_size, double* d_loglike, double* d_pred, int fc_steps = 0, double* d_fc = nullptr, const double* d_exog_fut = nullptr, double level = 0, double* d_lower = nullptr, double* d_upper = nullptr); /** * Convenience function for batched "jones transform" used in ARIMA to ensure * certain properties of the AR and MA parameters (takes host array and * returns host array) * * @param[in] handle cuML handle * @param[in] arima_mem Pre-allocated temporary memory * @param[in] order ARIMA hyper-parameters * @param[in] batch_size Number of time series analyzed. * @param[in] isInv Do the inverse transform? * @param[in] h_params ARIMA parameters by batch (mu, ar, ma) (host) * @param[out] h_Tparams Transformed ARIMA parameters * (expects pre-allocated array of size * (p+q)*batch_size) (host) */ void batched_jones_transform(raft::handle_t& handle, const ARIMAMemory<double>& arima_mem, const ARIMAOrder& order, int batch_size, bool isInv, const double* h_params, double* h_Tparams); } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/tsa/auto_arima.h
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace raft { class handle_t; } namespace ML { /** * Batch division by mask step 1: build an index of the position of each series * in its new batch and measure the size of each sub-batch * * @param[in] handle cuML handle * @param[in] d_mask Boolean mask * @param[out] d_index Index of each series in its new batch * @param[in] batch_size Batch size * @return The number of 'true' series in the mask */ int divide_by_mask_build_index(const raft::handle_t& handle, const bool* d_mask, int* d_index, int batch_size); /** * Batch division by mask step 2: create both sub-batches from the mask and * index * * @param[in] handle cuML handle * @param[in] d_in Input batch. 
Each series is a contiguous chunk * @param[in] d_mask Boolean mask * @param[in] d_index Index of each series in its new batch * @param[out] d_out0 The sub-batch for the 'false' members * @param[out] d_out1 The sub-batch for the 'true' members * @param[in] batch_size Batch size * @param[in] n_obs Number of data points per series */ void divide_by_mask_execute(const raft::handle_t& handle, const float* d_in, const bool* d_mask, const int* d_index, float* d_out0, float* d_out1, int batch_size, int n_obs); void divide_by_mask_execute(const raft::handle_t& handle, const double* d_in, const bool* d_mask, const int* d_index, double* d_out0, double* d_out1, int batch_size, int n_obs); void divide_by_mask_execute(const raft::handle_t& handle, const int* d_in, const bool* d_mask, const int* d_index, int* d_out0, int* d_out1, int batch_size, int n_obs); /** * Batch division by minimum value step 1: build an index of which sub-batch * each series belongs to, an index of the position of each series in its new * batch, and measure the size of each sub-batch * * @param[in] handle cuML handle * @param[in] d_matrix Matrix of the values to minimize * Shape: (batch_size, n_sub) * @param[out] d_batch Which sub-batch each series belongs to * @param[out] d_index Index of each series in its new batch * @param[out] h_size Size of each sub-batch (host) * @param[in] batch_size Batch size * @param[in] n_sub Number of sub-batches */ void divide_by_min_build_index(const raft::handle_t& handle, const float* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub); void divide_by_min_build_index(const raft::handle_t& handle, const double* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub); /** * Batch division by minimum value step 2: create all the sub-batches * * @param[in] handle cuML handle * @param[in] d_in Input batch. 
Each series is a contiguous chunk * @param[in] d_batch Which sub-batch each series belongs to * @param[in] d_index Index of each series in its new sub-batch * @param[out] hd_out Host array of pointers to device arrays of each * sub-batch * @param[in] batch_size Batch size * @param[in] n_sub Number of sub-batches * @param[in] n_obs Number of data points per series */ void divide_by_min_execute(const raft::handle_t& handle, const float* d_in, const int* d_batch, const int* d_index, float** hd_out, int batch_size, int n_sub, int n_obs); void divide_by_min_execute(const raft::handle_t& handle, const double* d_in, const int* d_batch, const int* d_index, double** hd_out, int batch_size, int n_sub, int n_obs); void divide_by_min_execute(const raft::handle_t& handle, const int* d_in, const int* d_batch, const int* d_index, int** hd_out, int batch_size, int n_sub, int n_obs); /** * Build a map to associate each batch member with a model and index in the * associated sub-batch * * @param[in] handle cuML handle * @param[in] hd_id Host array of pointers to device arrays containing * the indices of the members of each sub-batch * @param[in] h_size Host array containing the size of each sub-batch * @param[out] d_id_to_pos Device array containing the position of each * member in its new sub-batch * @param[out] d_id_to_model Device array associating each member with its * sub-batch * @param[in] batch_size Batch size * @param[in] n_sub Number of sub-batches */ void build_division_map(const raft::handle_t& handle, const int* const* hd_id, const int* h_size, int* d_id_to_pos, int* d_id_to_model, int batch_size, int n_sub); /** * Merge multiple sub-batches into one batch according to the maps that * associate each id in the unique batch to a sub-batch and a position in * this sub-batch. 
* * @param[in] handle cuML handle * @param[in] hd_in Host array of pointers to device arrays containing * the sub-batches * @param[in] d_id_to_pos Device array containing the position of each member * in its new sub-batch * @param[in] d_id_to_sub Device array associating each member with its * sub-batch * @param[out] d_out Output merged batch * @param[in] batch_size Batch size * @param[in] n_sub Number of sub-batches * @param[in] n_obs Number of observations (or forecasts) per series */ void merge_series(const raft::handle_t& handle, const float* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, float* d_out, int batch_size, int n_sub, int n_obs); void merge_series(const raft::handle_t& handle, const double* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, double* d_out, int batch_size, int n_sub, int n_obs); } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/tsa/holtwinters_params.h
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace ML { // The type of season that represents the time-series // ADDITIVE is with stable trend and level over periods // MULTIPLICATIVE is with increasing/decreasing trend and level over periods enum SeasonalType { ADDITIVE, MULTIPLICATIVE }; enum OptimCriterion { OPTIM_BFGS_ITER_LIMIT = 0, OPTIM_MIN_PARAM_DIFF = 1, OPTIM_MIN_ERROR_DIFF = 2, OPTIM_MIN_GRAD_NORM = 3, }; // These are the parameters used for optimizing alpha, beta, and gamma values template <typename Dtype> struct OptimParams { Dtype eps; Dtype min_param_diff; Dtype min_error_diff; Dtype min_grad_norm; int bfgs_iter_limit; int linesearch_iter_limit; Dtype linesearch_tau; Dtype linesearch_c; Dtype linesearch_step_size; }; enum Norm { L0, L1, L2, LINF }; } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/tsa/arima_common.h
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuda_runtime.h>

#include <algorithm>

#include <raft/util/cudart_utils.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>

namespace ML {

/**
 * Structure to hold the ARIMA order (makes it easier to pass as an argument)
 */
struct ARIMAOrder {
  int p;  // Basic order
  int d;
  int q;
  int P;  // Seasonal order
  int D;
  int Q;
  int s;       // Seasonal period
  int k;       // Fit intercept?
  int n_exog;  // Number of exogenous regressors

  // Total differencing order (simple + seasonal)
  inline int n_diff() const { return d + s * D; }
  // Number of AR coefficients once the seasonal polynomial is expanded
  inline int n_phi() const { return p + s * P; }
  // Number of MA coefficients once the seasonal polynomial is expanded
  inline int n_theta() const { return q + s * Q; }
  // Size of the ARMA part of the state vector
  inline int r() const { return std::max(n_phi(), n_theta() + 1); }
  // Size of the full state vector (differencing + ARMA states)
  inline int rd() const { return n_diff() + r(); }
  // Number of free parameters per series (the trailing +1 is sigma2)
  inline int complexity() const { return p + P + q + Q + k + n_exog + 1; }
  // Whether the series must be differenced before filtering
  inline bool need_diff() const { return static_cast<bool>(d + D); }
};

/**
 * Structure to hold the parameters (makes it easier to pass as an argument)
 * @note: the qualifier const applied to this structure will only guarantee
 *        that the pointers are not changed, but the user can still modify the
 *        arrays when using the pointers directly!
 */
template <typename DataT>
struct ARIMAParams {
  // Per-series parameter arrays (device pointers); nullptr until allocated
  DataT* mu     = nullptr;
  DataT* beta   = nullptr;
  DataT* ar     = nullptr;
  DataT* ma     = nullptr;
  DataT* sar    = nullptr;
  DataT* sma    = nullptr;
  DataT* sigma2 = nullptr;

  /**
   * Allocate all the parameter device arrays
   *
   * @tparam    AllocatorT Type of allocator used
   * @param[in] order      ARIMA order
   * @param[in] batch_size Batch size
   * @param[in] stream     CUDA stream
   * @param[in] tr         Whether these are the transformed parameters
   */
  void allocate(const ARIMAOrder& order, int batch_size, cudaStream_t stream, bool tr = false)
  {
    rmm::mr::device_memory_resource* rmm_alloc = rmm::mr::get_current_device_resource();
    // mu and beta are never transformed, hence only allocated when !tr
    if (order.k && !tr) mu = (DataT*)rmm_alloc->allocate(batch_size * sizeof(DataT), stream);
    if (order.n_exog && !tr)
      beta = (DataT*)rmm_alloc->allocate(order.n_exog * batch_size * sizeof(DataT), stream);
    if (order.p) ar = (DataT*)rmm_alloc->allocate(order.p * batch_size * sizeof(DataT), stream);
    if (order.q) ma = (DataT*)rmm_alloc->allocate(order.q * batch_size * sizeof(DataT), stream);
    if (order.P) sar = (DataT*)rmm_alloc->allocate(order.P * batch_size * sizeof(DataT), stream);
    if (order.Q) sma = (DataT*)rmm_alloc->allocate(order.Q * batch_size * sizeof(DataT), stream);
    sigma2 = (DataT*)rmm_alloc->allocate(batch_size * sizeof(DataT), stream);
  }

  /**
   * Deallocate all the parameter device arrays
   * (must mirror the conditions and sizes used in allocate)
   *
   * @tparam    AllocatorT Type of allocator used
   * @param[in] order      ARIMA order
   * @param[in] batch_size Batch size
   * @param[in] stream     CUDA stream
   * @param[in] tr         Whether these are the transformed parameters
   */
  void deallocate(const ARIMAOrder& order, int batch_size, cudaStream_t stream, bool tr = false)
  {
    rmm::mr::device_memory_resource* rmm_alloc = rmm::mr::get_current_device_resource();
    if (order.k && !tr) rmm_alloc->deallocate(mu, batch_size * sizeof(DataT), stream);
    if (order.n_exog && !tr)
      rmm_alloc->deallocate(beta, order.n_exog * batch_size * sizeof(DataT), stream);
    if (order.p) rmm_alloc->deallocate(ar, order.p * batch_size * sizeof(DataT), stream);
    if (order.q) rmm_alloc->deallocate(ma, order.q * batch_size * sizeof(DataT), stream);
    if (order.P) rmm_alloc->deallocate(sar, order.P * batch_size * sizeof(DataT), stream);
    if (order.Q) rmm_alloc->deallocate(sma, order.Q * batch_size * sizeof(DataT), stream);
    rmm_alloc->deallocate(sigma2, batch_size * sizeof(DataT), stream);
  }

  /**
   * Pack the separate parameter arrays into a unique parameter vector
   *
   * @param[in]  order      ARIMA order
   * @param[in]  batch_size Batch size
   * @param[out] param_vec  Linear array of all parameters grouped by batch
   *                        [mu, ar, ma, sar, sma, sigma2] (device)
   * @param[in]  stream     CUDA stream
   */
  void pack(const ARIMAOrder& order, int batch_size, DataT* param_vec, cudaStream_t stream) const
  {
    int N         = order.complexity();
    auto counting = thrust::make_counting_iterator(0);
    // The device lambda can't capture structure members...
    const DataT *_mu = mu, *_beta = beta, *_ar = ar, *_ma = ma, *_sar = sar, *_sma = sma,
                *_sigma2 = sigma2;
    // One thread per batch member copies its parameters into the packed layout
    thrust::for_each(
      thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
        DataT* param = param_vec + bid * N;
        if (order.k) {
          *param = _mu[bid];
          param++;
        }
        for (int i = 0; i < order.n_exog; i++) {
          param[i] = _beta[order.n_exog * bid + i];
        }
        param += order.n_exog;
        for (int ip = 0; ip < order.p; ip++) {
          param[ip] = _ar[order.p * bid + ip];
        }
        param += order.p;
        for (int iq = 0; iq < order.q; iq++) {
          param[iq] = _ma[order.q * bid + iq];
        }
        param += order.q;
        for (int iP = 0; iP < order.P; iP++) {
          param[iP] = _sar[order.P * bid + iP];
        }
        param += order.P;
        for (int iQ = 0; iQ < order.Q; iQ++) {
          param[iQ] = _sma[order.Q * bid + iQ];
        }
        param += order.Q;
        *param = _sigma2[bid];
      });
  }

  /**
   * Unpack a parameter vector into separate arrays of parameters.
   * (exact inverse of pack: same ordering and offsets)
   *
   * @param[in] order      ARIMA order
   * @param[in] batch_size Batch size
   * @param[in] param_vec  Linear array of all parameters grouped by batch
   *                       [mu, ar, ma, sar, sma, sigma2] (device)
   * @param[in] stream     CUDA stream
   */
  void unpack(const ARIMAOrder& order, int batch_size, const DataT* param_vec, cudaStream_t stream)
  {
    int N         = order.complexity();
    auto counting = thrust::make_counting_iterator(0);
    // The device lambda can't capture structure members...
    DataT *_mu = mu, *_beta = beta, *_ar = ar, *_ma = ma, *_sar = sar, *_sma = sma,
          *_sigma2 = sigma2;
    thrust::for_each(
      thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int bid) {
        const DataT* param = param_vec + bid * N;
        if (order.k) {
          _mu[bid] = *param;
          param++;
        }
        for (int i = 0; i < order.n_exog; i++) {
          _beta[order.n_exog * bid + i] = param[i];
        }
        param += order.n_exog;
        for (int ip = 0; ip < order.p; ip++) {
          _ar[order.p * bid + ip] = param[ip];
        }
        param += order.p;
        for (int iq = 0; iq < order.q; iq++) {
          _ma[order.q * bid + iq] = param[iq];
        }
        param += order.q;
        for (int iP = 0; iP < order.P; iP++) {
          _sar[order.P * bid + iP] = param[iP];
        }
        param += order.P;
        for (int iQ = 0; iQ < order.Q; iQ++) {
          _sma[order.Q * bid + iQ] = param[iQ];
        }
        param += order.Q;
        _sigma2[bid] = *param;
      });
  }
};

/**
 * Structure to manage ARIMA temporary memory allocations
 * @note The user is expected to give a preallocated buffer to the constructor,
 *       and ownership is not transferred to this struct! The buffer must be allocated
 *       as long as the object lives, and deallocated afterwards.
 */
template <typename T, int ALIGN = 256>
struct ARIMAMemory {
  // Named views into the single user-provided buffer (assigned by buf_offsets)
  T *params_mu, *params_beta, *params_ar, *params_ma, *params_sar, *params_sma, *params_sigma2,
    *Tparams_ar, *Tparams_ma, *Tparams_sar, *Tparams_sma, *Tparams_sigma2, *d_params, *d_Tparams,
    *Z_dense, *R_dense, *T_dense, *RQR_dense, *RQ_dense, *P_dense, *alpha_dense, *ImT_dense,
    *ImT_inv_dense, *v_tmp_dense, *m_tmp_dense, *K_dense, *TP_dense, *pred, *y_diff, *exog_diff,
    *loglike, *loglike_base, *loglike_pert, *x_pert, *I_m_AxA_dense, *I_m_AxA_inv_dense, *Ts_dense,
    *RQRs_dense, *Ps_dense;
  // Batched-pointer arrays (one pointer per batch member) into the dense buffers above
  T **Z_batches, **R_batches, **T_batches, **RQR_batches, **RQ_batches, **P_batches,
    **alpha_batches, **ImT_batches, **ImT_inv_batches, **v_tmp_batches, **m_tmp_batches,
    **K_batches, **TP_batches, **I_m_AxA_batches, **I_m_AxA_inv_batches, **Ts_batches,
    **RQRs_batches, **Ps_batches;
  // Pivot/info arrays for the batched matrix inversions
  int *ImT_inv_P, *ImT_inv_info, *I_m_AxA_P, *I_m_AxA_info;

  size_t size;  // Total buffer size in bytes (filled by buf_offsets)

 protected:
  char* buf;  // Non-owning pointer to the user-provided buffer

  // Advance the running size by an ALIGN-rounded chunk; when `assign` is true,
  // also point `ptr` at the chunk's start inside the buffer
  template <bool assign, typename ValType>
  inline void append_buffer(ValType*& ptr, size_t n_elem)
  {
    if (assign) { ptr = reinterpret_cast<ValType*>(buf + size); }
    size += ((n_elem * sizeof(ValType) + ALIGN - 1) / ALIGN) * ALIGN;
  }

  // Walks the full buffer layout once; with assign=false it only computes the
  // required size, with assign=true it also assigns all the member pointers.
  // The order of append_buffer calls IS the buffer layout — do not reorder.
  template <bool assign>
  inline void buf_offsets(const ARIMAOrder& order, int batch_size, int n_obs, char* in_buf = nullptr)
  {
    buf  = in_buf;
    size = 0;
    int r      = order.r();
    int rd     = order.rd();
    int N      = order.complexity();
    int n_diff = order.n_diff();

    append_buffer<assign>(params_mu, order.k * batch_size);
    append_buffer<assign>(params_beta, order.n_exog * batch_size);
    append_buffer<assign>(params_ar, order.p * batch_size);
    append_buffer<assign>(params_ma, order.q * batch_size);
    append_buffer<assign>(params_sar, order.P * batch_size);
    append_buffer<assign>(params_sma, order.Q * batch_size);
    append_buffer<assign>(params_sigma2, batch_size);
    append_buffer<assign>(Tparams_ar, order.p * batch_size);
    append_buffer<assign>(Tparams_ma, order.q * batch_size);
    append_buffer<assign>(Tparams_sar, order.P * batch_size);
    append_buffer<assign>(Tparams_sma, order.Q * batch_size);
    append_buffer<assign>(Tparams_sigma2, batch_size);
    append_buffer<assign>(d_params, N * batch_size);
    append_buffer<assign>(d_Tparams, N * batch_size);
    append_buffer<assign>(Z_dense, rd * batch_size);
    append_buffer<assign>(Z_batches, batch_size);
    append_buffer<assign>(R_dense, rd * batch_size);
    append_buffer<assign>(R_batches, batch_size);
    append_buffer<assign>(T_dense, rd * rd * batch_size);
    append_buffer<assign>(T_batches, batch_size);
    append_buffer<assign>(RQ_dense, rd * batch_size);
    append_buffer<assign>(RQ_batches, batch_size);
    append_buffer<assign>(RQR_dense, rd * rd * batch_size);
    append_buffer<assign>(RQR_batches, batch_size);
    append_buffer<assign>(P_dense, rd * rd * batch_size);
    append_buffer<assign>(P_batches, batch_size);
    append_buffer<assign>(alpha_dense, rd * batch_size);
    append_buffer<assign>(alpha_batches, batch_size);
    append_buffer<assign>(ImT_dense, r * r * batch_size);
    append_buffer<assign>(ImT_batches, batch_size);
    append_buffer<assign>(ImT_inv_dense, r * r * batch_size);
    append_buffer<assign>(ImT_inv_batches, batch_size);
    append_buffer<assign>(ImT_inv_P, r * batch_size);
    append_buffer<assign>(ImT_inv_info, batch_size);
    append_buffer<assign>(v_tmp_dense, rd * batch_size);
    append_buffer<assign>(v_tmp_batches, batch_size);
    append_buffer<assign>(m_tmp_dense, rd * rd * batch_size);
    append_buffer<assign>(m_tmp_batches, batch_size);
    append_buffer<assign>(K_dense, rd * batch_size);
    append_buffer<assign>(K_batches, batch_size);
    append_buffer<assign>(TP_dense, rd * rd * batch_size);
    append_buffer<assign>(TP_batches, batch_size);
    append_buffer<assign>(pred, n_obs * batch_size);
    append_buffer<assign>(y_diff, n_obs * batch_size);
    append_buffer<assign>(exog_diff, n_obs * order.n_exog * batch_size);
    append_buffer<assign>(loglike, batch_size);
    append_buffer<assign>(loglike_base, batch_size);
    append_buffer<assign>(loglike_pert, batch_size);
    append_buffer<assign>(x_pert, N * batch_size);

    if (n_diff > 0) {
      append_buffer<assign>(Ts_dense, r * r * batch_size);
      append_buffer<assign>(Ts_batches, batch_size);
      append_buffer<assign>(RQRs_dense, r * r * batch_size);
      append_buffer<assign>(RQRs_batches, batch_size);
      append_buffer<assign>(Ps_dense, r * r * batch_size);
      append_buffer<assign>(Ps_batches, batch_size);
    }

    if (r <= 5) {
      // Note: temp mem for the direct Lyapunov solver grows very quickly!
      // This solver is used iff the condition above is satisfied
      append_buffer<assign>(I_m_AxA_dense, r * r * r * r * batch_size);
      append_buffer<assign>(I_m_AxA_batches, batch_size);
      append_buffer<assign>(I_m_AxA_inv_dense, r * r * r * r * batch_size);
      append_buffer<assign>(I_m_AxA_inv_batches, batch_size);
      append_buffer<assign>(I_m_AxA_P, r * r * batch_size);
      append_buffer<assign>(I_m_AxA_info, batch_size);
    }
  }

  /** Protected constructor to estimate max size */
  ARIMAMemory(const ARIMAOrder& order, int batch_size, int n_obs)
  {
    buf_offsets<false>(order, batch_size, n_obs);
  }

 public:
  /** Constructor to create pointers from buffer
   * @param[in] order      ARIMA order
   * @param[in] batch_size Number of series in the batch
   * @param[in] n_obs      Length of the series
   * @param[in] in_buf     Pointer to the temporary memory buffer.
   *                       Ownership is retained by the caller
   */
  ARIMAMemory(const ARIMAOrder& order, int batch_size, int n_obs, char* in_buf)
  {
    buf_offsets<true>(order, batch_size, n_obs, in_buf);
  }

  /** Static method to get the size of the required buffer allocation
   * @param[in] order      ARIMA order
   * @param[in] batch_size Number of series in the batch
   * @param[in] n_obs      Length of the series
   * @return Buffer size in bytes
   */
  static size_t compute_size(const ARIMAOrder& order, int batch_size, int n_obs)
  {
    ARIMAMemory temp(order, batch_size, n_obs);
    return temp.size;
  }
};

}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/explainer/tree_shap.hpp
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #include <cstdint> #include <cuml/ensemble/treelite_defs.hpp> #include <memory> #include <variant> namespace ML { namespace Explainer { template <typename T> class TreePathInfo; using TreePathHandle = std::variant<std::shared_ptr<TreePathInfo<float>>, std::shared_ptr<TreePathInfo<double>>>; using FloatPointer = std::variant<float*, double*>; TreePathHandle extract_path_info(ModelHandle model); void gpu_treeshap(TreePathHandle path_info, const FloatPointer data, std::size_t n_rows, std::size_t n_cols, FloatPointer out_preds, std::size_t out_preds_size); void gpu_treeshap_interventional(TreePathHandle path_info, const FloatPointer data, std::size_t n_rows, std::size_t n_cols, const FloatPointer background_data, std::size_t background_n_rows, std::size_t background_n_cols, FloatPointer out_preds, std::size_t out_preds_size); void gpu_treeshap_interactions(TreePathHandle path_info, const FloatPointer data, std::size_t n_rows, std::size_t n_cols, FloatPointer out_preds, std::size_t out_preds_size); void gpu_treeshap_taylor_interactions(TreePathHandle path_info, const FloatPointer data, std::size_t n_rows, std::size_t n_cols, FloatPointer out_preds, std::size_t out_preds_size); } // namespace Explainer } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/explainer/permutation_shap.hpp
/*
 * Copyright (c) 2020-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

namespace raft {
class handle_t;
}

namespace ML {
namespace Explainer {

/**
 * Generates a dataset by tiling the `background` matrix into `out`, while
 * adding a forward and backward permutation pass of the observation `row`
 * on the positions defined by `idx`. Example:
 *
 * background = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
 * idx = [2, 0, 1]
 * row = [100, 101, 102]
 * output:
 * [[  0,   1,   2]
 *  [  3,   4,   5]
 *  [  6,   7,   8]
 *  [  0,   1, 102]
 *  [  3,   4, 102]
 *  [  6,   7, 102]
 *  [100,   1, 102]
 *  [100,   4, 102]
 *  [100,   7, 102]
 *  [100, 101, 102]
 *  [100, 101, 102]
 *  [100, 101, 102]
 *  [100, 101,   2]
 *  [100, 101,   5]
 *  [100, 101,   8]
 *  [  0, 101,   2]
 *  [  3, 101,   5]
 *  [  6, 101,   8]
 *  [  0,   1,   2]
 *  [  3,   4,   5]
 *  [  6,   7,   8]]
 *
 * i.e. (2 * ncols + 1) * nrows_bg rows in total.
 *
 * @param[in]  handle     cuML handle
 * @param[out] dataset    generated data in either row major or column major
 *                        format, depending on the `row_major` parameter [on device]
 *                        [dim = (2 * ncols * nrows_bg + nrows_bg) * ncols]
 * @param[in]  background background data [on device] [dim = ncols * nrows_bg]
 * @param[in]  nrows_bg   number of rows in background dataset
 * @param[in]  ncols      number of columns
 * @param[in]  row        row to scatter in a permuted fashion [dim = ncols]
 * @param[in]  idx        permutation indexes [dim = ncols]
 * @param[in]  row_major  boolean to generate either row or column major data
 */
void permutation_shap_dataset(const raft::handle_t& handle,
                              float* dataset,
                              const float* background,
                              int nrows_bg,
                              int ncols,
                              const float* row,
                              int* idx,
                              bool row_major);

void permutation_shap_dataset(const raft::handle_t& handle,
                              double* dataset,
                              const double* background,
                              int nrows_bg,
                              int ncols,
                              const double* row,
                              int* idx,
                              bool row_major);

/**
 * Generates a main-effect dataset by tiling the `background` matrix into
 * `out`, while scattering one entry of `row` at a time onto the positions
 * defined by `idx` (each feature is perturbed individually, unlike the
 * forward/backward passes of permutation_shap_dataset). Example:
 *
 * background = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
 * idx = [2, 0, 1]
 * row = [100, 101, 102]
 * output:
 * [[  0,   1,   2]
 *  [  3,   4,   5]
 *  [  6,   7,   8]
 *  [  0,   1, 102]
 *  [  3,   4, 102]
 *  [  6,   7, 102]
 *  [100,   1,   2]
 *  [100,   4,   5]
 *  [100,   7,   8]
 *  [  0, 101,   2]
 *  [  3, 101,   5]
 *  [  6, 101,   8]]
 *
 * @param[in]  handle     cuML handle
 * @param[out] dataset    generated data [on device] [dim = (2 * ncols * nrows_bg + nrows_bg) *
 *                        ncols]
 *                        NOTE(review): the example above produces
 *                        (ncols + 1) * nrows_bg rows, so the stated dim looks
 *                        copy-pasted from permutation_shap_dataset — confirm
 *                        against the implementation.
 * @param[in]  background background data [on device] [dim = ncols * nrows_bg]
 * @param[in]  nrows_bg   number of rows in background dataset
 * @param[in]  ncols      number of columns
 * @param[in]  row        row to scatter in a permuted fashion [dim = ncols]
 * @param[in]  idx        permutation indexes [dim = ncols]
 * @param[in]  row_major  boolean to generate either row or column major data
 */
void shap_main_effect_dataset(const raft::handle_t& handle,
                              float* dataset,
                              const float* background,
                              int nrows_bg,
                              int ncols,
                              const float* row,
                              int* idx,
                              bool row_major);

void shap_main_effect_dataset(const raft::handle_t& handle,
                              double* dataset,
                              const double* background,
                              int nrows_bg,
                              int ncols,
                              const double* row,
                              int* idx,
                              bool row_major);

/**
 * Function that aggregates the averaged model results obtained on the
 * permutation dataset to estimate the SHAP values.
 * It is equivalent to the Python code:
 * for i,ind in enumerate(idx):
 *     shap_values[ind] += y_hat[i + 1] - y_hat[i]
 * for i,ind in enumerate(idx):
 *     shap_values[ind] += y_hat[i + ncols] - y_hat[i + ncols + 1]
 *
 * @param[in]  handle      cuML handle
 * @param[out] shap_values Array where the results are aggregated [dim = ncols]
 * @param[in]  y_hat       Results to use for the aggregation [dim = ncols + 1]
 *                         NOTE(review): the Python equivalent above indexes up
 *                         to y_hat[2 * ncols], which suggests
 *                         dim = 2 * ncols + 1 — confirm against the caller.
 * @param[in]  ncols       number of columns
 * @param[in]  idx         permutation indexes [dim = ncols]
 */
void update_perm_shap_values(const raft::handle_t& handle,
                             float* shap_values,
                             const float* y_hat,
                             const int ncols,
                             const int* idx);

void update_perm_shap_values(const raft::handle_t& handle,
                             double* shap_values,
                             const double* y_hat,
                             const int ncols,
                             const int* idx);

}  // namespace Explainer
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/explainer/kernel_shap.hpp
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdint> namespace raft { class handle_t; } namespace ML { namespace Explainer { /** * Generates samples of dataset for kernel shap algorithm. * * * @param[in] handle cuML handle * @param[inout] X generated data [on device] 1-0 (row major) * @param[in] nrows_X number of rows in X * @param[in] ncols number of columns in X, background and dataset * @param[in] background background data [on device] * @param[in] nrows_background number of rows in background dataset * @param[out] dataset generated data [on device] observation=background (row major) * @param[in] observation row to scatter * @param[in] nsamples vector with number of entries that are randomly sampled * @param[in] len_nsamples number of entries to be sampled * @param[in] maxsample size of the biggest sampled observation * @param[in] seed Seed for the random number generator * * Kernel distrubutes exact part of the kernel shap dataset * Each block scatters the data of a row of `observations` into the (number of rows of * background) in `dataset`, based on the row of `X`. * So, given: * background = [[0, 1, 2], [3, 4, 5]] * observation = [100, 101, 102] * X = [[1, 0, 1], * [0, 1, 1]] * * dataset (output): * [[100, 1, 102], * [100, 4, 102] * [0, 101, 102], * [3, 101, 102]] * The first thread of each block calculates the sampling of `k` entries of `observation` * to scatter into `dataset`. 
Afterwards each block scatters the data of a row of `X` into * the (number of rows of background) in `dataset`. * So, given: * background = [[0, 1, 2, 3], * [5, 6, 7, 8]] * observation = [100, 101, 102, 103] * nsamples = [3, 2] * * X (output) * [[1, 0, 1, 1], * [0, 1, 1, 0]] * * dataset (output): * [[100, 1, 102, 103], * [100, 6, 102, 103] * [0, 101, 102, 3], * [5, 101, 102, 8]] */ void kernel_dataset(const raft::handle_t& handle, float* X, int nrows_X, int ncols, float* background, int nrows_background, float* dataset, float* observation, int* nsamples, int len_nsamples, int maxsample, uint64_t seed = 0ULL); void kernel_dataset(const raft::handle_t& handle, float* X, int nrows_X, int ncols, double* background, int nrows_background, double* dataset, double* observation, int* nsamples, int len_nsamples, int maxsample, uint64_t seed = 0ULL); } // namespace Explainer } // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/tree/algo_helper.h
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

namespace ML {

/**
 * Split criteria available to the decision-tree builders.
 * Values are spelled out explicitly to pin the numeric encoding, since these
 * enumerators cross language/API boundaries.
 */
enum CRITERION {
  GINI             = 0,  ///< Gini impurity (classification)
  ENTROPY          = 1,  ///< information gain / entropy (classification)
  MSE              = 2,  ///< mean squared error (regression)
  MAE              = 3,  ///< mean absolute error
  POISSON          = 4,  ///< Poisson deviance
  GAMMA            = 5,  ///< gamma deviance
  INVERSE_GAUSSIAN = 6,  ///< inverse-Gaussian deviance
  CRITERION_END    = 7,  ///< sentinel / "unset" marker, not a real criterion
};

};  // namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/tree/decisiontree.hpp
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "algo_helper.h" #include "flatnode.h" #include <string> #include <vector> namespace ML { namespace DT { struct DecisionTreeParams { /** * Maximum tree depth. Unlimited (e.g., until leaves are pure), If `-1`. */ int max_depth; /** * Maximum leaf nodes per tree. Soft constraint. Unlimited, If `-1`. */ int max_leaves; /** * Ratio of number of features (columns) to consider per node split. */ float max_features; /** * maximum number of bins used by the split algorithm per feature. */ int max_n_bins; /** * The minimum number of samples (rows) in each leaf node. */ int min_samples_leaf; /** * The minimum number of samples (rows) needed to split an internal node. */ int min_samples_split; /** * Node split criterion. GINI and Entropy for classification, MSE for regression. */ CRITERION split_criterion; /** * Minimum impurity decrease required for splitting a node. If the impurity decrease is below this * value, node is leafed out. Default is 0.0 */ float min_impurity_decrease = 0.0f; /** * Maximum number of nodes that can be processed in a given batch. This is * used only for batched-level algo */ int max_batch_size; }; /** * @brief Set all DecisionTreeParams members. 
* @param[in,out] params: update with tree parameters * @param[in] cfg_max_depth: maximum tree depth; default -1 * @param[in] cfg_max_leaves: maximum leaves; default -1 * @param[in] cfg_max_features: maximum number of features; default 1.0f * @param[in] cfg_max_n_bins: maximum number of bins; default 128 * @param[in] cfg_min_samples_leaf: min. rows in each leaf node; default 1 * @param[in] cfg_min_samples_split: min. rows needed to split an internal node; * default 2 * @param[in] cfg_min_impurity_decrease: split a node only if its reduction in * impurity is more than this value * @param[in] cfg_split_criterion: split criterion; default CRITERION_END, * i.e., GINI for classification or MSE for regression * @param[in] cfg_max_batch_size: Maximum number of nodes that can be processed in a batch. This is used only for batched-level algo. Default value 4096. */ void set_tree_params(DecisionTreeParams& params, int cfg_max_depth = -1, int cfg_max_leaves = -1, float cfg_max_features = 1.0f, int cfg_max_n_bins = 128, int cfg_min_samples_leaf = 1, int cfg_min_samples_split = 2, float cfg_min_impurity_decrease = 0.0f, CRITERION cfg_split_criterion = CRITERION_END, int cfg_max_batch_size = 4096); template <class T, class L> struct TreeMetaDataNode { int treeid; int depth_counter; int leaf_counter; double train_time; std::vector<T> vector_leaf; std::vector<SparseTreeNode<T, L>> sparsetree; int num_outputs; }; /** * @brief Obtain high-level tree information. * @tparam T: data type for input data (float or double). * @tparam L: data type for labels (int type for classification, T type for regression). * @param[in] tree: CPU pointer to TreeMetaDataNode * @return High-level tree information as string */ template <class T, class L> std::string get_tree_summary_text(const TreeMetaDataNode<T, L>* tree); /** * @brief Obtain detailed tree information. * @tparam T: data type for input data (float or double). 
* @tparam L: data type for labels (int type for classification, T type for regression). * @param[in] tree: CPU pointer to TreeMetaDataNode * @return Detailed tree information as string */ template <class T, class L> std::string get_tree_text(const TreeMetaDataNode<T, L>* tree); /** * @brief Export tree as a JSON string * @tparam T: data type for input data (float or double). * @tparam L: data type for labels (int type for classification, T type for regression). * @param[in] tree: CPU pointer to TreeMetaDataNode * @return Tree structure as JSON stsring */ template <class T, class L> std::string get_tree_json(const TreeMetaDataNode<T, L>* tree); typedef TreeMetaDataNode<float, int> TreeClassifierF; typedef TreeMetaDataNode<double, int> TreeClassifierD; typedef TreeMetaDataNode<float, float> TreeRegressorF; typedef TreeMetaDataNode<double, double> TreeRegressorD; } // End namespace DT } // End namespace ML
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/tree/flatnode.h
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

// FLATNODE_HD marks members as host+device callable when compiled by nvcc and
// expands to nothing under a plain host compiler.
#ifdef __CUDACC__
#define FLATNODE_HD __host__ __device__
#else
#define FLATNODE_HD
#endif

/**
 * A node in a Decision Tree, stored in a flat (array-based) layout.
 * A node is either a split (left_child_id >= 0, children stored at
 * left_child_id and left_child_id + 1) or a leaf (left_child_id == -1).
 * Instances are created only through the named factory functions.
 *
 * @tparam DataT  feature / threshold value type
 * @tparam LabelT label type
 * @tparam IdxT   type used for indexing operations
 */
template <typename DataT, typename LabelT, typename IdxT = int>
struct SparseTreeNode {
 private:
  IdxT colid            = 0;        // feature (column) used by the split
  DataT quesval         = DataT(0); // split threshold ("question value")
  DataT best_metric_val = DataT(0); // metric value of the chosen split
  IdxT left_child_id    = -1;       // index of left child; -1 marks a leaf
  IdxT instance_count   = 0;        // training samples reaching this node

  // Private: use CreateSplitNode / CreateLeafNode instead.
  FLATNODE_HD SparseTreeNode(IdxT col, DataT ques, DataT metric, int64_t left_id, IdxT count)
    : colid(col), quesval(ques), best_metric_val(metric), left_child_id(left_id), instance_count(count)
  {
  }

 public:
  FLATNODE_HD IdxT ColumnId() const { return colid; }
  FLATNODE_HD DataT QueryValue() const { return quesval; }
  FLATNODE_HD DataT BestMetric() const { return best_metric_val; }
  FLATNODE_HD int64_t LeftChildId() const { return left_child_id; }
  FLATNODE_HD int64_t RightChildId() const { return left_child_id + 1; }
  FLATNODE_HD IdxT InstanceCount() const { return instance_count; }

  /** Build an internal (split) node. */
  FLATNODE_HD static SparseTreeNode CreateSplitNode(
    IdxT colid, DataT quesval, DataT best_metric_val, int64_t left_child_id, IdxT instance_count)
  {
    return SparseTreeNode<DataT, LabelT>(
      colid, quesval, best_metric_val, left_child_id, instance_count);
  }

  /** Build a leaf node carrying only the sample count. */
  FLATNODE_HD static SparseTreeNode CreateLeafNode(IdxT instance_count)
  {
    return SparseTreeNode<DataT, LabelT>(0, 0, 0, -1, instance_count);
  }

  FLATNODE_HD bool IsLeaf() const { return left_child_id == -1; }

  /** Field-wise equality. */
  bool operator==(const SparseTreeNode& other) const
  {
    return colid == other.colid && quesval == other.quesval &&
           best_metric_val == other.best_metric_val && left_child_id == other.left_child_id &&
           instance_count == other.instance_count;
  }
};
0
rapidsai_public_repos/cuml/cpp/include/cuml
rapidsai_public_repos/cuml/cpp/include/cuml/neighbors/knn_mg.hpp
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/handle.hpp> #include <vector> #include <cumlprims/opg/matrix/data.hpp> #include <cumlprims/opg/matrix/part_descriptor.hpp> namespace ML { namespace KNN { namespace opg { using namespace MLCommon; /** * Performs a multi-node multi-GPU KNN. * @param[in] handle the raft::handle_t to use for managing resources * @param[out] out_I vector of output index partitions. size should match the * number of local input partitions. * @param[out] out_D vector of output distance partitions. size should match * the number of local input partitions. * @param[in] idx_data vector of local indices to query * @param[in] idx_desc describes how the index partitions are distributed * across the ranks. * @param[in] query_data vector of local query partitions * @param[in] query_desc describes how the query partitions are distributed * across the cluster. * @param[in] rowMajorIndex boolean indicating whether the index is row major. * @param[in] rowMajorQuery boolean indicating whether the query is row major. 
* @param[in] k the number of neighbors to query * @param[in] batch_size the max number of rows to broadcast at a time * @param[in] verbose print extra logging info */ void knn(raft::handle_t& handle, std::vector<Matrix::Data<int64_t>*>* out_I, std::vector<Matrix::floatData_t*>* out_D, std::vector<Matrix::floatData_t*>& idx_data, Matrix::PartDescriptor& idx_desc, std::vector<Matrix::floatData_t*>& query_data, Matrix::PartDescriptor& query_desc, bool rowMajorIndex, bool rowMajorQuery, int k, size_t batch_size, bool verbose); /** * Performs a multi-node multi-GPU KNN classify. * @param[in] handle the raft::handle_t to use for managing resources * @param[out] out vector of output labels partitions. size should match the * number of local input partitions. * @param[in] probas (optional) pointer to a vector containing arrays of probabilities * @param[in] idx_data vector of local indices to query * @param[in] idx_desc describes how the index partitions are distributed * across the ranks. * @param[in] query_data vector of local query partitions * @param[in] query_desc describes how the query partitions are distributed * across the cluster. * @param[in] y vector of vector of label arrays. for multilabel classification, each * element in the vector is a different "output" array of labels corresponding * to the i'th output. size should match the number of local input partitions. * @param[in] uniq_labels vector of the sorted unique labels for each array in y * @param[in] n_unique vector of sizes for each array in uniq_labels * @param[in] rowMajorIndex boolean indicating whether the index is row major. * @param[in] rowMajorQuery boolean indicating whether the query is row major. 
* @param[in] probas_only return probas instead of performing complete knn_classify * @param[in] k the number of neighbors to query * @param[in] batch_size the max number of rows to broadcast at a time * @param[in] verbose print extra logging info */ void knn_classify(raft::handle_t& handle, std::vector<Matrix::Data<int>*>* out, std::vector<std::vector<float*>>* probas, std::vector<Matrix::floatData_t*>& idx_data, Matrix::PartDescriptor& idx_desc, std::vector<Matrix::floatData_t*>& query_data, Matrix::PartDescriptor& query_desc, std::vector<std::vector<int*>>& y, std::vector<int*>& uniq_labels, std::vector<int>& n_unique, bool rowMajorIndex = false, bool rowMajorQuery = false, bool probas_only = false, int k = 10, size_t batch_size = 1 << 15, bool verbose = false); /** * Performs a multi-node multi-GPU KNN regress. * @param[in] handle the raft::handle_t to use for managing resources * @param[out] out vector of output partitions. size should match the * number of local input partitions. * @param[in] idx_data vector of local indices to query * @param[in] idx_desc describes how the index partitions are distributed * across the ranks. * @param[in] query_data vector of local query partitions * @param[in] query_desc describes how the query partitions are distributed * across the cluster. * @param[in] y vector of vector of output arrays. for multi-output regression, each * element in the vector is a different "output" array corresponding * to the i'th output. size should match the number of local input partitions. * @param[in] rowMajorIndex boolean indicating whether the index is row major. * @param[in] rowMajorQuery boolean indicating whether the query is row major. 
* @param[in] k the number of neighbors to query * @param[in] n_outputs number of outputs * @param[in] batch_size the max number of rows to broadcast at a time * @param[in] verbose print extra logging info */ void knn_regress(raft::handle_t& handle, std::vector<Matrix::Data<float>*>* out, std::vector<Matrix::floatData_t*>& idx_data, Matrix::PartDescriptor& idx_desc, std::vector<Matrix::floatData_t*>& query_data, Matrix::PartDescriptor& query_desc, std::vector<std::vector<float*>>& y, bool rowMajorIndex, bool rowMajorQuery, int k, int n_outputs, size_t batch_size, bool verbose); }; // END namespace opg }; // namespace KNN }; // namespace ML
0