repo_id
stringlengths
21
96
file_path
stringlengths
31
155
content
stringlengths
1
92.9M
__index_level_0__
int64
0
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/svm/svc.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>

#include "kernelcache.cuh"
#include "smosolver.cuh"
#include "svc_impl.cuh"

#include <cuml/svm/svc.hpp>

#include <raft/core/handle.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/distance/kernels.cuh>
#include <raft/label/classlabels.cuh>
#include <raft/linalg/unary_op.cuh>

namespace ML {
namespace SVM {

using namespace MLCommon;
using namespace raft::distance::kernels;

// Explicit instantiations of the free-standing fit/predict entry points so the
// shared library exports float and double versions.

template void svcFit<float>(const raft::handle_t& handle,
                            float* input,
                            int n_rows,
                            int n_cols,
                            float* labels,
                            const SvmParameter& param,
                            KernelParams& kernel_params,
                            SvmModel<float>& model,
                            const float* sample_weight);

template void svcFit<double>(const raft::handle_t& handle,
                             double* input,
                             int n_rows,
                             int n_cols,
                             double* labels,
                             const SvmParameter& param,
                             KernelParams& kernel_params,
                             SvmModel<double>& model,
                             const double* sample_weight);

template void svcFitSparse<float>(const raft::handle_t& handle,
                                  int* indptr,
                                  int* indices,
                                  float* data,
                                  int n_rows,
                                  int n_cols,
                                  int nnz,
                                  float* labels,
                                  const SvmParameter& param,
                                  KernelParams& kernel_params,
                                  SvmModel<float>& model,
                                  const float* sample_weight);

template void svcFitSparse<double>(const raft::handle_t& handle,
                                   int* indptr,
                                   int* indices,
                                   double* data,
                                   int n_rows,
                                   int n_cols,
                                   int nnz,
                                   double* labels,
                                   const SvmParameter& param,
                                   KernelParams& kernel_params,
                                   SvmModel<double>& model,
                                   const double* sample_weight);

template void svcPredict<float>(const raft::handle_t& handle,
                                float* input,
                                int n_rows,
                                int n_cols,
                                KernelParams& kernel_params,
                                const SvmModel<float>& model,
                                float* preds,
                                float buffer_size,
                                bool predict_class);

template void svcPredict<double>(const raft::handle_t& handle,
                                 double* input,
                                 int n_rows,
                                 int n_cols,
                                 KernelParams& kernel_params,
                                 const SvmModel<double>& model,
                                 double* preds,
                                 double buffer_size,
                                 bool predict_class);

template void svcPredictSparse<float>(const raft::handle_t& handle,
                                      int* indptr,
                                      int* indices,
                                      float* data,
                                      int n_rows,
                                      int n_cols,
                                      int nnz,
                                      KernelParams& kernel_params,
                                      const SvmModel<float>& model,
                                      float* preds,
                                      float buffer_size,
                                      bool predict_class);

template void svcPredictSparse<double>(const raft::handle_t& handle,
                                       int* indptr,
                                       int* indices,
                                       double* data,
                                       int n_rows,
                                       int n_cols,
                                       int nnz,
                                       KernelParams& kernel_params,
                                       const SvmModel<double>& model,
                                       double* preds,
                                       double buffer_size,
                                       bool predict_class);

template void svmFreeBuffers(const raft::handle_t& handle, SvmModel<float>& m);
template void svmFreeBuffers(const raft::handle_t& handle, SvmModel<double>& m);

/**
 * Construct an SVC estimator.
 *
 * Stores the SMO solver parameters and initializes an empty model; no device
 * buffers are allocated until fit() is called.
 */
template <typename math_t>
SVC<math_t>::SVC(raft::handle_t& handle,
                 math_t C,
                 math_t tol,
                 KernelParams kernel_params,
                 math_t cache_size,
                 int max_iter,
                 int nochange_steps,
                 int verbosity)
  : handle(handle),
    param(SvmParameter{C, cache_size, max_iter, nochange_steps, tol, verbosity}),
    kernel_params(kernel_params)
{
  // Start from an empty model so the destructor / refit can safely test the
  // pointers before freeing.
  model.n_support      = 0;
  model.dual_coefs     = nullptr;
  model.support_matrix = {};
  model.support_idx    = nullptr;
  model.unique_labels  = nullptr;
}

template <typename math_t>
SVC<math_t>::~SVC()
{
  // Release all device buffers owned by the model.
  svmFreeBuffers(handle, model);
}

/**
 * Fit the classifier on dense, column-major training data.
 *
 * Refitting is supported: buffers from a previous fit are released first.
 */
template <typename math_t>
void SVC<math_t>::fit(
  math_t* input, int n_rows, int n_cols, math_t* labels, const math_t* sample_weight)
{
  model.n_cols = n_cols;
  if (model.dual_coefs) { svmFreeBuffers(handle, model); }
  svcFit(handle, input, n_rows, n_cols, labels, param, kernel_params, model, sample_weight);
}

/** Predict class labels for dense input (predict_class = true). */
template <typename math_t>
void SVC<math_t>::predict(math_t* input, int n_rows, int n_cols, math_t* preds)
{
  // Reuse the training-time cache size as the kernel-evaluation buffer budget.
  math_t kernel_buffer_size = param.cache_size;
  svcPredict(handle, input, n_rows, n_cols, kernel_params, model, preds, kernel_buffer_size, true);
}

/** Evaluate the decision function values for dense input (no thresholding). */
template <typename math_t>
void SVC<math_t>::decisionFunction(math_t* input, int n_rows, int n_cols, math_t* preds)
{
  math_t kernel_buffer_size = param.cache_size;
  svcPredict(handle, input, n_rows, n_cols, kernel_params, model, preds, kernel_buffer_size, false);
}

// Instantiate templates for the shared library
template class SVC<float>;
template class SVC<double>;

};  // namespace SVM
};  // end namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/svm/workingset.cuh
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "smo_sets.cuh"
#include "ws_util.cuh"

#include <cuml/common/logger.hpp>
#include <cuml/svm/svm_parameter.h>

#include <raft/core/handle.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/init.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cuda_utils.cuh>

#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>

#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/permutation_iterator.h>

#include <cub/cub.cuh>

#include <algorithm>
#include <cstddef>
#include <limits>

namespace ML {
namespace SVM {

namespace {
// placeholder function passed to configuration call to Cub::DeviceSelect
__device__ bool always_true(int) { return true; }
}  // end unnamed namespace

/**
 * Working set selection for the SMO algorithm.
 *
 * The working set is a subset of the training vectors, by default it has 1024 elements.
 * At every outer iteration in SmoSolver::Solve, we select a different working set, and
 * optimize the dual coefficients for the working set.
 *
 * The vectors are selected based on the f values, which is the difference between the
 * target label and the decision function value.
 */
template <typename math_t>
class WorkingSet {
 public:
  //!> Workspace selection strategy, note that only FIFO is tested so far
  bool FIFO_strategy = true;

  /**
   * @brief Manage a working set.
   *
   * @param handle cuml handle implementation
   * @param stream cuda stream for working set operations
   * @param n_rows number of training vectors
   * @param n_ws number of elements in the working set (default 1024)
   * @param svmType classification or regression
   */
  WorkingSet(const raft::handle_t& handle,
             cudaStream_t stream,
             int n_rows      = 0,
             int n_ws        = 0,
             SvmType svmType = C_SVC)
    : handle(handle),
      stream(stream),
      svmType(svmType),
      n_rows(n_rows),
      available(0, stream),
      available_sorted(0, stream),
      cub_storage(0, stream),
      f_idx(0, stream),
      f_idx_sorted(0, stream),
      f_sorted(0, stream),
      idx_tmp(0, stream),
      idx(0, stream),
      ws_idx_sorted(0, stream),
      ws_idx_selected(0, stream),
      ws_idx_save(0, stream),
      ws_priority(0, stream),
      ws_priority_sorted(0, stream),
      d_num_selected(stream)
  {
    // For epsilon regression every vector appears twice (alpha+ and alpha-).
    n_train = (svmType == EPSILON_SVR) ? n_rows * 2 : n_rows;
    SetSize(n_train, n_ws);
  }

  ~WorkingSet() {}

  /**
   * @brief Set the size of the working set and allocate buffers accordingly.
   *
   * @param n_train number of training vectors
   * @param n_ws working set size (default min(1024, n_train))
   */
  void SetSize(int n_train, int n_ws = 0)
  {
    if (n_ws == 0 || n_ws > n_train) { n_ws = n_train; }
    n_ws       = std::min(1024, n_ws);
    this->n_ws = n_ws;
    CUML_LOG_DEBUG("Creating working set with %d elements", n_ws);
    AllocateBuffers();
  }

  /** Return the size of the working set. */
  int GetSize() { return n_ws; }

  /**
   * @brief Return a device pointer to the the working set indices.
   *
   * The returned array is owned by WorkingSet.
   */
  int* GetIndices() { return idx.data(); }

  /**
   * @brief Select new elements for a working set.
   *
   * Here we follow the working set selection strategy by Joachims [1], we
   * select n training instances as:
   * - select n/2 element of upper set, where f is largest
   * - select n/2 from lower set, where f is smallest
   *
   * The difference compared to Joachims' strategy is that we can already have
   * some elements selected by a different strategy, therefore we select only
   * n = n_ws - n_already_selected.
   *
   * References:
   * [1] Joachims, T. (1998). Making large-scale support vector machine learning
   *     practical. In B. Scholkopf, C. Burges, & A. Smola (Eds.), Advances in
   *     kernel methods: Support vector machines. Cambridge, MA: MIT Press
   *
   * @param f optimality indicator vector, size [n_train]
   * @param alpha dual coefficients, size [n_train]
   * @param y target labels (+/- 1)
   * @param C penalty parameter vector size [n_train]
   * @param n_already_selected
   */
  void SimpleSelect(
    math_t* f, math_t* alpha, math_t* y, const math_t* C, int n_already_selected = 0)
  {
    // We are not using the topK kernel, because of the additional lower/upper
    // constraint
    int n_needed = n_ws - n_already_selected;

    // Zero the priority of the elements that will be newly selected
    RAFT_CUDA_TRY(
      cudaMemsetAsync(ws_priority.data() + n_already_selected, 0, n_needed * sizeof(int), stream));

    cub::DeviceRadixSort::SortPairs((void*)cub_storage.data(),
                                    cub_bytes,
                                    f,
                                    f_sorted.data(),
                                    f_idx.data(),
                                    f_idx_sorted.data(),
                                    n_train,
                                    0,
                                    (int)8 * sizeof(math_t),
                                    stream);
    if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) {
      std::stringstream ss;
      raft::print_device_vector("idx_sorted", f_idx_sorted.data(), n_train, ss);
      CUML_LOG_DEBUG(ss.str().c_str());
    }
    // Select n_ws/2 elements from the upper set with the smallest f value
    bool* available = this->available.data();
    set_upper<<<raft::ceildiv(n_train, TPB), TPB, 0, stream>>>(available, n_train, alpha, y, C);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    n_already_selected += GatherAvailable(n_already_selected, n_needed / 2, true);

    // Select n_ws/2 elements from the lower set with the highest f values
    set_lower<<<raft::ceildiv(n_train, TPB), TPB, 0, stream>>>(available, n_train, alpha, y, C);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    n_already_selected += GatherAvailable(n_already_selected, n_ws - n_already_selected, false);

    // In case we could not find enough elements, then we just fill using the
    // still available elements.
    if (n_already_selected < n_ws) {
      CUML_LOG_WARN(
        "Warning: could not fill working set, found only %d"
        " elements",
        n_already_selected);
      CUML_LOG_DEBUG("Filling up with unused elements");
      // NOTE: use the async variant on this->stream; a plain cudaMemset runs on
      // the default stream and is not ordered w.r.t. the kernels/copies above.
      RAFT_CUDA_TRY(cudaMemsetAsync(available, 1, sizeof(bool) * n_train, stream));
      n_already_selected += GatherAvailable(n_already_selected, n_ws - n_already_selected, true);
    }
  }

  /**
   * @brief Select working set indices.
   *
   * To avoid training vectors oscillating in and out of the working set, we
   * keep half of the previous working set, and fill new elements only to the
   * other half.
   *
   * We can have a FIFO retention policy, or we can
   * consider the time (=ws_priority) a vector already spent in the ws.
   * References:
   * [1] Z. Wen et al. ThunderSVM: A Fast SVM Library on GPUs and CPUs, Journal
   *     of Machine Learning Research, 19, 1-5 (2018)
   *
   * @param f optimality indicator vector, size [n_train]
   * @param alpha dual coefficients, size [n_train]
   * @param y class labels, size [n_train]
   * @param C penalty parameter vector, size [n_train]
   */
  void Select(math_t* f, math_t* alpha, math_t* y, const math_t* C)
  {
    if (n_ws >= n_train) {
      // All elements are selected, we have initialized idx to cover this case
      return;
    }
    int nc         = n_ws / 4;
    int n_selected = 0;
    if (firstcall) {
      if (nc >= 1) {
        firstcall = false;
      } else {
        // This can only happen for n_ws < 4.
        // We keep the calculation always in firstcall mode (only SimpleSelect
        // is used, no advanced strategies because we do not have enough elements)
        //
        // Nothing to do, firstcall is already true
      }
    } else {
      // keep 1/2 of the old working set
      if (FIFO_strategy) {
        // FIFO selection following ThunderSVM
        raft::copy(idx.data(), ws_idx_save.data() + 2 * nc, 2 * nc, stream);
        n_selected = nc * 2;
      } else {
        // priority based selection preferring to keep newer elements in ws
        n_selected = PrioritySelect(alpha, C, nc);
      }
    }
    SimpleSelect(f, alpha, y, C, n_selected);
    raft::copy(ws_idx_save.data(), idx.data(), n_ws, stream);
  }

  /**
   * @brief Select elements from the previous working set based on their priority.
   *
   * We sort the old working set based on their priority in ascending order,
   * and then select nc elements from free, and then lower/upper bound vectors.
   * For details see [2].
   *
   * See Issue #946.
   *
   * References:
   * [2] T Serafini, L Zanni: On the Working Set selection in grad. projection
   *     based decomposition techniques for Support Vector Machines
   *     DOI: 10.1080/10556780500140714
   *
   * @param [in] alpha device vector of dual coefficients, size [n_train]
   * @param [in] C penalty parameter vector
   * @param [in] nc number of elements to select
   */
  int PrioritySelect(math_t* alpha, const math_t* C, int nc)
  {
    int n_selected = 0;

    cub::DeviceRadixSort::SortPairs((void*)cub_storage.data(),
                                    cub_bytes,
                                    ws_priority.data(),
                                    ws_priority_sorted.data(),
                                    idx.data(),
                                    ws_idx_sorted.data(),
                                    n_ws,
                                    0,
                                    sizeof(int) * 8,
                                    stream);
    // Select first from free vectors (0<alpha<C)
    n_selected += SelectPrevWs(
      2 * nc, n_selected, [alpha, C] HD(int idx) { return 0 < alpha[idx] && alpha[idx] < C[idx]; });
    // then from lower bound (alpha=0)
    n_selected += SelectPrevWs(2 * nc, n_selected, [alpha] HD(int idx) { return alpha[idx] <= 0; });
    // and in the end from upper bound vectors (alpha=c)
    n_selected +=
      SelectPrevWs(2 * nc, n_selected, [alpha, C] HD(int idx) { return alpha[idx] >= C[idx]; });
    // we have now idx[0:n_selected] indices from the old working set
    // we need to update their priority.
    update_priority<<<raft::ceildiv(n_selected, TPB), TPB, 0, stream>>>(ws_priority.data(),
                                                                        n_selected,
                                                                        idx.data(),
                                                                        n_ws,
                                                                        ws_idx_sorted.data(),
                                                                        ws_priority_sorted.data());
    return n_selected;
  }

 private:
  const raft::handle_t& handle;
  cudaStream_t stream;

  bool firstcall = true;
  int n_train    = 0;  ///< number of training vectors (including duplicates for SVR)
  int n_rows     = 0;  ///< number of original training vectors (no duplicates)
  int n_ws       = 0;

  SvmType svmType;

  int TPB = 256;  //!< Threads per block for workspace selection kernels

  // Buffers for the domain size [n_train]
  rmm::device_uvector<int> f_idx;  //!< Helper index array used for sorting
  rmm::device_uvector<int> f_idx_sorted;
  //! Temporary buffer for index manipulation
  rmm::device_uvector<int> idx_tmp;
  rmm::device_uvector<math_t> f_sorted;
  //! Flag vectors available for selection
  rmm::device_uvector<bool> available;
  rmm::device_uvector<bool> available_sorted;

  // working set buffers size [n_ws]
  rmm::device_uvector<int> idx;  //!< Indices of the working set
  rmm::device_uvector<int> ws_idx_sorted;
  rmm::device_uvector<int> ws_idx_selected;
  rmm::device_uvector<int> ws_idx_save;
  rmm::device_uvector<int> ws_priority;
  rmm::device_uvector<int> ws_priority_sorted;

  rmm::device_scalar<int> d_num_selected;
  std::size_t cub_bytes = 0;
  rmm::device_uvector<char> cub_storage;

  /** Resize all device buffers for the current n_train / n_ws and size the
   *  shared CUB temporary storage for both SortPairs and Select. */
  void AllocateBuffers()
  {
    if (n_ws > 0) {
      f_idx.resize(n_train, stream);
      f_idx_sorted.resize(n_train, stream);
      idx_tmp.resize(n_train, stream);
      f_sorted.resize(n_train, stream);
      available.resize(n_train, stream);
      available_sorted.resize(n_train, stream);

      idx.resize(n_ws, stream);
      ws_idx_sorted.resize(n_ws, stream);
      ws_idx_save.resize(n_ws, stream);
      ws_idx_selected.resize(n_ws, stream);
      ws_priority.resize(n_ws, stream);
      ws_priority_sorted.resize(n_ws, stream);

      // Determine temporary device storage requirements for cub
      std::size_t cub_bytes2 = 0;
      cub::DeviceRadixSort::SortPairs(nullptr,
                                      cub_bytes,
                                      f_sorted.data(),
                                      f_sorted.data(),
                                      f_idx.data(),
                                      f_idx_sorted.data(),
                                      n_train,
                                      0,
                                      8 * sizeof(math_t),
                                      stream);
      cub::DeviceSelect::If(nullptr,
                            cub_bytes2,
                            f_idx.data(),
                            f_idx.data(),
                            d_num_selected.data(),
                            n_train,
                            always_true,
                            stream);
      cub_bytes = std::max(cub_bytes, cub_bytes2);
      cub_storage.resize(cub_bytes, stream);
      Initialize();
    }
  }

  /**
   * @brief Gather available elements from the working set.
   *
   * We select the first (last) n_needed element from the front (end) of
   * f_idx_sorted. We ignore the elements that are already selected, and those
   * where this->available is false.
   *
   * @param n_already_selected number of element already selected (their indices
   *   are stored in idx[0:n_already_selected])
   * @param n_needed number of elements to be selected
   * @param copy_front if true, then copy the elements from the front of the
   *   selected list, otherwise copy from the end of the list
   * @return the number of elements copied (which might be less than n_needed)
   */
  int GatherAvailable(int n_already_selected, int n_needed, bool copy_front)
  {
    // First we update the mask to ignore already selected elements
    bool* available = this->available.data();
    if (n_already_selected > 0) {
      set_unavailable<<<raft::ceildiv(n_train, TPB), TPB, 0, stream>>>(
        available, n_train, idx.data(), n_already_selected);
      RAFT_CUDA_TRY(cudaPeekAtLastError());
    }
    if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) {
      std::stringstream ss;
      raft::print_device_vector("avail", available, n_train, ss);
      CUML_LOG_DEBUG(ss.str().c_str());
    }

    // Map the mask to the sorted indices
    thrust::device_ptr<bool> av_ptr(available);
    thrust::device_ptr<bool> av_sorted_ptr(available_sorted.data());
    thrust::device_ptr<int> idx_ptr(f_idx_sorted.data());
    thrust::copy(thrust::cuda::par.on(stream),
                 thrust::make_permutation_iterator(av_ptr, idx_ptr),
                 thrust::make_permutation_iterator(av_ptr, idx_ptr + n_train),
                 av_sorted_ptr);
    if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) {
      std::stringstream ss;
      raft::print_device_vector("avail_sorted", available_sorted.data(), n_train, ss);
      CUML_LOG_DEBUG(ss.str().c_str());
    }

    // Select the available elements
    cub::DeviceSelect::Flagged((void*)cub_storage.data(),
                               cub_bytes,
                               f_idx_sorted.data(),
                               available_sorted.data(),
                               idx_tmp.data(),
                               d_num_selected.data(),
                               n_train,
                               stream);
    int n_selected = d_num_selected.value(stream);
    handle.sync_stream(stream);

    // Copy to output
    int n_copy = n_selected > n_needed ? n_needed : n_selected;
    if (copy_front) {
      raft::copy(idx.data() + n_already_selected, idx_tmp.data(), n_copy, stream);
    } else {
      raft::copy(
        idx.data() + n_already_selected, idx_tmp.data() + n_selected - n_copy, n_copy, stream);
    }
    if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG) && n_train < 20) {
      std::stringstream ss;
      raft::print_device_vector("selected", idx.data(), n_already_selected + n_copy, ss);
      CUML_LOG_DEBUG(ss.str().c_str());
    }
    return n_copy;
  }

  /** Fill f_idx with 0..n_train-1 and idx with 0..n_ws-1. */
  void Initialize()
  {
    raft::linalg::range(f_idx.data(), n_train, stream);
    raft::linalg::range(idx.data(), n_ws, stream);
  }

  /**
   * @brief Select the first n_needed elements from ws_idx_sorted where op is true.
   *
   * The selected elements are appended to this->idx.
   *
   * @param n_needed number of elements that should be selected
   * @param n_already_selected number of already selected elements
   * @param op selection condition
   * @return the number of elements selected
   */
  template <typename select_op>
  int SelectPrevWs(int n_needed, int n_already_selected, select_op op)
  {
    n_needed -= n_already_selected;
    if (n_needed <= 0) { return 0; }
    cub::DeviceSelect::If(cub_storage.data(),
                          cub_bytes,
                          ws_idx_sorted.data(),
                          ws_idx_selected.data(),
                          d_num_selected.data(),
                          n_ws,
                          op,
                          stream);
    int n_selected = d_num_selected.value(stream);
    handle.sync_stream(stream);
    int n_copy = n_selected < n_needed ? n_selected : n_needed;
    raft::copy(idx.data() + n_already_selected, ws_idx_selected.data(), n_copy, stream);
    return n_copy;
  }
};

};  // end namespace SVM
};  // end namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/svm/smo_sets.cuh
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <raft/util/cuda_utils.cuh>

namespace ML {
namespace SVM {

/**
 * Determine whether a training instance is in the upper set.
 *
 * The full definition is
 *   (0 < a && a < C) || (y == 1 && a == 0) || (y == -1 && a == C).
 * Because a is always clipped to the [0, C] interval, this reduces to the
 * per-sign tests below.
 */
template <typename math_t>
DI bool in_upper(math_t a, math_t y, math_t C)
{
  if (y > 0) { return a < C; }
  if (y < 0) { return a > 0; }
  return false;
}

/**
 * Determine whether a training instance is in the lower set.
 *
 * The full definition is
 *   (0 < a && a < C) || (y == -1 && a == 0) || (y == 1 && a == C).
 * Because a is always clipped to the [0, C] interval, this reduces to the
 * per-sign tests below.
 */
template <typename math_t>
DI bool in_lower(math_t a, math_t y, math_t C)
{
  if (y > 0) { return a > 0; }
  if (y < 0) { return a < C; }
  return false;
}

};  // end namespace SVM
};  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/svm/sparse_util.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <raft/core/device_csr_matrix.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/handle.hpp>
#include <raft/distance/kernels.cuh>
#include <raft/matrix/matrix.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/transform_scan.h>

namespace ML {
namespace SVM {

/**
 * @brief Kernel call helper
 *
 * Specialization for
 *    DENSE(mdspan) x DENSE(mdspan) -> DENSE(raw pointer)
 *
 * The result pointer is wrapped in a column-major (f-contiguous) view of
 * shape [input1 rows, input2 rows] before evaluating the Gram matrix.
 *
 * @param [in] handle raft handle
 * @param [in] kernel kernel instance
 * @param [in] input1 dense matrix input [i, j]
 * @param [in] input2 dense matrix input [k, j]
 * @param [out] result evaluated kernel matrix [i, k]
 * @param [in] norm1 L2-norm of input1's rows (optional, only RBF)
 * @param [in] norm2 L2-norm of input2's rows (optional, only RBF)
 */
template <typename math_t>
void KernelOp(const raft::handle_t& handle,
              raft::distance::kernels::GramMatrixBase<math_t>* kernel,
              raft::device_matrix_view<math_t, int, raft::layout_stride> input1,
              raft::device_matrix_view<math_t, int, raft::layout_stride> input2,
              math_t* result,
              math_t* norm1 = nullptr,
              math_t* norm2 = nullptr)
{
  auto const_input1 = raft::make_const_mdspan(input1);
  auto const_input2 = raft::make_const_mdspan(input2);
  auto result_view  = raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
    result, input1.extent(0), input2.extent(0), 0);
  (*kernel)(handle, const_input1, const_input2, result_view, norm1, norm2);
}

/**
 * @brief Kernel call helper
 *
 * Specialization for
 *    DENSE(mdspan) x DENSE(raw pointer) -> DENSE(raw pointer)
 *
 * Wraps the raw pointer in a column-major view (column count taken from
 * input1) and forwards to the mdspan x mdspan overload.
 *
 * @param [in] handle raft handle
 * @param [in] kernel kernel instance
 * @param [in] input1 dense matrix input [i, j]
 * @param [in] input2 dense matrix input [k, j]
 * @param [in] rows2 number of rows for input2
 * @param [out] result evaluated kernel matrix [i, k]
 * @param [in] norm1 L2-norm of input1's rows (optional, only RBF)
 * @param [in] norm2 L2-norm of input2's rows (optional, only RBF)
 */
template <typename math_t>
void KernelOp(const raft::handle_t& handle,
              raft::distance::kernels::GramMatrixBase<math_t>* kernel,
              raft::device_matrix_view<math_t, int, raft::layout_stride> input1,
              math_t* input2,
              int rows2,
              math_t* result,
              math_t* norm1 = nullptr,
              math_t* norm2 = nullptr)
{
  auto view2 = raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
    input2, rows2, input1.extent(1), 0);
  KernelOp(handle, kernel, input1, view2, result, norm1, norm2);
}

/**
 * @brief Kernel call helper
 *
 * Specialization for
 *    DENSE(raw pointer) x DENSE(raw pointer) -> DENSE(raw pointer)
 *
 * Both raw pointers are wrapped in column-major views sharing the same
 * column count, then forwarded to the mdspan x mdspan overload.
 *
 * @param [in] handle raft handle
 * @param [in] kernel kernel instance
 * @param [in] input1 dense matrix input [i, j]
 * @param [in] rows1 number of rows for input1
 * @param [in] cols number of cols for input1/input2
 * @param [in] input2 dense matrix input [k, j]
 * @param [in] rows2 number of rows for input2
 * @param [out] result evaluated kernel matrix [i, k]
 * @param [in] norm1 L2-norm of input1's rows (optional, only RBF)
 * @param [in] norm2 L2-norm of input2's rows (optional, only RBF)
 */
template <typename math_t>
void KernelOp(const raft::handle_t& handle,
              raft::distance::kernels::GramMatrixBase<math_t>* kernel,
              math_t* input1,
              int rows1,
              int cols,
              math_t* input2,
              int rows2,
              math_t* result,
              math_t* norm1 = nullptr,
              math_t* norm2 = nullptr)
{
  auto view1 = raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
    input1, rows1, cols, 0);
  auto view2 = raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
    input2, rows2, cols, 0);
  KernelOp(handle, kernel, view1, view2, result, norm1, norm2);
}

/**
 * @brief Kernel call helper
 *
 * Specialization for
 *    CSR(matrix_view) x CSR(matrix_view) -> DENSE(raw pointer)
 *
 * Re-wraps both CSR views with const element pointers (as required by the
 * Gram operator) and evaluates into a column-major view of the result.
 *
 * @param [in] handle raft handle
 * @param [in] kernel kernel instance
 * @param [in] input1 csr matrix input [i, j]
 * @param [in] input2 csr matrix input [k, j]
 * @param [out] result evaluated kernel matrix [i, k]
 * @param [in] norm1 L2-norm of input1's rows (optional, only RBF)
 * @param [in] norm2 L2-norm of input2's rows (optional, only RBF)
 */
template <typename math_t>
void KernelOp(const raft::handle_t& handle,
              raft::distance::kernels::GramMatrixBase<math_t>* kernel,
              raft::device_csr_matrix_view<math_t, int, int, int> input1,
              raft::device_csr_matrix_view<math_t, int, int, int> input2,
              math_t* result,
              math_t* norm1 = nullptr,
              math_t* norm2 = nullptr)
{
  auto const_input1 = raft::make_device_csr_matrix_view<const math_t, int, int, int>(
    input1.get_elements().data(), input1.structure_view());
  auto const_input2 = raft::make_device_csr_matrix_view<const math_t, int, int, int>(
    input2.get_elements().data(), input2.structure_view());
  auto result_view = raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
    result, input1.structure_view().get_n_rows(), input2.structure_view().get_n_rows(), 0);
  (*kernel)(handle, const_input1, const_input2, result_view, norm1, norm2);
}

/**
 * @brief Kernel call helper
 *
 * Specialization for
 *    CSR(matrix_view) x DENSE(mdspan) -> DENSE(raw pointer)
 *
 * @param [in] handle raft handle
 * @param [in] kernel kernel instance
 * @param [in] input1 csr matrix input [i, j]
 * @param [in] input2 dense matrix input [k, j]
 * @param [out] result evaluated kernel matrix [i, k]
 * @param [in] norm1 L2-norm of input1's rows (optional, only RBF)
 * @param [in] norm2 L2-norm of input2's rows (optional, only RBF)
 */
template <typename math_t>
void KernelOp(const raft::handle_t& handle,
              raft::distance::kernels::GramMatrixBase<math_t>* kernel,
              raft::device_csr_matrix_view<math_t, int, int, int> input1,
              raft::device_matrix_view<math_t, int, raft::layout_stride> input2,
              math_t* result,
              math_t* norm1 = nullptr,
              math_t* norm2 = nullptr)
{
  auto const_input1 = raft::make_device_csr_matrix_view<const math_t, int, int, int>(
    input1.get_elements().data(), input1.structure_view());
  auto const_input2 = raft::make_const_mdspan(input2);
  auto result_view = raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
    result, input1.structure_view().get_n_rows(), input2.extent(0), 0);
  (*kernel)(handle, const_input1, const_input2, result_view, norm1, norm2);
}

/**
 * @brief Kernel call helper
 *
 * Specialization for
 *    DENSE(mdspan) x CSR(matrix_view) -> DENSE(raw pointer)
 *
 * This combination is intentionally unsupported: calling it always fails
 * with an assertion. Swap the operands (CSR x DENSE) instead.
 *
 * @param [in] handle raft handle
 * @param [in] kernel kernel instance
 * @param [in] input1 dense matrix input [i, j]
 * @param [in] input2 csr matrix input [k, j]
 * @param [out] result evaluated kernel matrix [i, k]
 * @param [in] norm1 L2-norm of input1's rows (optional, only RBF)
 * @param [in] norm2 L2-norm of input2's rows (optional, only RBF)
 */
template <typename math_t>
void KernelOp(const raft::handle_t& handle,
              raft::distance::kernels::GramMatrixBase<math_t>* kernel,
              raft::device_matrix_view<math_t, int, raft::layout_stride> input1,
              raft::device_csr_matrix_view<math_t, int, int, int> input2,
              math_t* result,
              math_t* norm1 = nullptr,
              math_t* norm2 = nullptr)
{
  ASSERT(false, "KernelOp not implemented for DENSE x CSR.");
}

/**
 * @brief Kernel call helper
 *
 * Specialization for
 *    CSR(matrix_view) x DENSE(raw pointer) -> DENSE(raw pointer)
 *
 * Wraps the raw pointer in a column-major view (column count taken from the
 * CSR structure) and forwards to the CSR x mdspan overload.
 *
 * @param [in] handle raft handle
 * @param [in] kernel kernel instance
 * @param [in] input1 csr matrix input [i, j]
 * @param [in] input2 dense matrix input [k, j]
 * @param [in] rows2 number of rows for input2
 * @param [out] result evaluated kernel matrix [i, k]
 * @param [in] norm1 L2-norm of input1's rows (optional, only RBF)
 * @param [in] norm2 L2-norm of input2's rows (optional, only RBF)
 */
template <typename math_t>
void KernelOp(const raft::handle_t& handle,
              raft::distance::kernels::GramMatrixBase<math_t>* kernel,
              raft::device_csr_matrix_view<math_t, int, int, int> input1,
              math_t* input2,
              int rows2,
              math_t* result,
              math_t* norm1 = nullptr,
              math_t* norm2 = nullptr)
{
  auto view2 = raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
    input2, rows2, input1.structure_view().get_n_cols(), 0);
  KernelOp(handle, kernel, input1, view2, result, norm1, norm2);
}

/**
 * @brief Create view on matrix batch of contiguous rows
 *
 * This specialization creates a device matrix view
 * representing a batch from a given device
 * matrix view.
 *
 * @param [in] matrix matrix input, dense [i, j]
 * @param [in] batch_size number of rows within batch
 * @param [in] offset row offset for batch start
 * @param [in] host_indptr unused
 * @param [in] target_indptr unused
 * @param [in] stream unused
 * @return matrix_batch
 */
template <typename math_t>
raft::device_matrix_view<math_t, int, raft::layout_stride> getMatrixBatch(
  raft::device_matrix_view<math_t, int, raft::layout_stride> matrix,
  int batch_size,
  int offset,
  int* host_indptr,
  int* target_indptr,
  cudaStream_t stream)
{
  if (batch_size == matrix.extent(0)) {
    // full matrix requested: return the input view unchanged
    return matrix;
  } else {
    // column-major: row offset shifts the data pointer by 'offset' elements,
    // the original row count becomes the column stride
    return raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
      matrix.data_handle() + offset, batch_size, matrix.extent(1), matrix.extent(0));
  }
}

/**
 * @brief Create view on matrix batch of contiguous rows
 *
 * This specialization creates a device csr matrix view
 * representing a batch from a given device csr
 * matrix view.
 *
 * @param [in] matrix matrix input, csr [i, j]
 * @param [in] batch_size number of rows within batch
 * @param [in] offset row offset for batch start
 * @param [in] host_indptr host copy of indptr
 * @param [in] target_indptr target buffer for modified indptr
 * @param [in] stream stream
 * @return matrix_batch
 */
template <typename math_t>
raft::device_csr_matrix_view<math_t, int, int, int> getMatrixBatch(
  raft::device_csr_matrix_view<math_t, int, int, int> matrix,
  int batch_size,
  int offset,
  int* host_indptr,
  int* target_indptr,
  cudaStream_t stream)
{
  auto csr_struct_in = matrix.structure_view();
  if (batch_size == csr_struct_in.get_n_rows()) {
    // full matrix requested: return the input view unchanged
    return matrix;
  } else {
    int* indptr_in  = csr_struct_in.get_indptr().data();
    int* indices_in = csr_struct_in.get_indices().data();
    math_t* data_in = matrix.get_elements().data();

    // host-side indptr copy lets us compute the batch nnz without a device sync
    int nnz_offset = host_indptr[offset];
    int batch_nnz  = host_indptr[offset + batch_size] - nnz_offset;
    {
      // rebase the row pointers of the batch to start at 0
      thrust::device_ptr<int> inptr_src(indptr_in + offset);
      thrust::device_ptr<int> inptr_tgt(target_indptr);
      thrust::transform(thrust::cuda::par.on(stream),
                        inptr_src,
                        inptr_src + batch_size + 1,
                        thrust::make_constant_iterator(nnz_offset),
                        inptr_tgt,
                        thrust::minus<int>());
    }
    auto csr_struct_out = raft::make_device_compressed_structure_view<int, int, int>(
      target_indptr, indices_in + nnz_offset, batch_size, csr_struct_in.get_n_cols(), batch_nnz);
    return raft::make_device_csr_matrix_view(data_in + nnz_offset, csr_struct_out);
  }
}

/**
 * Scatter selected CSR rows into a column-major dense output.
 * Launch layout: blockDim.x lanes cooperate on one row, blockDim.y rows per block,
 * 1D grid over ceildiv(num_indices, blockDim.y). Output must be zero-filled by the caller.
 */
template <typename math_t>
static __global__ void extractDenseRowsFromCSR(math_t* out,
                                               const int* indptr,
                                               const int* indices,
                                               const math_t* data,
                                               const int* row_indices,
                                               const int num_indices)
{
  assert(gridDim.y == 1 && gridDim.z == 1);
  // all threads in x-direction are responsible for one line of csr
  int idx = blockIdx.x * blockDim.y + threadIdx.y;
  if (idx >= num_indices) return;

  int row_idx = row_indices[idx];

  int rowStartIdx = indptr[row_idx];
  int rowEndIdx   = indptr[row_idx + 1];
  for (int pos = rowStartIdx + threadIdx.x; pos < rowEndIdx; pos += blockDim.x) {
    int col_idx = indices[pos];
    // column-major output with num_indices rows
    out[idx + col_idx * num_indices] = data[pos];
  }
}

/**
 * Gather selected CSR rows into a new CSR triple.
 * Launch layout: blockDim.x lanes cooperate on one row, blockDim.y rows per block.
 * indptr_out must already hold the (exclusive-after-shift) end positions, see
 * computeIndptrForSubset.
 */
template <typename math_t>
static __global__ void extractCSRRowsFromCSR(int* indptr_out,  // already holds end positions
                                             int* indices_out,
                                             math_t* data_out,
                                             const int* indptr_in,
                                             const int* indices_in,
                                             const math_t* data_in,
                                             const int* row_indices,
                                             const int num_indices)
{
  assert(gridDim.y == 1 && gridDim.z == 1);
  // all threads in x-direction are responsible for one line of csr
  int idx = blockIdx.x * blockDim.y + threadIdx.y;
  if (idx >= num_indices) return;

  // the first row has to set the indptr_out[0] value to 0
  // NOTE(review): lanes with threadIdx.x > 0 of row 0 read indptr_out[0] below
  // without a __syncwarp() after this write; this relies on warp-synchronous
  // execution — verify on architectures with independent thread scheduling.
  if (threadIdx.x == 0 && idx == 0) { indptr_out[0] = 0; }

  int row_idx    = row_indices[idx];
  int in_offset  = indptr_in[row_idx];
  int row_length = indptr_in[row_idx + 1] - in_offset;
  int out_offset = indptr_out[idx];
  for (int pos = threadIdx.x; pos < row_length; pos += blockDim.x) {
    indices_out[out_offset + pos] = indices_in[in_offset + pos];
    data_out[out_offset + pos]    = data_in[in_offset + pos];
  }
}

/**
 * Returns whether MatrixViewType is a device_matrix_view
 *
 * Note: only the float/double instantiations used by this file are checked.
 *
 * @return true if MatrixViewType is device dense mdspan
 */
template <typename MatrixViewType>
bool isDenseType()
{
  return (std::is_same<MatrixViewType,
                       raft::device_matrix_view<float, int, raft::layout_stride>>::value ||
          std::is_same<MatrixViewType,
                       raft::device_matrix_view<double, int, raft::layout_stride>>::value);
}

/**
 * @brief Specialization of compute row norm for dense matrix
 *
 * This utility runs the row norm computation for a dense and
 * contiguous device matrix.
 *
 * @param [in] handle raft handle
 * @param [in] matrix matrix input, dense [i, j]
 * @param [out] target row norm, size needs to be at least [i]
 * @param [in] norm norm type to be evaluated
 */
template <typename math_t>
void matrixRowNorm(const raft::handle_t& handle,
                   raft::device_matrix_view<math_t, int, raft::layout_stride> matrix,
                   math_t* target,
                   raft::linalg::NormType norm)
{
  bool is_row_major_contiguous = matrix.stride(1) == 1 && matrix.stride(0) == matrix.extent(1);
  bool is_col_major_contiguous = matrix.stride(0) == 1 && matrix.stride(1) == matrix.extent(0);
  ASSERT(is_row_major_contiguous || is_col_major_contiguous,
         "Dense matrix rowNorm only support contiguous data");
  raft::linalg::rowNorm(target,
                        matrix.data_handle(),
                        matrix.extent(1),  //! cols first arg!
                        matrix.extent(0),
                        norm,
                        is_row_major_contiguous,
                        handle.get_stream());
}

/**
 * @brief Specialization of compute row norm for csr matrix
 *
 * This utility runs the row norm computation for a csr matrix.
* * @param [in] handle raft handle * @param [in] matrix matrix input, csr [i, j] * @param [out] target row norm, size needs to be at least [i] * @param [in] norm norm type to be evaluated */ template <typename math_t> void matrixRowNorm(const raft::handle_t& handle, raft::device_csr_matrix_view<math_t, int, int, int> matrix, math_t* target, raft::linalg::NormType norm) { auto csr_struct_in = matrix.structure_view(); raft::sparse::linalg::rowNormCsr(handle, csr_struct_in.get_indptr().data(), matrix.get_elements().data(), csr_struct_in.get_nnz(), csr_struct_in.get_n_rows(), target, norm); } /** * @brief Extract CSR rows to dense * * Extraction of individual rows of a CSR matrix into a dense * array with column major order. * * @param [in] indptr row index pointer of CSR input [n_rows + 1] * @param [in] indices column indices of CSR input [nnz = indptr[nrows + 1]] * @param [in] data values of CSR input [nnz = indptr[nrows + 1]] * @param [in] n_rows number of matrix rows * @param [in] n_cols number of matrix columns * @param [out] output dense array, size needs to be at least [num_indices * n_cols] * @param [in] row_indices row indices to extract [num_indices] * @param [in] num_indices number of indices to extract * @param [in] stream cuda stream */ template <typename math_t> static void copySparseRowsToDense(const int* indptr, const int* indices, const math_t* data, int n_rows, int n_cols, math_t* output, const int* row_indices, int num_indices, cudaStream_t stream) { thrust::device_ptr<math_t> output_ptr(output); thrust::fill( thrust::cuda::par.on(stream), output_ptr, output_ptr + num_indices * n_cols, (math_t)0); // copy with 1 warp per row for now, blocksize 256 const dim3 bs(32, 8, 1); const dim3 gs(raft::ceildiv(num_indices, (int)bs.y), 1, 1); extractDenseRowsFromCSR<math_t> <<<gs, bs, 0, stream>>>(output, indptr, indices, data, row_indices, num_indices); RAFT_CUDA_TRY(cudaPeekAtLastError()); } struct rowsize : public thrust::unary_function<int, int> { const int* 
indptr_; rowsize(const int* indptr) : indptr_(indptr) {} __device__ int64_t operator()(const int& x) const { return indptr_[x + 1] - indptr_[x]; } }; /** * @brief Extract matrix rows to sub matrix * * This is the specialized version for * 'DENSE -> CSR (data owning)' * * Note: just added for compilation, should not be hit at runtime */ template <typename math_t, typename LayoutPolicyIn> void extractRows(raft::device_matrix_view<math_t, int, LayoutPolicyIn> matrix_in, raft::device_csr_matrix<math_t, int, int, int> matrix_out, const int* row_indices, int num_indices, const raft::handle_t& handle) { ASSERT(false, "extractRows from DENSE-CSR not implemented."); } /** * @brief Extract matrix rows to sub matrix * * This is the specialized version for * 'DENSE -> DENSE (raw pointer)' * * TODO: move this functionality to * https://github.com/rapidsai/raft/issues/1524 * * @param [in] matrix_in matrix input (dense view) [i, j] * @param [out] matrix_out matrix output raw pointer, size at least num_indices*j * @param [in] row_indices row indices to extract [num_indices] * @param [in] num_indices number of indices to extract * @param [in] handle raft handle */ template <typename math_t, typename LayoutPolicyIn> void extractRows(raft::device_matrix_view<math_t, int, LayoutPolicyIn> matrix_in, math_t* matrix_out, const int* row_indices, int num_indices, const raft::handle_t& handle) { ASSERT(matrix_in.stride(0) == 1, "Matrix needs to be column major"); ASSERT(matrix_in.stride(1) == matrix_in.extent(0), "No padding supported"); raft::matrix::copyRows<math_t, int, size_t>(matrix_in.data_handle(), matrix_in.extent(0), matrix_in.extent(1), matrix_out, row_indices, num_indices, handle.get_stream(), false); RAFT_CUDA_TRY(cudaPeekAtLastError()); } /** * @brief Extract matrix rows to sub matrix * * This is the specialized version for * 'CSR -> DENSE (raw pointer)' * * @param [in] matrix_in matrix input in CSR [i, j] * @param [out] matrix_out matrix output raw pointer, size at least 
num_indices*j
 * @param [in] row_indices row indices to extract [num_indices]
 * @param [in] num_indices number of indices to extract
 * @param [in] handle raft handle
 */
template <typename math_t>
void extractRows(raft::device_csr_matrix_view<math_t, int, int, int> matrix_in,
                 math_t* matrix_out,
                 const int* row_indices,
                 int num_indices,
                 const raft::handle_t& handle)
{
  auto stream        = handle.get_stream();
  auto csr_struct_in = matrix_in.structure_view();

  // initialize dense target
  thrust::device_ptr<math_t> output_ptr(matrix_out);
  thrust::fill(thrust::cuda::par.on(stream),
               output_ptr,
               output_ptr + num_indices * csr_struct_in.get_n_cols(),
               (math_t)0);

  int* indptr  = csr_struct_in.get_indptr().data();
  int* indices = csr_struct_in.get_indices().data();
  math_t* data = matrix_in.get_elements().data();

  // copy with 1 warp per row for now, blocksize 256
  const dim3 bs(32, 8, 1);
  const dim3 gs(raft::ceildiv(num_indices, (int)bs.y), 1, 1);
  extractDenseRowsFromCSR<math_t>
    <<<gs, bs, 0, stream>>>(matrix_out, indptr, indices, data, row_indices, num_indices);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

namespace {

// Fills indptr_out[1..num_indices] with the inclusive scan of the selected
// rows' sizes (indptr_out[0] is set to 0 later by extractCSRRowsFromCSR)
// and returns the total nnz of the selection. Synchronizes the stream.
int computeIndptrForSubset(
  int* indptr_in, int* indptr_out, const int* row_indices, int num_indices, cudaStream_t stream)
{
  thrust::device_ptr<int> row_sizes_ptr(indptr_out);
  thrust::device_ptr<const int> row_new_indices_ptr(row_indices);
  thrust::transform_inclusive_scan(thrust::cuda::par.on(stream),
                                   row_new_indices_ptr,
                                   row_new_indices_ptr + num_indices,
                                   row_sizes_ptr + 1,
                                   rowsize(indptr_in),
                                   thrust::plus<int>());
  // retrieve nnz from indptr_out[num_indices]
  int nnz;
  raft::update_host(&nnz, indptr_out + num_indices, 1, stream);
  // NOTE(review): return value of cudaStreamSynchronize is not checked here
  cudaStreamSynchronize(stream);
  return nnz;
}

}  // namespace

/**
 * @brief copy row pointers from device to host
 *
 * This is only implemented for CSR
 *
 * @param [in] matrix matrix input in CSR [i, j]
 * @param [out] host_indptr indptr in host [i + 1]
 * @param [in] stream cuda stream
 */
template <typename math_t>
void copyIndptrToHost(raft::device_csr_matrix_view<math_t, int, int, int> matrix,
                      int* host_indptr,
                      cudaStream_t stream)
{
  raft::update_host(host_indptr,
                    matrix.structure_view().get_indptr().data(),
                    matrix.structure_view().get_n_rows() + 1,
                    stream);
  cudaStreamSynchronize(stream);
}

/**
 * @brief copy row pointers from device to host
 *
 * This is only implemented for CSR; the dense overload exists solely to
 * satisfy template instantiation and must not be reached at runtime.
 *
 * @param [in] matrix matrix input [i, j]
 * @param [out] host_indptr indptr in host [i + 1]
 * @param [in] stream cuda stream
 */
template <typename math_t, typename LayoutPolicyIn>
void copyIndptrToHost(raft::device_matrix_view<math_t, int, LayoutPolicyIn> matrix,
                      int* host_indptr,
                      cudaStream_t stream)
{
  ASSERT(false, "Variant not implemented.");
}

/**
 * @brief Extract matrix rows to sub matrix
 *
 * This is the specialized version for
 *     'CSR -> CSR (data owning)'
 *
 * TODO: move this functionality to
 * https://github.com/rapidsai/raft/issues/1524
 *
 * @param [in] matrix_in matrix input in CSR [i, j]
 * @param [out] matrix_out matrix output in CSR [num_indices, j]
 * @param [in] row_indices row indices to extract [num_indices]
 * @param [in] num_indices number of indices to extract
 * @param [in] handle raft handle
 */
template <typename math_t>
void extractRows(raft::device_csr_matrix_view<math_t, int, int, int> matrix_in,
                 raft::device_csr_matrix<math_t, int, int, int>& matrix_out,
                 const int* row_indices,
                 int num_indices,
                 const raft::handle_t& handle)
{
  auto stream        = handle.get_stream();
  auto csr_struct_in = matrix_in.structure_view();
  int* indptr_in     = csr_struct_in.get_indptr().data();
  int* indices_in    = csr_struct_in.get_indices().data();
  math_t* data_in    = matrix_in.get_elements().data();

  auto csr_struct_out = matrix_out.structure_view();
  int* indptr_out     = csr_struct_out.get_indptr().data();

  int nnz = computeIndptrForSubset(indptr_in, indptr_out, row_indices, num_indices, stream);

  // this might invalidate indices/data pointers!
  // NOTE(review): indptr_out was captured before this call; assumes
  // initialize_sparsity leaves the indptr buffer in place — verify.
  matrix_out.initialize_sparsity(nnz);
  int* indices_out = matrix_out.structure_view().get_indices().data();
  math_t* data_out = matrix_out.get_elements().data();

  // copy with 1 warp per row for now, blocksize 256
  const dim3 bs(32, 8, 1);
  const dim3 gs(raft::ceildiv(num_indices, (int)bs.y), 1, 1);
  extractCSRRowsFromCSR<math_t><<<gs, bs, 0, stream>>>(
    indptr_out, indices_out, data_out, indptr_in, indices_in, data_in, row_indices, num_indices);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/**
 * @brief Extract matrix rows to sub matrix
 *
 * This is the specialized version for
 *     'DENSE -> CSR (raw pointers)'
 *
 * Note: just added for compilation, should not be hit at runtime
 */
template <typename math_t, typename LayoutPolicyIn>
void extractRows(raft::device_matrix_view<math_t, int, LayoutPolicyIn> matrix_in,
                 int** indptr_out,
                 int** indices_out,
                 math_t** data_out,
                 int* nnz,
                 const int* row_indices,
                 int num_indices,
                 const raft::handle_t& handle)
{
  ASSERT(false, "extractRows not implemented for DENSE->CSR");
}

/**
 * @brief Extract matrix rows to sub matrix
 *
 * This is the specialized version for
 *     'CSR -> CSR (raw pointers)'
 *
 * Warning: this specialization will allocate the required arrays in device memory.
 *
 * @param [in] matrix_in matrix input in CSR [i, j]
 * @param [out] indptr_out row index pointer of CSR output [num_indices + 1]
 * @param [out] indices_out column indices of CSR output [nnz = indptr_out[num_indices + 1]]
 * @param [out] data_out values of CSR output [nnz = indptr_out[num_indices + 1]]
 * @param [out] nnz number of indices to extract
 * @param [in] row_indices row indices to extract [num_indices]
 * @param [in] num_indices number of indices to extract
 * @param [in] handle raft handle
 */
template <typename math_t>
void extractRows(raft::device_csr_matrix_view<math_t, int, int, int> matrix_in,
                 int** indptr_out,
                 int** indices_out,
                 math_t** data_out,
                 int* nnz,
                 const int* row_indices,
                 int num_indices,
                 const raft::handle_t& handle)
{
  auto stream        = handle.get_stream();
  auto csr_struct_in = matrix_in.structure_view();
  int* indptr_in     = csr_struct_in.get_indptr().data();
  int* indices_in    = csr_struct_in.get_indices().data();
  math_t* data_in    = matrix_in.get_elements().data();

  // allocate indptr
  auto* rmm_alloc = rmm::mr::get_current_device_resource();
  *indptr_out     = (int*)rmm_alloc->allocate((num_indices + 1) * sizeof(int), stream);

  *nnz = computeIndptrForSubset(indptr_in, *indptr_out, row_indices, num_indices, stream);

  // allocate indices, data
  *indices_out = (int*)rmm_alloc->allocate(*nnz * sizeof(int), stream);
  *data_out    = (math_t*)rmm_alloc->allocate(*nnz * sizeof(math_t), stream);

  // copy with 1 warp per row for now, blocksize 256
  const dim3 bs(32, 8, 1);
  const dim3 gs(raft::ceildiv(num_indices, (int)bs.y), 1, 1);
  extractCSRRowsFromCSR<math_t><<<gs, bs, 0, stream>>>(
    *indptr_out, *indices_out, *data_out, indptr_in, indices_in, data_in, row_indices, num_indices);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

}  // namespace SVM
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/svm/svc_impl.cuh
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

/** @file svc_impl.cuh
 * @brief Implementation of the stateless C++ functions to fit an SVM
 * classifier, and predict with it.
 */

#include <iostream>

#include "kernelcache.cuh"
#include "smosolver.cuh"

#include <cublas_v2.h>
#include <cuml/svm/svm_model.h>
#include <cuml/svm/svm_parameter.h>
#include <raft/core/handle.hpp>
#include <raft/distance/kernels.cuh>
#include <raft/label/classlabels.cuh>
#include <raft/linalg/gemv.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>

namespace ML {
namespace SVM {

/**
 * @brief Fit a binary SVC on a dense or CSR input matrix (common implementation).
 *
 * Determines the class labels (exactly 2 required), maps them to +/-1,
 * runs the SMO solver and fills the model (dual coefs, support vectors, bias).
 *
 * @param [in] handle raft handle
 * @param [in] matrix training vectors, dense view or csr matrix view [n_rows, n_cols]
 * @param [in] n_rows number of training vectors
 * @param [in] n_cols number of features
 * @param [in] labels device array of class labels [n_rows]
 * @param [in] param SVM training parameters
 * @param [in] kernel_params kernel function parameters
 * @param [out] model fitted model (device buffers allocated here, free with svmFreeBuffers)
 * @param [in] sample_weight optional per-sample weights [n_rows], may be nullptr
 */
template <typename math_t, typename MatrixViewType>
void svcFitX(const raft::handle_t& handle,
             MatrixViewType matrix,
             int n_rows,
             int n_cols,
             math_t* labels,
             const SvmParameter& param,
             raft::distance::kernels::KernelParams& kernel_params,
             SvmModel<math_t>& model,
             const math_t* sample_weight)
{
  ASSERT(n_cols > 0, "Parameter n_cols: number of columns cannot be less than one");
  ASSERT(n_rows > 0, "Parameter n_rows: number of rows cannot be less than one");

  // KernelCache could use multiple streams, not implemented currently
  // See Issue #948.
  // ML::detail::streamSyncer _(handle_impl.getImpl());
  const raft::handle_t& handle_impl = handle;
  cudaStream_t stream = handle_impl.get_stream();
  {
    // collect the distinct labels; ownership of the label array stays with the model
    rmm::device_uvector<math_t> unique_labels(0, stream);
    model.n_classes = raft::label::getUniquelabels(unique_labels, labels, n_rows, stream);
    rmm::mr::device_memory_resource* rmm_alloc = rmm::mr::get_current_device_resource();
    model.unique_labels = (math_t*)rmm_alloc->allocate(model.n_classes * sizeof(math_t), stream);
    raft::copy(model.unique_labels, unique_labels.data(), model.n_classes, stream);
    handle_impl.sync_stream(stream);
  }
  ASSERT(model.n_classes == 2, "Only binary classification is implemented at the moment");

  // map the two labels to -1 / +1 for the solver
  rmm::device_uvector<math_t> y(n_rows, stream);
  raft::label::getOvrlabels(
    labels, n_rows, model.unique_labels, model.n_classes, y.data(), 1, stream);

  raft::distance::kernels::GramMatrixBase<math_t>* kernel =
    raft::distance::kernels::KernelFactory<math_t>::create(kernel_params);
  SmoSolver<math_t> smo(handle_impl, param, kernel_params.kernel, kernel);
  smo.Solve(matrix,
            n_rows,
            n_cols,
            y.data(),
            sample_weight,
            &(model.dual_coefs),
            &(model.n_support),
            &(model.support_matrix),
            &(model.support_idx),
            &(model.b),
            param.max_iter);
  model.n_cols = n_cols;
  handle_impl.sync_stream(stream);
  delete kernel;
}

/**
 * @brief Fit a binary SVC on a dense, column-major input matrix.
 *
 * Thin wrapper around svcFitX; see there for parameter semantics.
 */
template <typename math_t>
void svcFit(const raft::handle_t& handle,
            math_t* input,
            int n_rows,
            int n_cols,
            math_t* labels,
            const SvmParameter& param,
            raft::distance::kernels::KernelParams& kernel_params,
            SvmModel<math_t>& model,
            const math_t* sample_weight)
{
  auto dense_view = raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
    input, n_rows, n_cols, 0);
  svcFitX(handle, dense_view, n_rows, n_cols, labels, param, kernel_params, model, sample_weight);
}

/**
 * @brief Fit a binary SVC on a CSR input matrix.
 *
 * Thin wrapper around svcFitX; see there for parameter semantics.
 */
template <typename math_t>
void svcFitSparse(const raft::handle_t& handle,
                  int* indptr,
                  int* indices,
                  math_t* data,
                  int n_rows,
                  int n_cols,
                  int nnz,
                  math_t* labels,
                  const SvmParameter& param,
                  raft::distance::kernels::KernelParams& kernel_params,
                  SvmModel<math_t>& model,
                  const math_t* sample_weight)
{
  auto csr_structure_view = raft::make_device_compressed_structure_view<int, int, int>(
    indptr, indices, n_rows, n_cols, nnz);
  auto csr_matrix_view = raft::make_device_csr_matrix_view(data, csr_structure_view);
  svcFitX(
    handle, csr_matrix_view, n_rows, n_cols, labels, param, kernel_params, model, sample_weight);
}

/**
 * @brief Predict classes or decision-function values for dense/CSR input.
 *
 * Processes the input in batches bounded by buffer_size (MiB): for each batch
 * the kernel matrix against the support vectors is evaluated and multiplied
 * by the dual coefficients via gemv.
 *
 * @param [in] handle raft handle
 * @param [in] matrix input vectors, dense view or csr matrix view [n_rows, n_cols]
 * @param [in] n_rows number of input vectors
 * @param [in] n_cols number of features (must equal model.n_cols)
 * @param [in] kernel_params kernel function parameters
 * @param [in] model fitted SVM model
 * @param [out] preds predictions [n_rows]
 * @param [in] buffer_size size limit of the kernel buffer in MiB
 * @param [in] predict_class true: output class labels, false: decision function values
 */
template <typename math_t, typename MatrixViewType>
void svcPredictX(const raft::handle_t& handle,
                 MatrixViewType matrix,
                 int n_rows,
                 int n_cols,
                 raft::distance::kernels::KernelParams& kernel_params,
                 const SvmModel<math_t>& model,
                 math_t* preds,
                 math_t buffer_size,
                 bool predict_class)
{
  ASSERT(n_cols == model.n_cols, "Parameter n_cols: shall be the same that was used for fitting");
  // We might want to query the available memory before selecting the batch size.
  // We will need n_batch * n_support floats for the kernel matrix K.
  int n_batch = n_rows;
  // Limit the memory size of the prediction buffer
  buffer_size = buffer_size * 1024 * 1024;
  if ((size_t)n_batch * model.n_support * sizeof(math_t) > buffer_size) {
    n_batch = buffer_size / (model.n_support * sizeof(math_t));
    if (n_batch < 1) n_batch = 1;
  }

  const raft::handle_t& handle_impl = handle;
  cudaStream_t stream = handle_impl.get_stream();

  rmm::device_uvector<math_t> K(n_batch * model.n_support, stream);
  rmm::device_uvector<math_t> y(n_rows, stream);
  if (model.n_support == 0) {
    // no support vectors: decision values are all zero
    RAFT_CUDA_TRY(cudaMemsetAsync(y.data(), 0, n_rows * sizeof(math_t), stream));
  }

  raft::distance::kernels::GramMatrixBase<math_t>* kernel =
    raft::distance::kernels::KernelFactory<math_t>::create(kernel_params);

  /*
   // kernel computation:
   //////////////////////////////////
   Dense input, dense support
     * just multiply, expanded L2 norm for RBF
   Sparse Input, dense support
     * row ptr copy/shift for input csr, expanded L2 norm for RBF
   Dense input, sparse support
     * transpose kernel compute, expanded L2 norm for RBF
   Sparse input, sparse support
     * row ptr copy/shift for input
     csr
  */

  // store matrix dot product (l2 norm) for RBF kernels if applicable
  rmm::device_uvector<math_t> l2_input(0, stream);
  rmm::device_uvector<math_t> l2_support(0, stream);

  bool is_csr_input = !isDenseType<MatrixViewType>();
  // nnz >= 0 marks a CSR support matrix; nnz == -1 marks a dense one
  bool is_csr_support = model.support_matrix.data != nullptr && model.support_matrix.nnz >= 0;
  bool is_dense_support = model.support_matrix.data != nullptr && !is_csr_support;

  // Unfortunately we need runtime support for both types
  raft::device_matrix_view<math_t, int, raft::layout_stride> dense_support_matrix_view;
  if (is_dense_support) {
    dense_support_matrix_view =
      raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
        model.support_matrix.data, model.n_support, n_cols, 0);
  }
  auto csr_structure_view =
    is_csr_support
      ? raft::make_device_compressed_structure_view<int, int, int>(model.support_matrix.indptr,
                                                                   model.support_matrix.indices,
                                                                   model.n_support,
                                                                   n_cols,
                                                                   model.support_matrix.nnz)
      : raft::make_device_compressed_structure_view<int, int, int>(nullptr, nullptr, 0, 0, 0);
  auto csr_support_matrix_view =
    is_csr_support
      ? raft::make_device_csr_matrix_view<math_t, int, int, int>(model.support_matrix.data,
                                                                 csr_structure_view)
      : raft::make_device_csr_matrix_view<math_t, int, int, int>(nullptr, csr_structure_view);

  // DENSE x CSR is not implemented; compute CSR x DENSE instead and transpose in gemv
  bool transpose_kernel = is_csr_support && !is_csr_input;

  if (model.n_support > 0 && kernel_params.kernel == raft::distance::kernels::RBF) {
    // precompute row L2 norms for the expanded-norm RBF evaluation
    l2_input.resize(n_rows, stream);
    l2_support.resize(model.n_support, stream);
    ML::SVM::matrixRowNorm(handle, matrix, l2_input.data(), raft::linalg::NormType::L2Norm);
    if (model.n_support > 0)
      if (is_csr_support) {
        ML::SVM::matrixRowNorm(
          handle, csr_support_matrix_view, l2_support.data(), raft::linalg::NormType::L2Norm);
      } else {
        ML::SVM::matrixRowNorm(
          handle, dense_support_matrix_view, l2_support.data(), raft::linalg::NormType::L2Norm);
      }
  }

  // additional row pointer information needed for batched CSR access
  // copy matrix row pointer to host to compute partial nnz on the fly
  std::vector<int> host_indptr;
  rmm::device_uvector<int> indptr_batched(0, stream);
  if (model.n_support > 0 && is_csr_input) {
    host_indptr.resize(n_rows + 1);
    indptr_batched.resize(n_batch + 1, stream);
    copyIndptrToHost(matrix, host_indptr.data(), stream);
  }

  // We process the input data batchwise:
  //  - calculate the kernel values K[x_batch, x_support]
  //  - calculate y(x_batch) = K[x_batch, x_support] * dual_coeffs
  for (int i = 0; i < n_rows && model.n_support > 0; i += n_batch) {
    if (i + n_batch >= n_rows) { n_batch = n_rows - i; }
    math_t* l2_input1 = l2_input.data() != nullptr ? l2_input.data() + i : nullptr;
    math_t* l2_input2 = l2_support.data();
    auto batch_matrix =
      getMatrixBatch(matrix, n_batch, i, host_indptr.data(), indptr_batched.data(), stream);
    if (transpose_kernel) {
      KernelOp(
        handle_impl, kernel, csr_support_matrix_view, batch_matrix, K.data(), l2_input2, l2_input1);
    } else if (is_csr_support) {
      KernelOp(
        handle_impl, kernel, batch_matrix, csr_support_matrix_view, K.data(), l2_input1, l2_input2);
    } else {
      KernelOp(handle_impl,
               kernel,
               batch_matrix,
               dense_support_matrix_view,
               K.data(),
               l2_input1,
               l2_input2);
    }
    math_t one  = 1;
    math_t null = 0;
    raft::linalg::gemv(handle_impl,
                       transpose_kernel,
                       transpose_kernel ? model.n_support : n_batch,
                       transpose_kernel ? n_batch : model.n_support,
                       &one,
                       K.data(),
                       transpose_kernel ? model.n_support : n_batch,
                       model.dual_coefs,
                       1,
                       &null,
                       y.data() + i,
                       1,
                       stream);
  }  // end of loop

  math_t* labels = model.unique_labels;
  math_t b       = model.b;
  if (predict_class) {
    // Look up the label based on the value of the decision function:
    // f(x) = sign(y(x) + b)
    raft::linalg::unaryOp(
      preds,
      y.data(),
      n_rows,
      [labels, b] __device__(math_t y) { return y + b < 0 ? labels[0] : labels[1]; },
      stream);
  } else {
    // Calculate the value of the decision function: f(x) = y(x) + b
    raft::linalg::unaryOp(
      preds, y.data(), n_rows, [b] __device__(math_t y) { return y + b; }, stream);
  }
  handle_impl.sync_stream(stream);
  delete kernel;
}

/**
 * @brief Predict with a fitted SVC on a dense, column-major input matrix.
 *
 * Thin wrapper around svcPredictX; see there for parameter semantics.
 */
template <typename math_t>
void svcPredict(const raft::handle_t& handle,
                math_t* input,
                int n_rows,
                int n_cols,
                raft::distance::kernels::KernelParams& kernel_params,
                const SvmModel<math_t>& model,
                math_t* preds,
                math_t buffer_size,
                bool predict_class)
{
  auto dense_view = raft::make_device_strided_matrix_view<math_t, int, raft::layout_f_contiguous>(
    input, n_rows, n_cols, 0);
  svcPredictX(
    handle, dense_view, n_rows, n_cols, kernel_params, model, preds, buffer_size, predict_class);
}

/**
 * @brief Predict with a fitted SVC on a CSR input matrix.
 *
 * Thin wrapper around svcPredictX; see there for parameter semantics.
 */
template <typename math_t>
void svcPredictSparse(const raft::handle_t& handle,
                      int* indptr,
                      int* indices,
                      math_t* data,
                      int n_rows,
                      int n_cols,
                      int nnz,
                      raft::distance::kernels::KernelParams& kernel_params,
                      const SvmModel<math_t>& model,
                      math_t* preds,
                      math_t buffer_size,
                      bool predict_class)
{
  auto csr_structure_view = raft::make_device_compressed_structure_view<int, int, int>(
    indptr, indices, n_rows, n_cols, nnz);
  auto csr_matrix_view = raft::make_device_csr_matrix_view(data, csr_structure_view);
  svcPredictX(handle,
              csr_matrix_view,
              n_rows,
              n_cols,
              kernel_params,
              model,
              preds,
              buffer_size,
              predict_class);
}

/**
 * @brief Release all device buffers owned by an SvmModel.
 *
 * Deallocates via the current rmm device resource (sizes must match the
 * allocations made during fit) and resets the pointers.
 */
template <typename math_t>
void svmFreeBuffers(const raft::handle_t& handle, SvmModel<math_t>& m)
{
  cudaStream_t stream = handle.get_stream();
  rmm::mr::device_memory_resource* rmm_alloc = rmm::mr::get_current_device_resource();
  if (m.dual_coefs) rmm_alloc->deallocate(m.dual_coefs, m.n_support * sizeof(math_t), stream);
  if (m.support_idx) rmm_alloc->deallocate(m.support_idx, m.n_support * sizeof(int), stream);
  if (m.support_matrix.indptr) {
    rmm_alloc->deallocate(m.support_matrix.indptr, (m.n_support + 1) * sizeof(int), stream);
    m.support_matrix.indptr = nullptr;
  }
  if (m.support_matrix.indices) {
    rmm_alloc->deallocate(m.support_matrix.indices, m.support_matrix.nnz * sizeof(int), stream);
    m.support_matrix.indices = nullptr;
  }
  if (m.support_matrix.data) {
    // nnz == -1 marks a dense support matrix [n_support x n_cols]
    if (m.support_matrix.nnz == -1) {
      rmm_alloc->deallocate(m.support_matrix.data, m.n_support * m.n_cols * sizeof(math_t), stream);
    } else {
      rmm_alloc->deallocate(m.support_matrix.data, m.support_matrix.nnz * sizeof(math_t), stream);
    }
  }
  // NOTE(review): m.support_matrix.data is not reset to nullptr here, unlike
  // the other pointers — confirm whether callers rely on that or it is an oversight.
  m.support_matrix.nnz = -1;
  if (m.unique_labels) rmm_alloc->deallocate(m.unique_labels, m.n_classes * sizeof(math_t), stream);
  m.dual_coefs    = nullptr;
  m.support_idx   = nullptr;
  m.unique_labels = nullptr;
}

};  // end namespace SVM
};  // end namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/svm/ws_util.cuh
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "smo_sets.cuh"

namespace ML {
namespace SVM {

/**
 * Mark elements as unavailable if they are in the the idx list.
 * \param [out] available flag whether an idx is available, size [n_rows]
 * \param [in] n_rows number of training vectors
 * \param [in] idx list of indices already selected, size [n_selected]
 * \param [in] n_selected number of elements in the idx list
 */
__global__ void set_unavailable(bool* available, int n_rows, const int* idx, int n_selected);

/** Flag every element that belongs to the upper set.
 * One thread per element; out-of-range threads exit immediately.
 * @param [out] available size [n]
 * @param [in] n of elements in the working set
 * @param [in] alpha dual coefficients, size [n]
 * @param [in] y class label, must be +/-1, size [n]
 * @param [in] C penalty factor
 */
template <typename math_t>
__global__ void set_upper(
  bool* available, int n, const math_t* alpha, const math_t* y, const math_t* C)
{
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n) return;
  available[idx] = in_upper(alpha[idx], y[idx], C[idx]);
}

/** Flag every element that belongs to the lower set.
 * One thread per element; out-of-range threads exit immediately.
 * @param [out] available size [n]
 * @param [in] n of elements in the working set
 * @param [in] alpha dual coefficients, size [n]
 * @param [in] y class label, must be +/-1, size [n]
 * @param [in] C penalty factor
 */
template <typename math_t>
__global__ void set_lower(
  bool* available, int n, const math_t* alpha, const math_t* y, const math_t* C)
{
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n) return;
  available[idx] = in_lower(alpha[idx], y[idx], C[idx]);
}

/**
 * Get the priority of the elements that are selected by new_idx.
 *
 * We look up these indices from the old working set (idx), and return their
 * priority increased by one.
 *
 * @param [out] new_priority size [n_selected]
 * @param [in] n_selected (less equal n_ws)
 * @param [in] new_idx size [n_selected]
 * @param [in] n_ws working set size
 * @param [in] idx indices in the old working set, size [n_ws]
 * @param [in] priority of elements in the old working set, size [n_ws]
 */
__global__ void update_priority(int* new_priority,
                                int n_selected,
                                const int* new_idx,
                                int n_ws,
                                const int* idx,
                                const int* priority);

}  // namespace SVM
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/svm/linear.cu
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Linear SVM (classification and regression) implemented on top of the
// quasi-Newton GLM solver (GLM::qnFit with SVC/SVR loss variants).
// Multiclass problems are handled one-vs-rest; optional probability output is
// calibrated by fitting a per-class logistic regression on the decision values.

#include <limits>
#include <random>
#include <type_traits>

#include <common/nvtx.hpp>
#include <cublas_v2.h>
#include <cuml/linear_model/glm.hpp>
#include <cuml/svm/svm_model.h>
#include <cuml/svm/svm_parameter.h>
#include <omp.h>
#include <raft/core/handle.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/distance/kernels.cuh>
#include <raft/label/classlabels.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/gemv.cuh>
#include <raft/linalg/map.cuh>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/tuple.h>

#include <cuml/svm/linear.hpp>

namespace ML {
namespace SVM {

namespace {

/** Checked narrowing conversion: asserts the size fits into an `int`. */
inline int narrowDown(std::size_t n)
{
  ASSERT(std::size_t(std::numeric_limits<int>::max()) >= n,
         "LinearSVM supports input sizes only within `int` range at this point (got = %zu)",
         n);
  return int(n);
}

/** The cuda kernel for classification. Call it via PredictClass::run(..). */
template <typename T, int BX = 32, int BY = 8>
__global__ void predictClass(
  T* out, const T* z, const T* classes, const int nRows, const int coefCols)
{
  // One row of `z` is handled by one warp-slice of BX threads (threadIdx.x).
  const int i = threadIdx.y + blockIdx.y * BY;
  if (i >= nRows) return;
  const T* row = z + i * coefCols;
  T maxval     = std::numeric_limits<T>::lowest();
  int maxj     = 0;
  for (int j = threadIdx.x; j < coefCols; j += BX) {
    T t = row[j];
    if (t > maxval) {
      maxj   = j;
      maxval = t;
    }
  }
  // Binary case: a single score; its sign selects one of the two classes.
  if (coefCols == 1 && threadIdx.x == 0) { out[i] = classes[maxval > 0]; }
  if constexpr (BX > 1) {
    // Multiclass: argmax across the BX lanes covering this row.
    typedef cub::WarpReduce<cub::KeyValuePair<int, T>, BX> WarpRed;
    __shared__ typename WarpRed::TempStorage warpStore[BY];
    auto maxkv =
      WarpRed(warpStore[threadIdx.y]).Reduce(cub::KeyValuePair(maxj, maxval), cub::ArgMax());
    if (threadIdx.x == 0) out[i] = classes[maxkv.key];
  } else {
    // Some older nvcc versions complain on maxj being unused when BX == 1.
    std::ignore = maxj;
  }
}

/**
 * The wrapper struct on top of predictClass that recursively selects the best BX
 * (largest BX satisfying `BX < coefCols*2`) and then schedules the kernel launch.
 *
 * @tparam T - the data element type (e.g. float/double).
 * @tparam BlockSize - the total size of the cuda thread block (BX * BY).
 * @tparam BX - the size of the block along rows (nClasses dim).
 */
template <typename T, int BlockSize = 256, int BX = 32>
struct PredictClass {
  static_assert(BX <= 32, "BX must be not larger than warpSize");
  static_assert(BX <= BlockSize, "BX must be not larger than BlockSize");
  /**
   * Predict classes using the scores.
   *
   * @param [out] out - vector of classes (nRows,)
   * @param [in] z - row-major matrix of scores (nRows, coefCols)
   * @param [in] classes - class labels in the problem (nClasses, ).
   * @param [in] nRows - number of rows in the data.
   * @param [in] coefCols - nClasses > 2 ? nClasses : 1
   * @param [in] stream - the work stream.
   */
  static inline void run(
    T* out, const T* z, const T* classes, const int nRows, const int coefCols, cudaStream_t stream)
  {
    if constexpr (BX > 1) {
      // Shrink BX at compile time until it snugly fits coefCols.
      if (coefCols <= (BX >> 1))
        return PredictClass<T, BlockSize, std::max<int>(BX >> 1, 1)>::run(
          out, z, classes, nRows, coefCols, stream);
    }
    const int BY = BlockSize / BX;
    const dim3 bs(BX, BY, 1);
    const dim3 gs(1, raft::ceildiv(nRows, BY), 1);
    predictClass<T, BX, BY><<<gs, bs, 0, stream>>>(out, z, classes, nRows, coefCols);
  }
};

/** The cuda kernel for classification. Call it via PredictProba::run(..). */
template <typename T, bool Log, bool Binary, int BX = 32, int BY = 8>
__global__ void predictProba(T* out, const T* z, const int nRows, const int nClasses)
{
  typedef cub::WarpReduce<T, BX> WarpRed;
  __shared__ typename WarpRed::TempStorage shm[BY];
  typename WarpRed::TempStorage& warpStore = shm[threadIdx.y];

  const int i = threadIdx.y + blockIdx.y * BY;
  if (i >= nRows) return;
  const T* rowIn = z + i * (Binary ? 1 : nClasses);
  T* rowOut      = out + i * nClasses;

  // the largest 'z' in the row (to subtract it from z for numeric stability).
  T t      = std::numeric_limits<T>::lowest();
  T maxVal = t;
  int j    = threadIdx.x;
  if constexpr (Binary) {
    t      = rowIn[0];
    maxVal = raft::myMax<T>(t, 0);
    t      = T(j) * t;  // set z[0] = 0, z[1] = t
  } else {
    for (; j < nClasses; j += BX) {
      t      = rowIn[j];
      maxVal = raft::myMax<T>(maxVal, t);
    }
    j -= BX;
    maxVal = WarpRed(warpStore).Reduce(maxVal, cub::Max());
    maxVal = cub::ShuffleIndex<BX>(maxVal, 0, 0xFFFFFFFFU);
  }
  // At this point, either `j` refers to the last valid column idx worked
  // by the current thread, or `j` is negative.
  // We traverse the columns array in the opposite direction in the next
  // block. This allows us to avoid extra global memory accesses when
  // BX >= nClasses, which is a very common case.

  T et;         // Numerator of the softmax.
  T smSum = 0;  // Denominator of the softmax.
  while (j >= 0) {
    et = raft::myExp<T>(t - maxVal);
    smSum += et;
    if (j < BX) break;
    j -= BX;
    t = rowIn[j];
  }
  smSum = WarpRed(warpStore).Reduce(smSum, cub::Sum());
  smSum = cub::ShuffleIndex<BX>(smSum, 0, 0xFFFFFFFFU);

  // Now, either `j` refers to the first valid column idx worked by the
  // current thread, or `j` is negative (no work at all).
  // Traverse in the forward direction again to save the results.
  // Note, no extra memory reads when BX >= nClasses!
  if (j < 0) return;
  T d = Log ? -maxVal - raft::myLog<T>(smSum) : 1 / smSum;
  while (j < nClasses) {
    rowOut[j] = Log ? t + d : et * d;
    j += BX;
    if (j >= nClasses) break;
    t = rowIn[j];
    if constexpr (!Log) et = raft::myExp<T>(t - maxVal);
  }
}

/**
 * The wrapper struct on top of predictProba that recursively selects the best BX
 * (largest BX satisfying `BX < coefCols*2`) and then schedules the kernel launch.
 *
 * @tparam T - the data element type (e.g. float/double).
 * @tparam BlockSize - the total size of the cuda thread block (BX * BY).
 * @tparam BX - the size of the block along rows (nClasses dim).
 */
template <typename T, int BlockSize = 256, int BX = 32>
struct PredictProba {
  static_assert(BX <= 32, "BX must be not larger than warpSize");
  static_assert(BX <= BlockSize, "BX must be not larger than BlockSize");
  /**
   * Predict probabilities using the scores.
   *
   * @param [out] out - row-major matrix of probabilities (nRows, nClasses).
   * @param [in] z - row-major matrix of scores (nRows, Binary ? 1 : nClasses).
   * @param [in] nRows - number of rows in the data.
   * @param [in] nClasses - number of classes in the problem.
   * @param [in] log - whether to compute log-probabilities.
   * @param [in] stream - the work stream.
   */
  static inline void run(
    T* out, const T* z, const int nRows, const int nClasses, const bool log, cudaStream_t stream)
  {
    if constexpr (BX > 2) {
      // BX == 2 is reserved for the binary (single-score) specialization.
      if (nClasses <= (BX >> 1))
        return PredictProba<T, BlockSize, std::max<int>(BX >> 1, 2)>::run(
          out, z, nRows, nClasses, log, stream);
    }
    const int BY      = BlockSize / BX;
    const bool Binary = BX == 2;
    const dim3 bs(BX, BY, 1);
    const dim3 gs(1, raft::ceildiv(nRows, BY), 1);
    if constexpr (Binary)
      ASSERT((void*)out != (void*)z, "PredictProba for the binary case cannot be inplace.");
    if (log)
      predictProba<T, true, Binary, BX, BY><<<gs, bs, 0, stream>>>(out, z, nRows, nClasses);
    else
      predictProba<T, false, Binary, BX, BY><<<gs, bs, 0, stream>>>(out, z, nRows, nClasses);
  }
};

/** The loss function is the main hint for whether we solve classification or regression. */
inline bool isRegression(LinearSVMParams::Loss loss)
{
  return loss == LinearSVMParams::EPSILON_INSENSITIVE ||
         loss == LinearSVMParams::SQUARED_EPSILON_INSENSITIVE;
}

/** A functor that maps the multiclass problem onto the one-vs-rest binary problem */
template <typename T>
struct OvrSelector {
  const T* classes;
  const int selected;
  __device__ T operator()(const T x) const { return x == classes[selected] ? 1 : 0; }
};

/**
 * The linear part of the prediction.
 *
 * @param [in] handle - raft handle
 * @param [in] X - column-major matrix of size (nRows, nCols)
 * @param [in] w - row-major matrix of size [nCols + fitIntercept, coefCols]
 * @param [in] nRows - number of samples
 * @param [in] nCols - number of features
 * @param [in] coefCols - number of columns in `w` (`nClasses == 2 ? 1 : nClasses`)
 * @param [in] fitIntercept - whether to add the bias term
 * @param [out] out - row-major matrix of size [nRows, coefCols]
 * @param [in] stream - cuda stream (not synchronized)
 */
template <typename T>
void predictLinear(const raft::handle_t& handle,
                   const T* X,
                   const T* w,
                   const std::size_t nRows,
                   const std::size_t nCols,
                   const std::size_t coefCols,
                   const bool fitIntercept,
                   T* out,
                   cudaStream_t stream)
{
  // out = X * w^T  (w's last row, if any, is the intercept added below).
  raft::linalg::gemm<T>(handle,
                        out,
                        (T*)X,
                        (T*)w,
                        narrowDown(nRows),
                        narrowDown(coefCols),
                        narrowDown(nCols),
                        false,
                        true,
                        false,
                        stream);

  if (fitIntercept)
    raft::linalg::matrixVectorOp(
      out, out, w + nCols * coefCols, coefCols, nRows, true, true, cub::Sum(), stream);
}

/** A helper struct for selecting handle/stream depending on whether omp parallel is active. */
class WorkerHandle {
 private:
  raft::handle_t* handle_ptr = nullptr;

 public:
  int stream_id = 0;
  const raft::handle_t& handle;
  cudaStream_t stream;

  WorkerHandle(const raft::handle_t& handle, cudaStream_t stream) : handle(handle), stream(stream)
  {
  }

  // Per-thread handle bound to one stream from the main handle's stream pool.
  WorkerHandle(const raft::handle_t& h, int stream_id)
    : handle_ptr{new raft::handle_t{h.get_next_usable_stream(stream_id)}},
      stream_id(stream_id),
      handle(*handle_ptr),
      stream(h.get_next_usable_stream(stream_id))
  {
  }

  ~WorkerHandle()
  {
    if (handle_ptr != nullptr) delete handle_ptr;
  }
};

}  // namespace

template <typename T>
LinearSVMModel<T> LinearSVMModel<T>::allocate(const raft::handle_t& handle,
                                              const LinearSVMParams& params,
                                              const std::size_t nCols,
                                              const std::size_t nClasses)
{
  auto stream = handle.get_stream();
  auto res    = rmm::mr::get_current_device_resource();
  // All model arrays live in ONE contiguous buffer laid out as
  // [w (wSize) | classes (cSize) | probScale (pSize)]; `free` must recompute
  // the exact same total size.
  const std::size_t coefRows = nCols + params.fit_intercept;
  const std::size_t coefCols = nClasses <= 2 ? 1 : nClasses;
  const std::size_t wSize    = coefRows * coefCols;
  const std::size_t cSize    = nClasses >= 2 ? nClasses : 0;
  const std::size_t pSize    = params.probability ? 2 * coefCols : 0;
  auto bytes = static_cast<T*>(res->allocate(sizeof(T) * (wSize + cSize + pSize), stream));
  return LinearSVMModel<T>{/* .w */ bytes,
                           /* .classes */ cSize > 0 ? bytes + wSize : nullptr,
                           /* .probScale */ pSize > 0 ? bytes + wSize + cSize : nullptr,
                           /* .nClasses */ cSize,
                           /* .coefRows */ coefRows};
}

template <typename T>
void LinearSVMModel<T>::free(const raft::handle_t& handle, LinearSVMModel<T>& model)
{
  auto stream = handle.get_stream();
  auto res    = rmm::mr::get_current_device_resource();
  // Recompute the sizes used in `allocate` so the buffer is returned with the
  // exact byte count it was requested with (required by the
  // device_memory_resource deallocate contract).
  const std::size_t coefRows = model.coefRows;
  const std::size_t coefCols = model.coefCols();
  const std::size_t wSize    = coefRows * coefCols;
  const std::size_t cSize    = model.nClasses;
  // BUGFIX: the condition was inverted (`== nullptr ? 2 * coefCols : 0`),
  // which under-reported the size whenever probability scales WERE allocated.
  // `probScale` is non-null exactly when `allocate` reserved 2*coefCols extras.
  const std::size_t pSize = model.probScale == nullptr ? 0 : 2 * coefCols;
  res->deallocate(model.w, sizeof(T) * (wSize + cSize + pSize), stream);
  model.w         = nullptr;
  model.classes   = nullptr;
  model.probScale = nullptr;
}

template <typename T>
LinearSVMModel<T> LinearSVMModel<T>::fit(const raft::handle_t& handle,
                                         const LinearSVMParams& params,
                                         const T* X,
                                         const std::size_t nRows,
                                         const std::size_t nCols,
                                         const T* y,
                                         const T* sampleWeight)
{
  cudaStream_t stream = handle.get_stream();
  rmm::device_uvector<T> classesBuf(0, stream);
  const std::size_t nClasses =
    isRegression(params.loss) ? 0 : raft::label::getUniquelabels(classesBuf, (T*)y, nRows, stream);
  ASSERT(isRegression(params.loss) || nClasses > 1,
         "Found only one unique value in the target data, whereas at least two are required "
         "(one-class classification does not make sense)");
  // from now on, nClasses == 0 implies we solve the regression problem.
  auto model = LinearSVMModel<T>::allocate(handle, params, nCols, nClasses);
  if (model.classes != nullptr) raft::copy(model.classes, classesBuf.data(), nClasses, stream);

  const int coefCols         = narrowDown(model.coefCols());
  const std::size_t coefRows = model.coefRows;

  raft::common::nvtx::range fun_scope("Trace::LinearSVMModel::fit");
  auto nCols1 = nCols + int(params.fit_intercept && params.penalized_intercept);

  T iC = params.C > 0 ? (1.0 / params.C) : 1.0;

  // If the intercept is penalized, model it as an extra all-ones feature
  // column appended to a copy of X.
  T* X1 = (T*)X;
  rmm::device_uvector<T> X1Buf(0, stream);
  if (params.fit_intercept && params.penalized_intercept) {
    X1Buf.resize(nCols1 * nRows, stream);
    X1 = X1Buf.data();
    raft::copy(X1, X, nCols * nRows, stream);
    thrust::device_ptr<T> p(X1 + nCols * nRows);
    thrust::fill(thrust::cuda::par.on(stream), p, p + nRows, 1.0);
  }

  // Translate the SVM hyperparameters into the quasi-Newton solver config.
  ML::GLM::qn_params qn_pams;
  switch (params.loss) {
    case LinearSVMParams::HINGE: qn_pams.loss = ML::GLM::QN_LOSS_SVC_L1; break;
    case LinearSVMParams::SQUARED_HINGE: qn_pams.loss = ML::GLM::QN_LOSS_SVC_L2; break;
    case LinearSVMParams::EPSILON_INSENSITIVE: qn_pams.loss = ML::GLM::QN_LOSS_SVR_L1; break;
    case LinearSVMParams::SQUARED_EPSILON_INSENSITIVE:
      qn_pams.loss = ML::GLM::QN_LOSS_SVR_L2;
      break;
    default: break;
  }
  qn_pams.fit_intercept       = params.fit_intercept && !params.penalized_intercept;
  qn_pams.penalty_l1          = params.penalty == LinearSVMParams::L1 ? iC : 0.0;
  qn_pams.penalty_l2          = params.penalty == LinearSVMParams::L2 ? iC : 0.0;
  qn_pams.penalty_normalized  = true;
  qn_pams.max_iter            = params.max_iter;
  qn_pams.grad_tol            = params.grad_tol;
  qn_pams.change_tol          = params.change_tol;
  qn_pams.linesearch_max_iter = params.linesearch_max_iter;
  qn_pams.lbfgs_memory        = params.lbfgs_memory;
  qn_pams.verbose             = params.verbose;

  // Secondary solver config for the probability-calibration logistic fit.
  ML::GLM::qn_params qn_pams_logistic = qn_pams;
  qn_pams_logistic.loss               = ML::GLM::QN_LOSS_LOGISTIC;
  qn_pams_logistic.fit_intercept      = true;
  qn_pams_logistic.penalty_l1         = 0;
  qn_pams_logistic.penalty_l2 = 1 / T(1 + nRows);  // L2 regularization reflects the flat prior.

  // For multiclass, fit into transposed scratch buffers (one row per class)
  // and transpose back into the model at the end.
  T* y1  = (T*)y;
  T* w1  = model.w;
  T* ps1 = model.probScale;
  rmm::device_uvector<T> y1Buf(0, stream);
  rmm::device_uvector<T> w1Buf(0, stream);
  rmm::device_uvector<T> psBuf(0, stream);
  if (nClasses > 1) {
    y1Buf.resize(nRows * coefCols, stream);
    y1 = y1Buf.data();
  }
  if (coefCols > 1) {
    w1Buf.resize(coefCols * coefRows, stream);
    w1 = w1Buf.data();
    if (params.probability) {
      psBuf.resize(2 * coefCols, stream);
      ps1 = psBuf.data();
    }
  }
  RAFT_CUDA_TRY(cudaMemsetAsync(w1, 0, coefCols * coefRows * sizeof(T), stream));
  if (params.probability) {
    // Identity calibration (scale 1, shift 0) as the starting point.
    thrust::device_ptr<thrust::tuple<T, T>> p((thrust::tuple<T, T>*)ps1);
    thrust::fill(thrust::cuda::par.on(stream), p, p + coefCols, thrust::make_tuple(T(1), T(0)));
  }

  // one-vs-rest logic goes over each class
  const int n_streams = coefCols > 1 ? handle.get_stream_pool_size() : 1;
  bool parallel       = n_streams > 1;
#pragma omp parallel for num_threads(n_streams) if (parallel)
  for (int class_i = 0; class_i < coefCols; class_i++) {
    T* yi = y1 + nRows * class_i;
    T* wi = w1 + coefRows * class_i;
    auto worker =
      parallel ? WorkerHandle(handle, omp_get_thread_num()) : WorkerHandle(handle, stream);
    if (nClasses > 1) {
      // Map labels onto {0, 1} for the current one-vs-rest sub-problem.
      raft::linalg::unaryOp(
        yi, y, nRows, OvrSelector<T>{model.classes, nClasses == 2 ? 1 : class_i}, worker.stream);
    }
    T target;
    int num_iters;
    GLM::qnFit<T>(worker.handle,
                  qn_pams,
                  X1,
                  true,
                  yi,
                  narrowDown(nRows),
                  narrowDown(nCols1),
                  // regression: C == 1; classification: C == 2
                  nClasses == 0 ? 1 : 2,
                  wi,
                  &target,
                  &num_iters,
                  (T*)sampleWeight,
                  T(params.epsilon));
    if (!params.probability) continue;
    // Calibrate probabilities: fit a logistic regression on this class's
    // decision values.
    T* psi = ps1 + 2 * class_i;
    rmm::device_uvector<T> xwBuf(nRows, worker.stream);
    T* xw = xwBuf.data();
    predictLinear(worker.handle, X, wi, nRows, nCols, 1, params.fit_intercept, xw, worker.stream);
    GLM::qnFit<T>(worker.handle,
                  qn_pams_logistic,
                  xw,
                  false,
                  yi,
                  narrowDown(nRows),
                  1,
                  2,
                  psi,
                  &target,
                  &num_iters,
                  (T*)sampleWeight);
  }
  if (parallel) handle.sync_stream_pool();

  if (coefCols > 1) {
    raft::linalg::transpose(handle, w1, model.w, coefRows, coefCols, stream);
    if (params.probability)
      raft::linalg::transpose(handle, ps1, model.probScale, 2, coefCols, stream);
  }
  return model;
}

template <typename T>
void LinearSVMModel<T>::decisionFunction(const raft::handle_t& handle,
                                         const LinearSVMParams& params,
                                         const LinearSVMModel<T>& model,
                                         const T* X,
                                         const std::size_t nRows,
                                         const std::size_t nCols,
                                         T* out)
{
  ASSERT(!isRegression(params.loss), "Decision function is not available for the regression model");
  predictLinear(handle,
                X,
                model.w,
                nRows,
                nCols,
                model.coefCols(),
                params.fit_intercept,
                out,
                handle.get_stream());
}

template <typename T>
void LinearSVMModel<T>::predict(const raft::handle_t& handle,
                                const LinearSVMParams& params,
                                const LinearSVMModel<T>& model,
                                const T* X,
                                const std::size_t nRows,
                                const std::size_t nCols,
                                T* out)
{
  auto stream         = handle.get_stream();
  const auto coefCols = model.coefCols();
  // Regression: the linear response is the prediction itself.
  if (isRegression(params.loss))
    return predictLinear(
      handle, X, model.w, nRows, nCols, coefCols, params.fit_intercept, out, stream);
  // Classification: compute scores, then pick the winning class label.
  rmm::device_uvector<T> temp(nRows * coefCols, stream);
  predictLinear(
    handle, X, model.w, nRows, nCols, coefCols, params.fit_intercept, temp.data(), stream);
  PredictClass<T>::run(out, temp.data(), model.classes, nRows, coefCols, stream);
}

template <typename T>
void LinearSVMModel<T>::predictProba(const raft::handle_t& handle,
                                     const LinearSVMParams& params,
                                     const LinearSVMModel<T>& model,
                                     const T* X,
                                     const std::size_t nRows,
                                     const std::size_t nCols,
                                     const bool log,
                                     T* out)
{
  ASSERT(!isRegression(params.loss),
         "Predicting probabilities is not available for the regression model");
  ASSERT(
    params.probability,
    "The model was not trained to output probabilities (LinearSVMParams.probability == false).");
  ASSERT(model.probScale != nullptr,
         "The model was not trained to output probabilities (model.probScale == nullptr).");
  auto stream         = handle.get_stream();
  const auto coefCols = model.coefCols();
  rmm::device_uvector<T> temp(nRows * coefCols, stream);
  // linear part
  predictLinear(handle, X, model.w, nRows, nCols, coefCols, params.fit_intercept, out, stream);
  // probability calibration: per-class affine transform a * x + b
  raft::linalg::matrixVectorOp(
    temp.data(),
    out,
    model.probScale,
    model.probScale + coefCols,
    coefCols,
    nRows,
    true,
    true,
    [] __device__(const T x, const T a, const T b) { return a * x + b; },
    stream);
  // apply sigmoid/softmax
  PredictProba<T>::run(out, temp.data(), nRows, model.nClasses, log, stream);
}

template class LinearSVMModel<float>;
template class LinearSVMModel<double>;

static_assert(std::is_standard_layout<LinearSVMParams>::value,
              "Must be a standard-layout type for C-interop.");
static_assert(std::is_standard_layout<LinearSVMModel<float>>::value,
              "Must be a standard-layout type for C-interop.");
static_assert(std::is_standard_layout<LinearSVMModel<double>>::value,
              "Must be a standard-layout type for C-interop.");

}  // namespace SVM
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer1.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Slice 1 of the macro-generated experimental-FIL GPU inference template
// instantiations. The instantiation set is split across numbered translation
// units like this one — presumably to bound per-file compile cost; see the
// specialization macros for the exact set emitted per slice.
#include <cuml/experimental/fil/detail/device_initialization/gpu.cuh>
#include <cuml/experimental/fil/detail/infer/gpu.cuh>
#include <cuml/experimental/fil/detail/specializations/device_initialization_macros.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// Explicitly instantiate the GPU inference kernels for slice 1.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::gpu, 1)
}  // namespace inference
namespace device_initialization {
// Matching GPU device-initialization instantiations for slice 1.
CUML_FIL_INITIALIZE_DEVICE(template, 1)
}  // namespace device_initialization
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer6.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Slice 6 of the macro-generated experimental-FIL CPU inference template
// instantiations; the set is split across numbered translation units,
// presumably to bound per-file compile cost.
#include <cuml/experimental/fil/detail/infer/cpu.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// Explicitly instantiate the CPU inference routines for slice 6.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::cpu, 6)
}  // namespace inference
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer2.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Slice 2 of the macro-generated experimental-FIL CPU inference template
// instantiations; the set is split across numbered translation units,
// presumably to bound per-file compile cost.
#include <cuml/experimental/fil/detail/infer/cpu.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// Explicitly instantiate the CPU inference routines for slice 2.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::cpu, 2)
}  // namespace inference
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer3.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Slice 3 of the macro-generated experimental-FIL CPU inference template
// instantiations; the set is split across numbered translation units,
// presumably to bound per-file compile cost.
#include <cuml/experimental/fil/detail/infer/cpu.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// Explicitly instantiate the CPU inference routines for slice 3.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::cpu, 3)
}  // namespace inference
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer3.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Slice 3 of the macro-generated experimental-FIL GPU inference template
// instantiations; the set is split across numbered translation units,
// presumably to bound per-file compile cost.
#include <cuml/experimental/fil/detail/device_initialization/gpu.cuh>
#include <cuml/experimental/fil/detail/infer/gpu.cuh>
#include <cuml/experimental/fil/detail/specializations/device_initialization_macros.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// Explicitly instantiate the GPU inference kernels for slice 3.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::gpu, 3)
}  // namespace inference
namespace device_initialization {
// Matching GPU device-initialization instantiations for slice 3.
CUML_FIL_INITIALIZE_DEVICE(template, 3)
}  // namespace device_initialization
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer7.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Slice 7 of the macro-generated experimental-FIL CPU inference template
// instantiations; the set is split across numbered translation units,
// presumably to bound per-file compile cost.
#include <cuml/experimental/fil/detail/infer/cpu.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// Explicitly instantiate the CPU inference routines for slice 7.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::cpu, 7)
}  // namespace inference
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer7.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Slice 7 of the macro-generated experimental-FIL GPU inference template
// instantiations; the set is split across numbered translation units,
// presumably to bound per-file compile cost.
#include <cuml/experimental/fil/detail/device_initialization/gpu.cuh>
#include <cuml/experimental/fil/detail/infer/gpu.cuh>
#include <cuml/experimental/fil/detail/specializations/device_initialization_macros.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// Explicitly instantiate the GPU inference kernels for slice 7.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::gpu, 7)
}  // namespace inference
namespace device_initialization {
// Matching GPU device-initialization instantiations for slice 7.
CUML_FIL_INITIALIZE_DEVICE(template, 7)
}  // namespace device_initialization
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer1.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Slice 1 of the macro-generated experimental-FIL CPU inference template
// instantiations; the set is split across numbered translation units,
// presumably to bound per-file compile cost.
#include <cuml/experimental/fil/detail/infer/cpu.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// Explicitly instantiate the CPU inference routines for slice 1.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::cpu, 1)
}  // namespace inference
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer4.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Slice 4 of the macro-generated experimental-FIL CPU inference template
// instantiations; the set is split across numbered translation units,
// presumably to bound per-file compile cost.
#include <cuml/experimental/fil/detail/infer/cpu.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// Explicitly instantiate the CPU inference routines for slice 4.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::cpu, 4)
}  // namespace inference
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer4.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Slice 4 of the macro-generated experimental-FIL GPU inference template
// instantiations; the set is split across numbered translation units,
// presumably to bound per-file compile cost.
#include <cuml/experimental/fil/detail/device_initialization/gpu.cuh>
#include <cuml/experimental/fil/detail/infer/gpu.cuh>
#include <cuml/experimental/fil/detail/specializations/device_initialization_macros.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// Explicitly instantiate the GPU inference kernels for slice 4.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::gpu, 4)
}  // namespace inference
namespace device_initialization {
// Matching GPU device-initialization instantiations for slice 4.
CUML_FIL_INITIALIZE_DEVICE(template, 4)
}  // namespace device_initialization
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer5.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Slice 5 of the macro-generated experimental-FIL GPU inference template
// instantiations; the set is split across numbered translation units,
// presumably to bound per-file compile cost.
#include <cuml/experimental/fil/detail/device_initialization/gpu.cuh>
#include <cuml/experimental/fil/detail/infer/gpu.cuh>
#include <cuml/experimental/fil/detail/specializations/device_initialization_macros.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// Explicitly instantiate the GPU inference kernels for slice 5.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::gpu, 5)
}  // namespace inference
namespace device_initialization {
// Matching GPU device-initialization instantiations for slice 5.
CUML_FIL_INITIALIZE_DEVICE(template, 5)
}  // namespace device_initialization
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer0.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Explicit template instantiations for FIL CPU inference, variant index 0.
// CPU TUs have no device_initialization counterpart (unlike the .cu files).
#include <cuml/experimental/fil/detail/infer/cpu.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// CPU infer instantiations for variant 0 (expansion defined by
// CUML_FIL_INFER_ALL in infer_macros.hpp).
CUML_FIL_INFER_ALL(template, raft_proto::device_type::cpu, 0)
}  // namespace inference
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer6.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Explicit template instantiations for FIL GPU inference, variant index 6.
// One of several sharded infer*.cu TUs — see infer_macros.hpp for what the
// variant index selects.
#include <cuml/experimental/fil/detail/device_initialization/gpu.cuh>
#include <cuml/experimental/fil/detail/infer/gpu.cuh>
#include <cuml/experimental/fil/detail/specializations/device_initialization_macros.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// GPU infer instantiations for variant 6.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::gpu, 6)
}  // namespace inference
namespace device_initialization {
// Matching device-initialization instantiations.
CUML_FIL_INITIALIZE_DEVICE(template, 6)
}  // namespace device_initialization
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer0.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Explicit template instantiations for FIL GPU inference, variant index 0.
// One of several sharded infer*.cu TUs — see infer_macros.hpp for what the
// variant index selects.
#include <cuml/experimental/fil/detail/device_initialization/gpu.cuh>
#include <cuml/experimental/fil/detail/infer/gpu.cuh>
#include <cuml/experimental/fil/detail/specializations/device_initialization_macros.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// GPU infer instantiations for variant 0.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::gpu, 0)
}  // namespace inference
namespace device_initialization {
// Matching device-initialization instantiations.
CUML_FIL_INITIALIZE_DEVICE(template, 0)
}  // namespace device_initialization
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer2.cu
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Explicit template instantiations for FIL GPU inference, variant index 2.
// One of several sharded infer*.cu TUs — see infer_macros.hpp for what the
// variant index selects.
#include <cuml/experimental/fil/detail/device_initialization/gpu.cuh>
#include <cuml/experimental/fil/detail/infer/gpu.cuh>
#include <cuml/experimental/fil/detail/specializations/device_initialization_macros.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// GPU infer instantiations for variant 2.
CUML_FIL_INFER_ALL(template, raft_proto::device_type::gpu, 2)
}  // namespace inference
namespace device_initialization {
// Matching device-initialization instantiations.
CUML_FIL_INITIALIZE_DEVICE(template, 2)
}  // namespace device_initialization
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/experimental
rapidsai_public_repos/cuml/cpp/src/experimental/fil/infer5.cpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Explicit template instantiations for FIL CPU inference, variant index 5.
// CPU TUs have no device_initialization counterpart (unlike the .cu files).
#include <cuml/experimental/fil/detail/infer/cpu.hpp>
#include <cuml/experimental/fil/detail/specializations/infer_macros.hpp>
namespace ML {
namespace experimental {
namespace fil {
namespace detail {
namespace inference {
// CPU infer instantiations for variant 5 (expansion defined by
// CUML_FIL_INFER_ALL in infer_macros.hpp).
CUML_FIL_INFER_ALL(template, raft_proto::device_type::cpu, 5)
}  // namespace inference
}  // namespace detail
}  // namespace fil
}  // namespace experimental
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/utils.cuh
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <assert.h>
#include <math.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

#include <cuml/common/logger.hpp>

#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/norm.cuh>

#include <cuda_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>

#include <raft/random/rng.cuh>
#include <raft/stats/sum.cuh>

#include <sys/time.h>
#include <chrono>
#include <iostream>
#include <unistd.h>

#include <raft/util/device_atomics.cuh>

/**
 * @brief Fills a device vector with uniformly distributed random values.
 * @param[out] vector: The output vector you want to overwrite with randomness.
 * @param[in] minimum: The minimum value in the output vector you want.
 * @param[in] maximum: The maximum value in the output vector you want.
 * @param[in] size: The size of the output vector.
 * @param[in] stream: The GPU stream.
 * @param[in] seed: Any seed <= 0 (including the default -1) is replaced with a
 * time-of-day-derived seed, so the output is fresh on every call. A seed > 0
 * makes the output reproducible across runs.
 */
template <typename value_t = float>
void random_vector(value_t* vector,
                   const value_t minimum,
                   const value_t maximum,
                   const int size,
                   cudaStream_t stream,
                   long long seed = -1)
{
  if (seed <= 0) {
    // No usable seed supplied: derive one from wall-clock time (ms resolution)
    struct timeval tp;
    gettimeofday(&tp, NULL);
    seed = tp.tv_sec * 1000 + tp.tv_usec;
  }
  raft::random::Rng random(seed);
  random.uniform<value_t>(vector, size, minimum, maximum, stream);
}

// Global, header-scope timing state shared by the START_TIMER / END_TIMER /
// PRINT_TIMES macros below. These are wall-clock (gettimeofday) millisecond
// accumulators, one per TSNE pipeline phase. NOTE(review): being non-static
// globals in a header, these rely on this header effectively being included
// from a single TU; they are also not thread-safe.
long start, end;
struct timeval timecheck;
double SymmetrizeTime = 0, DistancesTime = 0, NormalizeTime = 0, PerplexityTime = 0,
       BoundingBoxKernel_time = 0, ClearKernel1_time = 0, TreeBuildingKernel_time = 0,
       ClearKernel2_time = 0, SummarizationKernel_time = 0, SortKernel_time = 0,
       RepulsionTime = 0, Reduction_time = 0, attractive_time = 0,
       IntegrationKernel_time = 0;

// To silence warnings

// Record the start of a timed phase (no-op unless debug logging is enabled).
// NOTE(review): these timers measure host-side wall time between macro calls;
// kernels launched in between are asynchronous, so phase attribution is only
// approximate unless the stream is synchronized.
#define START_TIMER \
  if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \
    gettimeofday(&timecheck, NULL); \
    start = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; \
  }

// Close a timed phase and accumulate the elapsed milliseconds into `add_onto`.
#define END_TIMER(add_onto) \
  if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \
    gettimeofday(&timecheck, NULL); \
    end = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; \
    add_onto += (end - start); \
  }

// Emit a per-phase timing breakdown (absolute ms and percent of total) at
// debug log level. `total` is the grand total divided by 100 so that each
// `x / total` term is a percentage.
#define PRINT_TIMES \
  if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \
    double total = (SymmetrizeTime + DistancesTime + NormalizeTime + PerplexityTime + \
                    BoundingBoxKernel_time + ClearKernel1_time + TreeBuildingKernel_time + \
                    ClearKernel2_time + SummarizationKernel_time + SortKernel_time + \
                    RepulsionTime + Reduction_time + attractive_time + IntegrationKernel_time) / \
                   100.0; \
    CUML_LOG_DEBUG( \
      "SymmetrizeTime = %.lf (%.lf)\n" \
      "DistancesTime = %.lf (%.lf)\n" \
      "NormalizeTime = %.lf (%.lf)\n" \
      "PerplexityTime = %.lf (%.lf)\n" \
      "BoundingBoxKernel_time = %.lf (%.lf)\n" \
      "ClearKernel1_time = %.lf (%.lf)\n" \
      "TreeBuildingKernel_time = %.lf (%.lf)\n" \
      "ClearKernel2_time = %.lf (%.lf)\n" \
      "SummarizationKernel_time = %.lf (%.lf)\n" \
      "SortKernel_time = %.lf (%.lf)\n" \
      "RepulsionTime = %.lf (%.lf)\n" \
      "Reduction_time = %.lf (%.lf)\n" \
      "attractive_time = %.lf (%.lf)\n" \
      "IntegrationKernel_time = %.lf (%.lf)\n" \
      "TOTAL TIME = %.lf", \
      SymmetrizeTime, \
      SymmetrizeTime / total, \
      DistancesTime, \
      DistancesTime / total, \
      NormalizeTime, \
      NormalizeTime / total, \
      PerplexityTime, \
      PerplexityTime / total, \
      BoundingBoxKernel_time, \
      BoundingBoxKernel_time / total, \
      ClearKernel1_time, \
      ClearKernel1_time / total, \
      TreeBuildingKernel_time, \
      TreeBuildingKernel_time / total, \
      ClearKernel2_time, \
      ClearKernel2_time / total, \
      SummarizationKernel_time, \
      SummarizationKernel_time / total, \
      SortKernel_time, \
      SortKernel_time / total, \
      RepulsionTime, \
      RepulsionTime / total, \
      Reduction_time, \
      Reduction_time / total, \
      attractive_time, \
      attractive_time / total, \
      IntegrationKernel_time, \
      IntegrationKernel_time / total, \
      total * 100.0); \
  }

/**
 * @brief Grid-wide min/max reduction over Y[0..n) into *min / *max.
 *
 * Each block reduces its threads' values with cub::BlockReduce, then thread 0
 * of each block folds the block result into the global scalars with
 * atomicMin/atomicMax (the float overloads come from
 * raft/util/device_atomics.cuh, included above). Out-of-range threads
 * contribute identity values (numeric max / lowest) so partial tail blocks are
 * handled correctly. When find_min is false, thread_min stays uninitialized
 * but is also never read — only the max reduction runs.
 * NOTE(review): *min / *max must be pre-initialized by the caller to suitable
 * identity values; this kernel only ever tightens them atomically.
 */
template <typename value_t, typename value_idx, int TPB = 256>
__global__ void min_max_kernel(
  const value_t* Y, const value_idx n, value_t* min, value_t* max, bool find_min = true)
{
  auto tid = threadIdx.x + blockDim.x * blockIdx.x;

  typedef cub::BlockReduce<value_t, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage_min;
  __shared__ typename BlockReduce::TempStorage temp_storage_max;

  value_t thread_min, thread_max;
  if (tid < n) {
    thread_max = Y[tid];
    if (find_min) thread_min = thread_max;
  } else {
    // Tail threads: feed reduction identities so they cannot win
    if (find_min) thread_min = std::numeric_limits<value_t>::max();
    thread_max = std::numeric_limits<value_t>::lowest();
  }

  value_t block_min, block_max;
  if (find_min) { block_min = BlockReduce(temp_storage_min).Reduce(thread_min, cub::Min()); }

  block_max = BlockReduce(temp_storage_max).Reduce(thread_max, cub::Max());

  // results stored in first thread of block
  if (threadIdx.x == 0) {
    if (find_min) atomicMin(min, block_min);
    atomicMax(max, block_max);
  }
}

/**
 * CUDA kernel to compute KL divergence
 */
template <typename value_idx, typename value_t>
__global__ void compute_kl_div_k(const value_t* Ps,
                                 const value_t* Qs,
                                 value_t* __restrict__ KL_divs,
                                 const value_idx NNZ)
{
  // One thread per nonzero; elementwise KL term P * log(P / Q).
  const auto index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= NNZ) return;
  const value_t P = Ps[index];
  // Clamp both P and Q away from zero so log/divide stay finite.
  const value_t Q = max(Qs[index], FLT_EPSILON);
  // __logf / __fdividef are fast-math intrinsics: reduced precision,
  // acceptable here since the KL value is only reported, not optimized on.
  KL_divs[index] = P * __logf(__fdividef(max(P, FLT_EPSILON), Q));
}

/**
 * Compute KL divergence between the (unnormalized) P and Q distributions.
 *
 * Normalizes Ps and Qs in place so each sums to 1, computes the elementwise
 * KL terms into KL_divs, then reduces them to the scalar divergence.
 * NOTE(review): Ps and Qs are MUTATED (scaled by 1/sum) by this call, and
 * callers in this file pass Qs and KL_divs pointing at the SAME buffer —
 * that aliasing is safe because compute_kl_div_k reads and writes only
 * element `index` of each. The final thrust::reduce blocks on `stream`.
 */
template <typename value_t>
value_t compute_kl_div(
  value_t* __restrict__ Ps, value_t* Qs, value_t* KL_divs, const size_t NNZ, cudaStream_t stream)
{
  value_t P_sum = thrust::reduce(rmm::exec_policy(stream), Ps, Ps + NNZ);
  raft::linalg::scalarMultiply(Ps, Ps, 1.0f / P_sum, NNZ, stream);

  value_t Q_sum = thrust::reduce(rmm::exec_policy(stream), Qs, Qs + NNZ);
  raft::linalg::scalarMultiply(Qs, Qs, 1.0f / Q_sum, NNZ, stream);

  const size_t block = 128;
  const size_t grid  = raft::ceildiv(NNZ, block);  // ceil-div so tail elements are covered
  compute_kl_div_k<<<grid, block, 0, stream>>>(Ps, Qs, KL_divs, NNZ);

  return thrust::reduce(rmm::exec_policy(stream), KL_divs, KL_divs + NNZ);
}

/**
 * Student-t kernel value Q = (dof / (dof + dist))^((dof + 1) / 2) for a given
 * squared distance and degrees of freedom. Uses the fast-math __powf.
 */
template <typename value_t>
__device__ value_t compute_q(value_t dist, value_t dof)
{
  const value_t exponent = (dof + 1.0f) / 2.0f;
  const value_t Q        = __powf(dof / (dof + dist), exponent);
  return Q;
}
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/barnes_hut_tsne.cuh
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "barnes_hut_kernels.cuh"
#include "utils.cuh"

#include <cuml/common/logger.hpp>
#include <cuml/manifold/tsne.h>

#include <raft/core/handle.hpp>
#include <raft/linalg/eltwise.cuh>
#include <raft/util/cudart_utils.hpp>

#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>

namespace ML {
namespace TSNE {

/**
 * @brief Fast Dimensionality reduction via TSNE using the Barnes Hut O(NlogN) approximation.
 * @param[in] VAL: The values in the attractive forces COO matrix.
 * @param[in] COL: The column indices in the attractive forces COO matrix.
 * @param[in] ROW: The row indices in the attractive forces COO matrix.
 * @param[in] NNZ: The number of non zeros in the attractive forces COO matrix.
 * @param[in] handle: The GPU handle.
 * @param[out] Y: The final embedding. Will overwrite this internally.
 * @param[in] n: Number of rows in data X.
 * @param[in] params: Parameters for TSNE model.
 * @return The KL divergence of the last iteration (0 if never computed).
 */
template <typename value_idx, typename value_t>
value_t Barnes_Hut(value_t* VAL,
                   const value_idx* COL,
                   const value_idx* ROW,
                   const value_idx NNZ,
                   const raft::handle_t& handle,
                   value_t* Y,
                   const value_idx n,
                   const TSNEParams& params)
{
  cudaStream_t stream = handle.get_stream();

  value_t kl_div = 0;

  // Get device properties
  //---------------------------------------------------
  const int blocks = raft::getMultiProcessorCount();

  // Size the quadtree node pool: at least 2n and at least 1024 per SM,
  // rounded up to a multiple of 32, then minus 1 so indices run 0..nnodes
  // inclusive (the kernels below allocate nnodes + 1 slots).
  auto nnodes = n * 2;
  if (nnodes < 1024 * blocks) nnodes = 1024 * blocks;
  while ((nnodes & (32 - 1)) != 0)
    nnodes++;
  nnodes--;
  CUML_LOG_DEBUG("N_nodes = %d blocks = %d", nnodes, blocks);

  // Allocate more space
  // rmm::device_uvector<unsigned> errl(1, stream);
  rmm::device_scalar<unsigned> limiter(stream);
  rmm::device_scalar<value_idx> maxdepthd(stream);
  rmm::device_scalar<value_idx> bottomd(stream);
  rmm::device_scalar<value_t> radiusd(stream);

  BH::InitializationKernel<<<1, 1, 0, stream>>>(
    /*errl.data(),*/ limiter.data(), maxdepthd.data(), radiusd.data());
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  const value_idx FOUR_NNODES = 4 * nnodes;
  const value_idx FOUR_N      = 4 * n;
  const float theta_squared   = params.theta * params.theta;
  const value_idx NNODES      = nnodes;

  // Actual allocations
  rmm::device_uvector<value_idx> startl(nnodes + 1, stream);
  rmm::device_uvector<value_idx> childl((nnodes + 1) * 4, stream);
  rmm::device_uvector<value_t> massl(nnodes + 1, stream);

  // Node masses start at 1 (each point contributes unit mass)
  thrust::device_ptr<value_t> begin_massl = thrust::device_pointer_cast(massl.data());
  thrust::fill(thrust::cuda::par.on(stream), begin_massl, begin_massl + (nnodes + 1), 1.0f);

  rmm::device_uvector<value_t> maxxl(blocks * FACTOR1, stream);
  rmm::device_uvector<value_t> maxyl(blocks * FACTOR1, stream);
  rmm::device_uvector<value_t> minxl(blocks * FACTOR1, stream);
  rmm::device_uvector<value_t> minyl(blocks * FACTOR1, stream);

  // SummarizationKernel
  rmm::device_uvector<value_idx> countl(nnodes + 1, stream);

  // SortKernel
  rmm::device_uvector<value_idx> sortl(nnodes + 1, stream);

  // RepulsionKernel
  rmm::device_uvector<value_t> rep_forces((nnodes + 1) * 2, stream);
  rmm::device_uvector<value_t> attr_forces(n * 2, stream);  // n*2 double for reduction sum

  rmm::device_scalar<value_t> Z_norm(stream);

  rmm::device_scalar<value_t> radiusd_squared(stream);

  // Apply
  rmm::device_uvector<value_t> gains_bh(n * 2, stream);

  thrust::device_ptr<value_t> begin_gains_bh = thrust::device_pointer_cast(gains_bh.data());
  thrust::fill(handle.get_thrust_policy(), begin_gains_bh, begin_gains_bh + (n * 2), 1.0f);

  rmm::device_uvector<value_t> old_forces(n * 2, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(old_forces.data(), 0, sizeof(value_t) * n * 2, stream));

  // Working embedding: x coords in YY[0..nnodes], y coords in
  // YY[nnodes+1 .. 2*(nnodes+1)) (structure-of-arrays layout).
  rmm::device_uvector<value_t> YY((nnodes + 1) * 2, stream);
  if (params.initialize_embeddings) {
    random_vector(YY.data(), -0.0001f, 0.0001f, (nnodes + 1) * 2, stream, params.random_state);
  } else {
    raft::copy(YY.data(), Y, n, stream);
    raft::copy(YY.data() + nnodes + 1, Y + n, n, stream);
  }

  // Deliberate aliasing: Qs (written by the last attractive pass) and
  // KL_divs (scratch for compute_kl_div) share one NNZ-sized buffer —
  // compute_kl_div_k reads/writes only its own element so this is safe.
  rmm::device_uvector<value_t> tmp(NNZ, stream);
  value_t* Qs      = tmp.data();
  value_t* KL_divs = tmp.data();

  // Set cache levels for faster algorithm execution
  //---------------------------------------------------
  RAFT_CUDA_TRY(
    cudaFuncSetCacheConfig(BH::BoundingBoxKernel<value_idx, value_t>, cudaFuncCachePreferShared));
  RAFT_CUDA_TRY(
    cudaFuncSetCacheConfig(BH::TreeBuildingKernel<value_idx, value_t>, cudaFuncCachePreferL1));
  RAFT_CUDA_TRY(cudaFuncSetCacheConfig(BH::ClearKernel1<value_idx>, cudaFuncCachePreferL1));
  RAFT_CUDA_TRY(
    cudaFuncSetCacheConfig(BH::ClearKernel2<value_idx, value_t>, cudaFuncCachePreferL1));
  RAFT_CUDA_TRY(
    cudaFuncSetCacheConfig(BH::SummarizationKernel<value_idx, value_t>, cudaFuncCachePreferShared));
  RAFT_CUDA_TRY(cudaFuncSetCacheConfig(BH::SortKernel<value_idx>, cudaFuncCachePreferL1));
  RAFT_CUDA_TRY(
    cudaFuncSetCacheConfig(BH::RepulsionKernel<value_idx, value_t>, cudaFuncCachePreferL1));
  RAFT_CUDA_TRY(
    cudaFuncSetCacheConfig(BH::attractive_kernel_bh<value_idx, value_t>, cudaFuncCachePreferL1));
  RAFT_CUDA_TRY(
    cudaFuncSetCacheConfig(BH::IntegrationKernel<value_idx, value_t>, cudaFuncCachePreferL1));

  // Do gradient updates
  //---------------------------------------------------
  CUML_LOG_DEBUG("Start gradient updates!");

  value_t momentum      = params.pre_momentum;
  value_t learning_rate = params.pre_learning_rate;

  for (int iter = 0; iter < params.max_iter; iter++) {
    // Force accumulators must be zeroed each iteration
    RAFT_CUDA_TRY(cudaMemsetAsync(static_cast<void*>(rep_forces.data()),
                                  0,
                                  rep_forces.size() * sizeof(*rep_forces.data()),
                                  stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(static_cast<void*>(attr_forces.data()),
                                  0,
                                  attr_forces.size() * sizeof(*attr_forces.data()),
                                  stream));

    BH::Reset_Normalization<<<1, 1, 0, stream>>>(
      Z_norm.data(), radiusd_squared.data(), bottomd.data(), NNODES, radiusd.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());

    if (iter == params.exaggeration_iter) {
      momentum = params.post_momentum;

      // Divide perplexities
      const value_t div = 1.0f / params.early_exaggeration;
      raft::linalg::scalarMultiply(VAL, VAL, div, NNZ, stream);

      learning_rate = params.post_learning_rate;
    }

    // Phase 1: bounding box of the current embedding (root cell extent)
    START_TIMER;
    BH::BoundingBoxKernel<<<blocks * FACTOR1, THREADS1, 0, stream>>>(startl.data(),
                                                                     childl.data(),
                                                                     massl.data(),
                                                                     YY.data(),
                                                                     YY.data() + nnodes + 1,
                                                                     maxxl.data(),
                                                                     maxyl.data(),
                                                                     minxl.data(),
                                                                     minyl.data(),
                                                                     FOUR_NNODES,
                                                                     NNODES,
                                                                     n,
                                                                     limiter.data(),
                                                                     radiusd.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    END_TIMER(BoundingBoxKernel_time);

    START_TIMER;
    BH::ClearKernel1<<<blocks, 1024, 0, stream>>>(childl.data(), FOUR_NNODES, FOUR_N);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    END_TIMER(ClearKernel1_time);

    // Phase 2: insert all points into the quadtree
    START_TIMER;
    BH::TreeBuildingKernel<<<blocks * FACTOR2, THREADS2, 0, stream>>>(
      /*errl.data(),*/ childl.data(),
      YY.data(),
      YY.data() + nnodes + 1,
      NNODES,
      n,
      maxdepthd.data(),
      bottomd.data(),
      radiusd.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    END_TIMER(TreeBuildingKernel_time);

    START_TIMER;
    BH::ClearKernel2<<<blocks * 1, 1024, 0, stream>>>(
      startl.data(), massl.data(), NNODES, bottomd.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    END_TIMER(ClearKernel2_time);

    // Phase 3: per-cell centers of mass / child counts
    START_TIMER;
    BH::SummarizationKernel<<<blocks * FACTOR3, THREADS3, 0, stream>>>(countl.data(),
                                                                       childl.data(),
                                                                       massl.data(),
                                                                       YY.data(),
                                                                       YY.data() + nnodes + 1,
                                                                       NNODES,
                                                                       n,
                                                                       bottomd.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    END_TIMER(SummarizationKernel_time);

    START_TIMER;
    BH::SortKernel<<<blocks * FACTOR4, THREADS4, 0, stream>>>(
      sortl.data(), countl.data(), startl.data(), childl.data(), NNODES, n, bottomd.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    END_TIMER(SortKernel_time);

    // Phase 4: Barnes-Hut repulsive forces (theta-pruned tree traversal)
    START_TIMER;
    BH::RepulsionKernel<<<blocks * FACTOR5, THREADS5, 0, stream>>>(
      /*errl.data(),*/ params.theta,
      params.epssq,
      sortl.data(),
      childl.data(),
      massl.data(),
      YY.data(),
      YY.data() + nnodes + 1,
      rep_forces.data(),
      rep_forces.data() + nnodes + 1,
      Z_norm.data(),
      theta_squared,
      NNODES,
      FOUR_NNODES,
      n,
      radiusd_squared.data(),
      maxdepthd.data());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    END_TIMER(RepulsionTime);

    START_TIMER;
    BH::Find_Normalization<<<1, 1, 0, stream>>>(Z_norm.data(), n);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    END_TIMER(Reduction_time);

    START_TIMER;
    // TODO: Calculate Kullback-Leibler divergence
    // For general embedding dimensions

    // Only materialize Q values on the very last iteration, where they feed
    // the KL-divergence computation below; otherwise pass nullptr.
    bool last_iter = iter == params.max_iter - 1;

    BH::attractive_kernel_bh<<<raft::ceildiv(NNZ, (value_idx)1024), 1024, 0, stream>>>(
      VAL,
      COL,
      ROW,
      YY.data(),
      YY.data() + nnodes + 1,
      attr_forces.data(),
      attr_forces.data() + n,
      last_iter ? Qs : nullptr,
      NNZ,
      fmaxf(params.dim - 1, 1));
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    END_TIMER(attractive_time);

    if (last_iter) {
      kl_div = compute_kl_div(VAL, Qs, KL_divs, NNZ, stream);
      RAFT_CUDA_TRY(cudaPeekAtLastError());
    }

    // Phase 5: integrate forces into the embedding (momentum + gains)
    START_TIMER;
    BH::IntegrationKernel<<<blocks * FACTOR6, THREADS6, 0, stream>>>(learning_rate,
                                                                     momentum,
                                                                     params.early_exaggeration,
                                                                     YY.data(),
                                                                     YY.data() + nnodes + 1,
                                                                     attr_forces.data(),
                                                                     attr_forces.data() + n,
                                                                     rep_forces.data(),
                                                                     rep_forces.data() + nnodes + 1,
                                                                     gains_bh.data(),
                                                                     gains_bh.data() + n,
                                                                     old_forces.data(),
                                                                     old_forces.data() + n,
                                                                     Z_norm.data(),
                                                                     n);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    END_TIMER(IntegrationKernel_time);
  }
  PRINT_TIMES;

  // Copy final YY into true output Y
  raft::copy(Y, YY.data(), n, stream);
  raft::copy(Y + n, YY.data() + nnodes + 1, n, stream);

  return kl_div;
}

}  // namespace TSNE
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/exact_tsne.cuh
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "exact_kernels.cuh"
#include "utils.cuh"

#include <cuml/common/logger.hpp>

#include <raft/util/cudart_utils.hpp>

#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>

namespace ML {
namespace TSNE {

/**
 * @brief Slower Dimensionality reduction via TSNE using the Exact method O(N^2).
 * @param[in] VAL: The values in the attractive forces COO matrix.
 * @param[in] COL: The column indices in the attractive forces COO matrix.
 * @param[in] ROW: The row indices in the attractive forces COO matrix.
 * @param[in] NNZ: The number of non zeros in the attractive forces COO matrix.
 * @param[in] handle: The GPU handle.
 * @param[out] Y: The final embedding. Will overwrite this internally.
 * @param[in] n: Number of rows in data X.
 * @param[in] params: Parameters for TSNE model.
 * @return The KL divergence of the last iteration (0 if the loop early-stops
 * before the final iteration or never computes it).
 */
template <typename value_idx, typename value_t>
value_t Exact_TSNE(value_t* VAL,
                   const value_idx* COL,
                   const value_idx* ROW,
                   const value_idx NNZ,
                   const raft::handle_t& handle,
                   value_t* Y,
                   const value_idx n,
                   const TSNEParams& params)
{
  cudaStream_t stream = handle.get_stream();

  value_t kl_div      = 0;
  const value_idx dim = params.dim;

  if (params.initialize_embeddings)
    random_vector(Y, -0.0001f, 0.0001f, n * dim, stream, params.random_state);

  // Allocate space
  //---------------------------------------------------
  CUML_LOG_DEBUG("Now allocating memory for TSNE.");
  rmm::device_uvector<value_t> norm(n, stream);
  rmm::device_uvector<value_t> Z_sum(2 * n, stream);
  rmm::device_uvector<value_t> means(dim, stream);

  rmm::device_uvector<value_t> attract(n * dim, stream);
  rmm::device_uvector<value_t> repel(n * dim, stream);

  rmm::device_uvector<value_t> velocity(n * dim, stream);
  RAFT_CUDA_TRY(
    cudaMemsetAsync(velocity.data(), 0, velocity.size() * sizeof(*velocity.data()), stream));

  // Per-coordinate adaptive gains, initialized to 1
  rmm::device_uvector<value_t> gains(n * dim, stream);
  thrust::device_ptr<value_t> begin = thrust::device_pointer_cast(gains.data());
  thrust::fill(thrust::cuda::par.on(stream), begin, begin + n * dim, 1.0f);

  rmm::device_uvector<value_t> gradient(n * dim, stream);

  // Deliberate aliasing: Qs and KL_divs share one NNZ-sized scratch buffer;
  // compute_kl_div_k touches only its own element of each, so this is safe.
  rmm::device_uvector<value_t> tmp(NNZ, stream);
  value_t* Qs      = tmp.data();
  value_t* KL_divs = tmp.data();

  //---------------------------------------------------
  // Calculate degrees of freedom
  //---------------------------------------------------
  const float degrees_of_freedom = fmaxf(dim - 1, 1);
  const float df_power           = -(degrees_of_freedom + 1.0f) / 2.0f;
  const float recp_df            = 1.0f / degrees_of_freedom;
  const float C                  = 2.0f * (degrees_of_freedom + 1.0f) / degrees_of_freedom;

  CUML_LOG_DEBUG("Start gradient updates!");
  float momentum      = params.pre_momentum;
  float learning_rate = params.pre_learning_rate;
  auto exaggeration   = params.early_exaggeration;

  bool check_convergence = false;

  for (int iter = 0; iter < params.max_iter; iter++) {
    // Convergence is only checked every 10 iterations, after the early
    // exaggeration phase has ended.
    check_convergence = ((iter % 10) == 0) and (iter > params.exaggeration_iter);

    if (iter == params.exaggeration_iter) {
      momentum      = params.post_momentum;
      learning_rate = params.post_learning_rate;
      exaggeration  = 1.0f;
    }

    // Get row norm of Y
    raft::linalg::rowNorm(norm.data(), Y, dim, n, raft::linalg::L2Norm, false, stream);

    // Q values are only materialized on the final iteration, where they feed
    // the KL-divergence computation.
    bool last_iter = iter == params.max_iter - 1;

    // Compute attractive forces
    TSNE::attractive_forces(VAL,
                            COL,
                            ROW,
                            Y,
                            norm.data(),
                            attract.data(),
                            last_iter ? Qs : nullptr,
                            NNZ,
                            n,
                            dim,
                            fmaxf(params.dim - 1, 1),
                            stream);
    if (last_iter) { kl_div = compute_kl_div(VAL, Qs, KL_divs, NNZ, stream); }

    // Compute repulsive forces
    const float Z = TSNE::repulsive_forces(
      Y, repel.data(), norm.data(), Z_sum.data(), n, dim, df_power, recp_df, stream);

    // Apply / integrate forces
    const float gradient_norm = TSNE::apply_forces(Y,
                                                   velocity.data(),
                                                   attract.data(),
                                                   repel.data(),
                                                   means.data(),
                                                   gains.data(),
                                                   Z,
                                                   learning_rate,
                                                   C,
                                                   exaggeration,
                                                   momentum,
                                                   dim,
                                                   n,
                                                   params.min_gain,
                                                   gradient.data(),
                                                   check_convergence,
                                                   stream);

    if (check_convergence) {
      if (iter % 100 == 0) {
        CUML_LOG_DEBUG("Z at iter = %d = %f and gradient norm = %f", iter, Z, gradient_norm);
      }
      // NOTE(review): early-stopping before the last iteration returns
      // kl_div = 0, since KL is only computed on the final iteration.
      if (gradient_norm < params.min_grad_norm) {
        CUML_LOG_DEBUG(
          "Gradient norm = %f <= min_grad_norm = %f. Early stopped at iter = "
          "%d",
          gradient_norm,
          params.min_grad_norm,
          iter);
        break;
      }
    } else {
      if (iter % 100 == 0) { CUML_LOG_DEBUG("Z at iter = %d = %f", iter, Z); }
    }
  }

  return kl_div;
}

}  // namespace TSNE
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/distances.cuh
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuml/neighbors/knn_sparse.hpp>

#include <raft/core/handle.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/eltwise.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/linalg/symmetrize.cuh>
#include <raft/sparse/selection/knn.cuh>
#include <raft/util/cudart_utils.hpp>

#include <selection/knn.cuh>

#include <cuml/manifold/common.hpp>

#include <raft/core/error.hpp>

#include "utils.cuh"

#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>

#include <thrust/functional.h>
#include <thrust/transform_reduce.h>

namespace ML {
namespace TSNE {

/**
 * @brief Uses FAISS's KNN to find the top n_neighbors. This speeds up the attractive forces.
 *
 * Only two of the four (input layout x index width) combinations are actually
 * implemented below; the other two specializations throw.
 *
 * @param[in] handle: The GPU handle.
 * @param[in] input: dense/sparse manifold input
 * @param[out] k_graph: receives the KNN indices and sorted distances.
 * @param[in] stream: The GPU stream.
 * @param[in] metric: The distance metric.
 * @param[in] p: Minkowski-metric parameter, forwarded to the KNN backend.
 */
template <typename tsne_input, typename value_idx, typename value_t>
void get_distances(const raft::handle_t& handle,
                   tsne_input& input,
                   knn_graph<value_idx, value_t>& k_graph,
                   cudaStream_t stream,
                   raft::distance::DistanceType metric,
                   value_t p);

// dense, int64 indices
template <>
void get_distances(const raft::handle_t& handle,
                   manifold_dense_inputs_t<float>& input,
                   knn_graph<int64_t, float>& k_graph,
                   cudaStream_t stream,
                   raft::distance::DistanceType metric,
                   float p)
{
  // TODO: for TSNE transform first fit some points then transform with 1/(1+d^2)
  // #861

  // Brute-force KNN of the input against itself (single partition).
  std::vector<float*> input_vec = {input.X};
  std::vector<int> sizes_vec    = {input.n};

  /**
   * std::vector<float *> &input, std::vector<int> &sizes, IntType D,
   * float *search_items, IntType n, int64_t *res_I, float *res_D, IntType k,
   * std::shared_ptr<deviceAllocator> allocator, cudaStream_t userStream,
   */

  raft::spatial::knn::brute_force_knn<int64_t, float, int>(handle,
                                                           input_vec,
                                                           sizes_vec,
                                                           input.d,
                                                           input.X,
                                                           input.n,
                                                           k_graph.knn_indices,
                                                           k_graph.knn_dists,
                                                           k_graph.n_neighbors,
                                                           true,
                                                           true,
                                                           nullptr,
                                                           metric,
                                                           p);
}

// dense, int32 indices — unsupported combination, fail loudly
template <>
void get_distances(const raft::handle_t& handle,
                   manifold_dense_inputs_t<float>& input,
                   knn_graph<int, float>& k_graph,
                   cudaStream_t stream,
                   raft::distance::DistanceType metric,
                   float p)
{
  throw raft::exception("Dense TSNE does not support 32-bit integer indices yet.");
}

// sparse, int32
template <>
void get_distances(const raft::handle_t& handle,
                   manifold_sparse_inputs_t<int, float>& input,
                   knn_graph<int, float>& k_graph,
                   cudaStream_t stream,
                   raft::distance::DistanceType metric,
                   float p)
{
  // Sparse brute-force KNN of the CSR input against itself, batched.
  raft::sparse::selection::brute_force_knn(input.indptr,
                                           input.indices,
                                           input.data,
                                           input.nnz,
                                           input.n,
                                           input.d,
                                           input.indptr,
                                           input.indices,
                                           input.data,
                                           input.nnz,
                                           input.n,
                                           input.d,
                                           k_graph.knn_indices,
                                           k_graph.knn_dists,
                                           k_graph.n_neighbors,
                                           handle,
                                           ML::Sparse::DEFAULT_BATCH_SIZE,
                                           ML::Sparse::DEFAULT_BATCH_SIZE,
                                           metric,
                                           p);
}

// sparse, int64 — unsupported combination, fail loudly
template <>
void get_distances(const raft::handle_t& handle,
                   manifold_sparse_inputs_t<int64_t, float>& input,
                   knn_graph<int64_t, float>& k_graph,
                   cudaStream_t stream,
                   raft::distance::DistanceType metric,
                   float p)
{
  throw raft::exception("Sparse TSNE does not support 64-bit integer indices yet.");
}

/**
 * @brief Find the maximum element in the distances matrix, then divide all entries by this.
 * This promotes exp(distances) to not explode.
 *
 * NOTE(review): if every distance is exactly 0 (e.g. all-duplicate input),
 * maxNorm is 0 and the scale becomes 1/0 — confirm upstream guarantees a
 * nonzero distance before relying on this.
 *
 * @param[in] distances: The output sorted distances from KNN (scaled in place)
 * @param[in] total_nn: The number of entries (rows * neighbors)
 * @param[in] stream: The GPU stream
 */
template <typename value_t>
void normalize_distances(value_t* distances, const size_t total_nn, cudaStream_t stream)
{
  // Largest absolute distance over all entries
  auto abs_f       = [] __device__(const value_t& x) { return abs(x); };
  value_t maxNorm  = thrust::transform_reduce(
    rmm::exec_policy(stream), distances, distances + total_nn, abs_f, 0.0f,
    thrust::maximum<value_t>());
  raft::linalg::scalarMultiply(distances, distances, 1.0f / maxNorm, total_nn, stream);
}

/**
 * @brief Performs P + P.T.
 *
 * NOTE(review): the `exaggeration`, `stream`, and `handle` parameters are not
 * used in this body — only the symmetrization call remains. Kept for
 * interface compatibility with callers.
 *
 * @param[in] P: The perplexity matrix (n, k)
 * @param[in] indices: The input sorted indices from KNN.
 * @param[in] n: The number of rows in the data X.
 * @param[in] k: The number of nearest neighbors.
 * @param[in] exaggeration: (unused here)
 * @param[out] COO_Matrix: The final P + P.T output COO matrix.
 * @param[in] stream: The GPU stream.
 * @param[in] handle: The GPU handle.
 */
template <typename value_idx, typename value_t, int TPB_X = 32>
void symmetrize_perplexity(float* P,
                           value_idx* indices,
                           const value_idx n,
                           const int k,
                           const value_t exaggeration,
                           raft::sparse::COO<value_t, value_idx>* COO_Matrix,
                           cudaStream_t stream,
                           const raft::handle_t& handle)
{
  // Symmetrize to form P + P.T
  raft::sparse::linalg::from_knn_symmetrize_matrix<value_idx, value_t>(
    indices, P, n, k, COO_Matrix, stream);
}

}  // namespace TSNE
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/kluger_lab_license.txt
Different attribution requirements and conditions apply to different files in this repository: ======================================================================================== The following license applies to the following files in the src directory: tsne.cpp, tsne.h, sptree.cpp, sptree.h, vptree.h Copyright (c) 2014, Laurens van der Maaten (Delft University of Technology) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. All advertising materials mentioning features or use of this software must display the following acknowledgement: This product includes software developed by the Delft University of Technology. 4. Neither the name of the Delft University of Technology nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY LAURENS VAN DER MAATEN ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL LAURENS VAN DER MAATEN BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ======================================================================================== The following license applies to the following files in the src directory: nbodyfft.h, nbodyfft.cpp, parallel_for.h, time_code.h (The MIT License) Copyright (c) [2019] [George Linderman] Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
======================================================================================== The following license applies to following files in the progress_bar directory: ProgressBar.hpp (The MIT License) Copyright (c) 2016 Prakhar Srivastav <prakhar@prakhar.me> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ======================================================================================== The following license applies to following files in the src directory: annoylib.h Copyright (c) 2013 Spotify AB Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. ======================================================================================== The following license applies to all files in the src/winlibs/fftw directory FFTW is Copyright © 2003, 2007-11 Matteo Frigo, Copyright © 2003, 2007-11 Massachusetts Institute of Technology. FFTW is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA You can also find the GPL on the GNU web site. In addition, we kindly ask you to acknowledge FFTW and its authors in any program or publication in which you use FFTW. (You are not required to do so; it is up to your common sense to decide whether you want to comply with this request or not.) For general publications, we suggest referencing: Matteo Frigo and Steven G. Johnson, “The design and implementation of FFTW3,” Proc. IEEE 93 (2), 216–231 (2005). Non-free versions of FFTW are available under terms different from those of the General Public License. (e.g. they do not require you to accompany any object code using FFTW with the corresponding source code.) For these alternative terms you must purchase a license from MIT’s Technology Licensing Office. Users interested in such a license should contact us (fftw@fftw.org) for more information.
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/cannylabs_tsne_license.txt
/* ECL-BH v4.5: Simulation of the gravitational forces in a star cluster using the Barnes-Hut n-body algorithm. Copyright (c) 2010-2020 Texas State University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Authors: Martin Burtscher and Sahar Azimi URL: The latest version of this code is available at https://userweb.cs.txstate.edu/~burtscher/research/ECL-BH/. Publication: This work is described in detail in the following paper. Martin Burtscher and Keshav Pingali. An Efficient CUDA Implementation of the Tree-based Barnes Hut n-Body Algorithm. Chapter 6 in GPU Computing Gems Emerald Edition, pp. 75-92. 
January 2011. */
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/fft_kernels.cuh
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This code is based on https://github.com/CannyLab/tsne-cuda (licensed under
 * the BSD 3-clause license at cannylabs_tsne_license.txt), which is in turn a
 * CUDA implementation of Linderman et al.'s FIt-SNE (MIT license)
 * (https://github.com/KlugerLab/FIt-SNE).
 */

#pragma once

#include <cuComplex.h>

namespace ML {
namespace TSNE {
namespace FFT {

// Fills the 4-term "charge" vector (1, x, y, x^2 + y^2) for every point.
// One thread per point; excess threads exit via the bounds guard.
template <typename value_idx, typename value_t>
__global__ void compute_chargesQij(volatile value_t* __restrict__ chargesQij,
                                   const value_t* __restrict__ xs,
                                   const value_t* __restrict__ ys,
                                   const value_idx num_points,
                                   const value_idx n_terms)
{
  int TID = threadIdx.x + blockIdx.x * blockDim.x;
  if (TID >= num_points) return;

  value_t x_pt = xs[TID];
  value_t y_pt = ys[TID];

  chargesQij[TID * n_terms + 0] = 1;
  chargesQij[TID * n_terms + 1] = x_pt;
  chargesQij[TID * n_terms + 2] = y_pt;
  chargesQij[TID * n_terms + 3] = x_pt * x_pt + y_pt * y_pt;
}

// Writes the lower-left corner of each interpolation box: x lower bounds in
// the first n_total_boxes entries, y lower bounds in the second half.
// One thread per box (n_boxes * n_boxes total).
template <typename value_idx, typename value_t>
__global__ void compute_bounds(volatile value_t* __restrict__ box_lower_bounds,
                               const value_t box_width,
                               const value_t x_min,
                               const value_t y_min,
                               const value_idx n_boxes,
                               const value_idx n_total_boxes)
{
  const int TID = threadIdx.x + blockIdx.x * blockDim.x;
  if (TID >= n_boxes * n_boxes) return;

  const int i = TID / n_boxes;
  const int j = TID % n_boxes;

  box_lower_bounds[i * n_boxes + j]                 = j * box_width + x_min;
  box_lower_bounds[n_total_boxes + i * n_boxes + j] = i * box_width + y_min;
}

// Squared 2D Cauchy kernel 1 / (1 + ||p - q||^2)^2 between points (x1, x2)
// and (y1, y2) — the t-SNE repulsive interaction kernel.
template <typename value_t>
HDI value_t squared_cauchy_2d(value_t x1, value_t x2, value_t y1, value_t y2)
{
  value_t x1_m_y1 = x1 - y1;
  value_t x2_m_y2 = x2 - y2;
  value_t t       = 1.0f + x1_m_y1 * x1_m_y1 + x2_m_y2 * x2_m_y2;
  return 1.0f / (t * t);
}

// Evaluates the interaction kernel on the equispaced interpolation grid and
// mirrors each value into the four symmetric quadrants of `kernel_tilde`
// (the embedded doubled grid used for circulant convolution).
// One thread per (i, j) grid cell in the first quadrant.
template <typename value_idx, typename value_t>
__global__ void compute_kernel_tilde(volatile value_t* __restrict__ kernel_tilde,
                                     const value_t x_min,
                                     const value_t y_min,
                                     const value_t h,
                                     const value_idx n_interpolation_points_1d,
                                     const value_idx n_fft_coeffs)
{
  const int TID = threadIdx.x + blockIdx.x * blockDim.x;
  if (TID >= n_interpolation_points_1d * n_interpolation_points_1d) return;

  const value_idx i = TID / n_interpolation_points_1d;
  const value_idx j = TID % n_interpolation_points_1d;

  // Kernel value between the grid origin cell center and cell (i, j).
  value_t tmp = squared_cauchy_2d(
    y_min + h / 2, x_min + h / 2, y_min + h / 2 + i * h, x_min + h / 2 + j * h);

  const value_idx n_interpolation_points_1d_p_i = n_interpolation_points_1d + i;
  const value_idx n_interpolation_points_1d_m_i = n_interpolation_points_1d - i;
  const value_idx n_interpolation_points_1d_p_j = n_interpolation_points_1d + j;
  const value_idx n_interpolation_points_1d_m_j = n_interpolation_points_1d - j;

  const value_idx p_i_n = n_interpolation_points_1d_p_i * n_fft_coeffs;
  const value_idx m_i_n = n_interpolation_points_1d_m_i * n_fft_coeffs;

  // Exploit even symmetry of the kernel: write all four mirrored entries.
  kernel_tilde[p_i_n + n_interpolation_points_1d_p_j] = tmp;
  kernel_tilde[m_i_n + n_interpolation_points_1d_p_j] = tmp;
  kernel_tilde[p_i_n + n_interpolation_points_1d_m_j] = tmp;
  kernel_tilde[m_i_n + n_interpolation_points_1d_m_j] = tmp;
}

// Assigns each point to its interpolation box and records its fractional
// position inside that box (in [0, 1) along each axis, after clamping).
// One thread per point.
template <typename value_idx, typename value_t>
__global__ void compute_point_box_idx(volatile value_idx* __restrict__ point_box_idx,
                                      volatile value_t* __restrict__ x_in_box,
                                      volatile value_t* __restrict__ y_in_box,
                                      const value_t* const xs,
                                      const value_t* const ys,
                                      const value_t* const box_lower_bounds,
                                      const value_t min_coord,
                                      const value_t box_width,
                                      const value_idx n_boxes,
                                      const value_idx n_total_boxes,
                                      const value_idx N)
{
  const value_idx TID = threadIdx.x + blockIdx.x * blockDim.x;
  if (TID >= N) return;

  value_idx x_idx = static_cast<value_idx>((xs[TID] - min_coord) / box_width);
  value_idx y_idx = static_cast<value_idx>((ys[TID] - min_coord) / box_width);

  // Clamp to the grid so points on/outside the boundary land in edge boxes.
  x_idx = max((value_idx)0, x_idx);
  x_idx = min(n_boxes - 1, x_idx);
  y_idx = max((value_idx)0, y_idx);
  y_idx = min(n_boxes - 1, y_idx);

  value_idx box_idx  = y_idx * n_boxes + x_idx;
  point_box_idx[TID] = box_idx;

  // Relative offsets within the box; y lower bounds live in the second half
  // of box_lower_bounds (see compute_bounds).
  x_in_box[TID] = (xs[TID] - box_lower_bounds[box_idx]) / box_width;
  y_in_box[TID] = (ys[TID] - box_lower_bounds[n_total_boxes + box_idx]) / box_width;
}

// Evaluates the j-th Lagrange basis polynomial at each point's in-box
// coordinate: product over k != j of (y - t_k), divided by the precomputed
// denominator for node j. One thread per (point, node) pair.
template <typename value_idx, typename value_t>
__global__ void interpolate_device(volatile value_t* __restrict__ interpolated_values,
                                   const value_t* const y_in_box,
                                   const value_t* const y_tilde_spacings,
                                   const value_t* const denominator,
                                   const value_idx n_interpolation_points,
                                   const value_idx N)
{
  const value_idx TID = threadIdx.x + blockIdx.x * blockDim.x;
  if (TID >= N * n_interpolation_points) return;

  value_idx i = TID % N;  // point index
  value_idx j = TID / N;  // interpolation-node index

  value_t value  = 1.0f;
  value_t ybox_i = y_in_box[i];

  for (value_idx k = 0; k < n_interpolation_points; k++) {
    if (j != k) { value *= ybox_i - y_tilde_spacings[k]; }
  }

  interpolated_values[j * N + i] = value / denominator[j];
}

// Scatters each point's charge, weighted by its x/y interpolation
// coefficients, onto the global interpolation-node grid (step 1 of FIt-SNE's
// far-field computation). One thread per (term, point, node_j, node_i) tuple;
// concurrent writes to a node are combined with atomicAdd.
template <typename value_idx, typename value_t>
__global__ void compute_interpolated_indices(value_t* __restrict__ w_coefficients_device,
                                             const value_idx* const point_box_indices,
                                             const value_t* const chargesQij,
                                             const value_t* const x_interpolated_values,
                                             const value_t* const y_interpolated_values,
                                             const value_idx N,
                                             const value_idx n_interpolation_points,
                                             const value_idx n_boxes,
                                             const value_idx n_terms)
{
  value_idx TID = threadIdx.x + blockIdx.x * blockDim.x;
  if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N) return;

  // Decode the flat thread id into (term, point, interp_j, interp_i).
  value_idx current_term = TID % n_terms;
  value_idx i            = (TID / n_terms) % N;
  value_idx interp_j     = ((TID / n_terms) / N) % n_interpolation_points;
  value_idx interp_i     = ((TID / n_terms) / N) / n_interpolation_points;

  value_idx box_idx = point_box_indices[i];
  value_idx box_i   = box_idx % n_boxes;
  value_idx box_j   = box_idx / n_boxes;

  // Global grid index of interpolation node (interp_i, interp_j) in this box.
  value_idx idx = (box_i * n_interpolation_points + interp_i) *
                    (n_boxes * n_interpolation_points) +
                  (box_j * n_interpolation_points) + interp_j;
  atomicAdd(w_coefficients_device + idx * n_terms + current_term,
            x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] *
              chargesQij[i * n_terms + current_term]);
}

// Embeds the (half-size) coefficient grid into the upper-left quadrant of the
// zero-padded FFT input buffer, transposing from term-interleaved to
// term-major layout. One thread per coefficient.
template <typename value_idx, typename value_t>
__global__ void copy_to_fft_input(volatile value_t* __restrict__ fft_input,
                                  const value_t* w_coefficients_device,
                                  const value_idx n_fft_coeffs,
                                  const value_idx n_fft_coeffs_half,
                                  const value_idx n_terms)
{
  const value_idx TID = threadIdx.x + blockIdx.x * blockDim.x;
  if (TID >= n_terms * n_fft_coeffs_half * n_fft_coeffs_half) return;

  value_idx current_term = TID / (n_fft_coeffs_half * n_fft_coeffs_half);
  value_idx current_loc  = TID % (n_fft_coeffs_half * n_fft_coeffs_half);

  value_idx i = current_loc / n_fft_coeffs_half;
  value_idx j = current_loc % n_fft_coeffs_half;

  fft_input[current_term * (n_fft_coeffs * n_fft_coeffs) + i * n_fft_coeffs + j] =
    w_coefficients_device[current_term + current_loc * n_terms];
}

// Extracts the lower-right quadrant of the inverse-FFT output (the valid part
// of the circular convolution) back into term-interleaved layout, dividing by
// n_fft_coeffs^2 to undo cuFFT's unnormalized inverse transform.
template <typename value_idx, typename value_t>
__global__ void copy_from_fft_output(volatile value_t* __restrict__ y_tilde_values,
                                     const value_t* fft_output,
                                     const value_idx n_fft_coeffs,
                                     const value_idx n_fft_coeffs_half,
                                     const value_idx n_terms)
{
  const value_idx TID = threadIdx.x + blockIdx.x * blockDim.x;
  if (TID >= n_terms * n_fft_coeffs_half * n_fft_coeffs_half) return;

  value_idx current_term = TID / (n_fft_coeffs_half * n_fft_coeffs_half);
  value_idx current_loc  = TID % (n_fft_coeffs_half * n_fft_coeffs_half);

  // Offset into the second half of the padded grid along both axes.
  value_idx i = current_loc / n_fft_coeffs_half + n_fft_coeffs_half;
  value_idx j = current_loc % n_fft_coeffs_half + n_fft_coeffs_half;

  y_tilde_values[current_term + n_terms * current_loc] =
    fft_output[current_term * (n_fft_coeffs * n_fft_coeffs) + i * n_fft_coeffs + j] /
    (value_t)(n_fft_coeffs * n_fft_coeffs);
}

// Template so that division is by compile-time divisors.
// Gathers the convolved node values back to each point, weighted by its x/y
// interpolation coefficients, accumulating per-point potentials with
// atomicAdd (step 3 of the far-field computation). Same thread decomposition
// as compute_interpolated_indices, but with n_terms / n_interpolation_points
// fixed at compile time.
template <typename value_idx, typename value_t, int n_terms, int n_interpolation_points>
__global__ void compute_potential_indices(value_t* __restrict__ potentialsQij,
                                          const value_idx* const point_box_indices,
                                          const value_t* const y_tilde_values,
                                          const value_t* const x_interpolated_values,
                                          const value_t* const y_interpolated_values,
                                          const value_idx N,
                                          const value_idx n_boxes)
{
  const value_idx TID = threadIdx.x + blockIdx.x * blockDim.x;
  if (TID >= n_terms * n_interpolation_points * n_interpolation_points * N) return;

  value_idx current_term = TID % n_terms;
  value_idx i            = (TID / n_terms) % N;
  value_idx interp_j     = ((TID / n_terms) / N) % n_interpolation_points;
  value_idx interp_i     = ((TID / n_terms) / N) / n_interpolation_points;

  value_idx box_idx = point_box_indices[i];
  value_idx box_i   = box_idx % n_boxes;
  value_idx box_j   = box_idx / n_boxes;

  value_idx idx = (box_i * n_interpolation_points + interp_i) *
                    (n_boxes * n_interpolation_points) +
                  (box_j * n_interpolation_points) + interp_j;
  // interpolated_values[TID] = x_interpolated_values[i + interp_i * N] *
  //  y_interpolated_values[i + interp_j * N] * y_tilde_values[idx * n_terms + current_term];
  // interpolated_indices[TID] = i * n_terms + current_term;
  atomicAdd(potentialsQij + i * n_terms + current_term,
            x_interpolated_values[i + interp_i * N] * y_interpolated_values[i + interp_j * N] *
              y_tilde_values[idx * n_terms + current_term]);
}

// In-place elementwise complex multiply of each column of `mat` (n x m,
// column-major) by the column vector `vec` — used to apply the FFT'd kernel
// to every term's spectrum. One thread per matrix element.
template <typename value_idx>
__global__ void broadcast_column_vector(cuComplex* __restrict__ mat,
                                        cuComplex* __restrict__ vec,
                                        value_idx n,
                                        value_idx m)
{
  const value_idx TID = threadIdx.x + blockIdx.x * blockDim.x;
  const value_idx i   = TID % n;
  const value_idx j   = TID / n;
  if (j < m) {
    value_idx idx = j * n + i;
    mat[idx]      = cuCmulf(mat[idx], vec[i]);
  }
}

// Combines the four potential terms into per-point repulsive forces (x in the
// first num_points entries, y in the second) and the per-point contribution
// to the normalization constant Z. One thread per point.
template <typename value_idx, typename value_t>
__global__ void compute_repulsive_forces_kernel(
  volatile value_t* __restrict__ repulsive_forces_device,
  volatile value_t* __restrict__ normalization_vec_device,
  const value_t* const xs,
  const value_t* const ys,
  const value_t* const potentialsQij,
  const value_idx num_points,
  const value_idx n_terms)
{
  value_idx TID = threadIdx.x + blockIdx.x * blockDim.x;
  if (TID >= num_points) return;

  value_t phi1 = potentialsQij[TID * n_terms + 0];
  value_t phi2 = potentialsQij[TID * n_terms + 1];
  value_t phi3 = potentialsQij[TID * n_terms + 2];
  value_t phi4 = potentialsQij[TID * n_terms + 3];

  value_t x_pt = xs[TID];
  value_t y_pt = ys[TID];

  normalization_vec_device[TID] =
    (1 + x_pt * x_pt + y_pt * y_pt) * phi1 - 2 * (x_pt * phi2 + y_pt * phi3) + phi4;

  repulsive_forces_device[TID]              = x_pt * phi1 - phi2;
  repulsive_forces_device[TID + num_points] = y_pt * phi1 - phi3;
}

// Accumulates attractive forces P_ij * Q_ij * (y_i - y_j) over the sparse
// COO neighbor list; optionally records Q per nonzero for KL-divergence
// computation. One thread per nonzero; forces combined with atomicAdd.
// NOTE(review): relies on compute_q(dist, dof) defined elsewhere in this
// project — its exact form is not visible here.
template <typename value_idx, typename value_t>
__global__ void compute_Pij_x_Qij_kernel(value_t* __restrict__ attr_forces,
                                         value_t* __restrict__ Qs,
                                         const value_t* __restrict__ pij,
                                         const value_idx* __restrict__ coo_rows,
                                         const value_idx* __restrict__ coo_cols,
                                         const value_t* __restrict__ points,
                                         const value_idx num_points,
                                         const value_idx num_nonzero,
                                         const value_t dof)
{
  const value_idx TID = threadIdx.x + blockIdx.x * blockDim.x;
  if (TID >= num_nonzero) return;
  const value_idx i = coo_rows[TID];
  const value_idx j = coo_cols[TID];

  // Point coordinates are stored SoA: x in [0, num_points), y after.
  value_t ix = points[i];
  value_t iy = points[num_points + i];
  value_t jx = points[j];
  value_t jy = points[num_points + j];
  value_t dx = ix - jx;
  value_t dy = iy - jy;

  const value_t dist = (dx * dx) + (dy * dy);
  const value_t P    = pij[TID];
  const value_t Q    = compute_q(dist, dof);
  const value_t PQ   = P * Q;

  atomicAdd(attr_forces + i, PQ * dx);
  atomicAdd(attr_forces + num_points + i, PQ * dy);
  if (Qs) {  // when computing KL div
    Qs[TID] = Q;
  }
}

// Gradient-descent update with per-coordinate adaptive gains and momentum:
// combines attractive and (normalized) repulsive forces, updates positions,
// zeroes the force accumulators for the next iteration, and stores the new
// velocities and gains. Uses a grid-stride loop, so any launch config covers
// all points.
template <typename value_idx, typename value_t>
__global__ void IntegrationKernel(volatile value_t* __restrict__ points,
                                  volatile value_t* __restrict__ attr_forces,
                                  volatile value_t* __restrict__ rep_forces,
                                  volatile value_t* __restrict__ gains,
                                  volatile value_t* __restrict__ old_forces,
                                  const value_t eta,
                                  const value_t normalization,
                                  const value_t momentum,
                                  const value_t exaggeration,
                                  const value_idx num_points)
{
  // iterate over all bodies assigned to thread
  const value_idx inc = blockDim.x * gridDim.x;
  for (value_idx i = threadIdx.x + blockIdx.x * blockDim.x; i < num_points; i += inc) {
    value_t ux = old_forces[i];
    value_t uy = old_forces[num_points + i];
    value_t gx = gains[i];
    value_t gy = gains[num_points + i];
    value_t dx = exaggeration * attr_forces[i] - (rep_forces[i] / normalization);
    value_t dy = exaggeration * attr_forces[i + num_points] - (rep_forces[i + num_points] / normalization);

    // Grow the gain when the gradient direction flips, shrink otherwise
    // (standard t-SNE "gains" heuristic), with a floor of 0.01.
    gx = signbit(dx) != signbit(ux) ? gx + 0.2 : gx * 0.8;
    gy = signbit(dy) != signbit(uy) ? gy + 0.2 : gy * 0.8;
    gx = gx < 0.01 ? 0.01 : gx;
    gy = gy < 0.01 ? 0.01 : gy;

    ux = momentum * ux - eta * gx * dx;
    uy = momentum * uy - eta * gy * dy;

    points[i] += ux;
    points[i + num_points] += uy;

    // Reset accumulators for the next iteration and persist state.
    attr_forces[i] = 0.0f;
    attr_forces[num_points + i] = 0.0f;
    rep_forces[i] = 0.0f;
    rep_forces[num_points + i] = 0.0f;

    old_forces[i] = ux;
    old_forces[num_points + i] = uy;
    gains[i] = gx;
    gains[num_points + i] = gy;
  }
}

}  // namespace FFT
}  // namespace TSNE
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/tsne_runner.cuh
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "distances.cuh"
#include "exact_kernels.cuh"
#include "utils.cuh"

#include <cuml/common/logger.hpp>
#include <cuml/manifold/common.hpp>

#include <raft/core/handle.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/util/cudart_utils.hpp>

#include <rmm/device_uvector.hpp>

#include <thrust/transform.h>

#include "barnes_hut_tsne.cuh"
#include "exact_tsne.cuh"
#include "fft_tsne.cuh"

namespace ML {

// Orchestrates a full t-SNE run: validates/adjusts parameters at
// construction, builds the symmetrized perplexity (P) matrix, then dispatches
// to the Barnes-Hut, FFT, or exact solver. The embedding is written into the
// `y` buffer supplied via `input_`.
template <typename tsne_input, typename value_idx, typename value_t>
class TSNE_runner {
 public:
  // Stores references to all inputs (they must outlive the runner) and
  // clamps/warns about out-of-range hyperparameters. May mutate `params_`.
  TSNE_runner(const raft::handle_t& handle_,
              tsne_input& input_,
              knn_graph<value_idx, value_t>& k_graph_,
              TSNEParams& params_)
    : handle(handle_),
      input(input_),
      k_graph(k_graph_),
      params(params_),
      COO_Matrix(handle_.get_stream())
  {
    this->n = input.n;
    this->p = input.d;
    this->Y = input.y;

    ML::Logger::get().setLevel(params.verbosity);

    // Barnes-Hut and FFT are 2D-only; silently fall back to the exact solver.
    if (params.dim > 2 and params.algorithm != TSNE_ALGORITHM::EXACT) {
      params.algorithm = TSNE_ALGORITHM::EXACT;
      CUML_LOG_WARN(
        "Barnes Hut and FFT only work for dim == 2. Switching to exact "
        "solution.");
    }
    if (params.n_neighbors > n) params.n_neighbors = n;
    if (params.n_neighbors > 1023) {
      CUML_LOG_WARN("FAISS only supports maximum n_neighbors = 1023.");
      params.n_neighbors = 1023;
    }
    // Perplexity must be less than number of datapoints
    // "How to Use t-SNE Effectively" https://distill.pub/2016/misread-tsne/
    if (params.perplexity > n) params.perplexity = n;

    CUML_LOG_DEBUG(
      "Data size = (%d, %d) with dim = %d perplexity = %f", n, p, params.dim, params.perplexity);
    if (params.perplexity < 5 or params.perplexity > 50)
      CUML_LOG_WARN(
        "Perplexity should be within ranges (5, 50). Your results might be a"
        " bit strange...");
    if (params.n_neighbors < params.perplexity * 3.0f)
      CUML_LOG_WARN(
        "# of Nearest Neighbors should be at least 3 * perplexity. Your results"
        " might be a bit strange...");
  }

  // Builds the P matrix, then runs the configured solver. Returns the
  // solver's objective value (KL divergence, per the solver implementations);
  // falls through to 0 if the algorithm enum holds an unknown value.
  value_t run()
  {
    distance_and_perplexity();

    const auto NNZ  = COO_Matrix.nnz;
    auto* VAL       = COO_Matrix.vals();
    const auto* COL = COO_Matrix.cols();
    const auto* ROW = COO_Matrix.rows();
    //---------------------------------------------------

    switch (params.algorithm) {
      case TSNE_ALGORITHM::BARNES_HUT:
        return TSNE::Barnes_Hut(VAL, COL, ROW, NNZ, handle, Y, n, params);
      case TSNE_ALGORITHM::FFT: return TSNE::FFT_TSNE(VAL, COL, ROW, NNZ, handle, Y, n, params);
      case TSNE_ALGORITHM::EXACT: return TSNE::Exact_TSNE(VAL, COL, ROW, NNZ, handle, Y, n, params);
    }

    return 0;
  }

 private:
  // Pipeline: KNN distances -> (optional squaring) -> normalization ->
  // perplexity bisection search -> row normalization -> symmetrization into
  // COO_Matrix. Timed section-by-section with START_TIMER/END_TIMER.
  void distance_and_perplexity()
  {
    START_TIMER;
    //---------------------------------------------------
    // Get distances
    CUML_LOG_DEBUG("Getting distances.");

    auto stream = handle.get_stream();

    rmm::device_uvector<value_idx> indices(0, stream);
    rmm::device_uvector<value_t> distances(0, stream);

    // If the caller did not supply a precomputed KNN graph, compute one here.
    // NOTE: k_graph then points into `indices`/`distances`, which are local —
    // the pointers are only valid for the remainder of this function.
    if (!k_graph.knn_indices || !k_graph.knn_dists) {
      ASSERT(!k_graph.knn_indices && !k_graph.knn_dists,
             "Either both or none of the KNN parameters should be provided");

      indices   = rmm::device_uvector<value_idx>(n * params.n_neighbors, stream);
      distances = rmm::device_uvector<value_t>(n * params.n_neighbors, stream);

      k_graph.knn_indices = indices.data();
      k_graph.knn_dists   = distances.data();
      TSNE::get_distances(handle, input, k_graph, stream, params.metric, params.p);
    }

    if (params.square_distances) {
      auto policy = handle.get_thrust_policy();
      thrust::transform(policy,
                        k_graph.knn_dists,
                        k_graph.knn_dists + n * params.n_neighbors,
                        k_graph.knn_dists,
                        TSNE::FunctionalSquare());
    }
    //---------------------------------------------------
    END_TIMER(DistancesTime);

    START_TIMER;
    //---------------------------------------------------
    // Normalize distances
    CUML_LOG_DEBUG("Now normalizing distances so exp(D) doesn't explode.");
    TSNE::normalize_distances(k_graph.knn_dists, n * params.n_neighbors, stream);
    //---------------------------------------------------
    END_TIMER(NormalizeTime);

    START_TIMER;
    //---------------------------------------------------
    // Optimal perplexity
    CUML_LOG_DEBUG("Searching for optimal perplexity via bisection search.");
    rmm::device_uvector<value_t> P(n * params.n_neighbors, stream);
    TSNE::perplexity_search(k_graph.knn_dists,
                            P.data(),
                            params.perplexity,
                            params.perplexity_max_iter,
                            params.perplexity_tol,
                            n,
                            params.n_neighbors,
                            handle);
    //---------------------------------------------------
    END_TIMER(PerplexityTime);

    START_TIMER;
    //---------------------------------------------------
    // Normalize perplexity to prepare for symmetrization
    // NOTE(review): this section also reports under NormalizeTime, so that
    // timer aggregates two distinct phases — confirm this is intentional.
    raft::linalg::scalarMultiply(P.data(), P.data(), 1.0f / (2.0f * n), P.size(), stream);
    //---------------------------------------------------
    END_TIMER(NormalizeTime);

    START_TIMER;
    //---------------------------------------------------
    // Convert data to COO layout
    TSNE::symmetrize_perplexity(P.data(),
                                k_graph.knn_indices,
                                n,
                                params.n_neighbors,
                                params.early_exaggeration,
                                &COO_Matrix,
                                stream,
                                handle);
    END_TIMER(SymmetrizeTime);
  }

 public:
  // Symmetrized P + P.T matrix, populated by distance_and_perplexity().
  raft::sparse::COO<value_t, value_idx> COO_Matrix;

 private:
  const raft::handle_t& handle;
  tsne_input& input;
  knn_graph<value_idx, value_t>& k_graph;
  TSNEParams& params;

  value_idx n, p;   // number of samples, input dimensionality
  value_t* Y;       // output embedding buffer (owned by the caller)
};

}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/fft_tsne.cuh
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * This code is based on https://github.com/CannyLab/tsne-cuda (licensed under * the BSD 3-clause license at cannylabs_tsne_license.txt), which is in turn a * CUDA implementation of Linderman et al.'s FIt-SNE (MIT license) * (https://github.com/KlugerLab/FIt-SNE). */ #pragma once #include "fft_kernels.cuh" #include "utils.cuh" #include <cmath> #include <common/device_utils.cuh> #include <cufft_utils.h> #include <raft/linalg/eltwise.cuh> #include <raft/linalg/init.cuh> #include <raft/stats/sum.cuh> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/reduce.h> #include <thrust/transform.h> namespace ML { namespace TSNE { const static int NTHREADS_1024 = 1024; const static int NTHREADS_128 = 128; const static int NTHREADS_32 = 32; struct FunctionalSqrt { template <typename value_t> __host__ __device__ float operator()(const value_t& x) const { return pow(x, 0.5); } }; struct FunctionalSquare { template <typename value_t> __host__ __device__ float operator()(const value_t& x) const { return x * x; } }; template <typename T> cufftResult CUFFTAPI cufft_MakePlanMany(cufftHandle plan, T rank, T* n, T* inembed, T istride, T idist, T* onembed, T ostride, T odist, cufftType type, T batch, size_t* workSize); cufftResult CUFFTAPI cufft_MakePlanMany(cufftHandle plan, int rank, 
int64_t* n, int64_t* inembed, int64_t istride, int64_t idist, int64_t* onembed, int64_t ostride, int64_t odist, cufftType type, int64_t batch, size_t* workSize) { return cufftMakePlanMany64(plan, rank, reinterpret_cast<long long int*>(n), reinterpret_cast<long long int*>(inembed), static_cast<long long int>(istride), static_cast<long long int>(idist), reinterpret_cast<long long int*>(onembed), static_cast<long long int>(ostride), static_cast<long long int>(odist), type, static_cast<long long int>(batch), workSize); } cufftResult CUFFTAPI cufft_MakePlanMany(cufftHandle plan, int rank, int* n, int* inembed, int istride, int idist, int* onembed, int ostride, int odist, cufftType type, int batch, size_t* workSize) { return cufftMakePlanMany( plan, rank, n, inembed, istride, idist, onembed, ostride, odist, type, batch, workSize); } template <typename value_t, typename value_idx> std::pair<value_t, value_t> min_max(const value_t* Y, const value_idx n, cudaStream_t stream) { value_t min_h, max_h; rmm::device_scalar<value_t> min_d(stream); rmm::device_scalar<value_t> max_d(stream); value_t val = std::numeric_limits<value_t>::max(); min_d.set_value_async(val, stream); val = std::numeric_limits<value_t>::lowest(); max_d.set_value_async(val, stream); auto nthreads = 256; auto nblocks = raft::ceildiv(n, (value_idx)nthreads); min_max_kernel<<<nblocks, nthreads, 0, stream>>>(Y, n, min_d.data(), max_d.data(), true); min_h = min_d.value(stream); max_h = max_d.value(stream); raft::interruptible::synchronize(stream); return std::make_pair(std::move(min_h), std::move(max_h)); } /** * @brief Fast Dimensionality reduction via TSNE using the Barnes Hut O(NlogN) approximation. * @param[in] VAL: The values in the attractive forces COO matrix. * @param[in] COL: The column indices in the attractive forces COO matrix. * @param[in] ROW: The row indices in the attractive forces COO matrix. * @param[in] NNZ: The number of non zeros in the attractive forces COO matrix. 
* @param[in] handle: The GPU handle. * @param[out] Y: The final embedding (col-major). * @param[in] n: Number of rows in data X. * @param[in] params: Parameters for TSNE model. */ template <typename value_idx, typename value_t> value_t FFT_TSNE(value_t* VAL, const value_idx* COL, const value_idx* ROW, const value_idx NNZ, const raft::handle_t& handle, value_t* Y, const value_idx n, const TSNEParams& params) { auto stream = handle.get_stream(); auto thrust_policy = handle.get_thrust_policy(); // Get device properties //--------------------------------------------------- const int mp_count = raft::getMultiProcessorCount(); const int dev_major_version = MLCommon::getDeviceCapability().first; // These came from the CannyLab implementation, but I don't know how they were // determined. TODO check/optimize. const int integration_kernel_factor = dev_major_version >= 6 ? 2 : dev_major_version == 5 ? 1 : dev_major_version == 3 ? 2 : 3; constexpr value_idx n_interpolation_points = 3; constexpr value_idx min_num_intervals = 50; // The number of "charges" or s+2 sums i.e. 
number of kernel sums constexpr value_idx n_terms = 4; value_idx n_boxes_per_dim = min_num_intervals; // FFTW is faster on numbers that can be written as 2^a 3^b 5^c 7^d 11^e 13^f // where e+f is either 0 or 1, and the other exponents are arbitrary int allowed_n_boxes_per_dim[20] = {25, 36, 50, 55, 60, 65, 70, 75, 80, 85, 90, 96, 100, 110, 120, 130, 140, 150, 175, 200}; if (n_boxes_per_dim < allowed_n_boxes_per_dim[19]) { // Round up to nearest grid point value_idx chosen_i = 0; while (allowed_n_boxes_per_dim[chosen_i] < n_boxes_per_dim) chosen_i++; n_boxes_per_dim = allowed_n_boxes_per_dim[chosen_i]; } value_idx n_total_boxes = n_boxes_per_dim * n_boxes_per_dim; value_idx total_interpolation_points = n_total_boxes * n_interpolation_points * n_interpolation_points; value_idx n_fft_coeffs_half = n_interpolation_points * n_boxes_per_dim; value_idx n_fft_coeffs = 2 * n_interpolation_points * n_boxes_per_dim; value_idx n_interpolation_points_1d = n_interpolation_points * n_boxes_per_dim; #define DB(type, name, size) rmm::device_uvector<type> name(size, stream) DB(value_t, repulsive_forces_device, n * 2); raft::linalg::zero(repulsive_forces_device.data(), repulsive_forces_device.size(), stream); DB(value_t, attractive_forces_device, n * 2); raft::linalg::zero(attractive_forces_device.data(), attractive_forces_device.size(), stream); DB(value_t, gains_device, n * 2); auto gains_device_thrust = thrust::device_pointer_cast(gains_device.data()); thrust::fill(thrust_policy, gains_device_thrust, gains_device_thrust + (n * 2), 1.0f); DB(value_t, old_forces_device, n * 2); raft::linalg::zero(old_forces_device.data(), old_forces_device.size(), stream); DB(value_t, normalization_vec_device, n); raft::linalg::zero(normalization_vec_device.data(), normalization_vec_device.size(), stream); DB(value_idx, point_box_idx_device, n); DB(value_t, x_in_box_device, n); raft::linalg::zero(x_in_box_device.data(), x_in_box_device.size(), stream); DB(value_t, y_in_box_device, n); 
raft::linalg::zero(y_in_box_device.data(), y_in_box_device.size(), stream); DB(value_t, y_tilde_values, total_interpolation_points * n_terms); raft::linalg::zero(y_tilde_values.data(), y_tilde_values.size(), stream); DB(value_t, x_interpolated_values_device, n * n_interpolation_points); raft::linalg::zero( x_interpolated_values_device.data(), x_interpolated_values_device.size(), stream); DB(value_t, y_interpolated_values_device, n * n_interpolation_points); raft::linalg::zero( y_interpolated_values_device.data(), y_interpolated_values_device.size(), stream); DB(value_t, potentialsQij_device, n * n_terms); raft::linalg::zero(potentialsQij_device.data(), potentialsQij_device.size(), stream); DB(value_t, w_coefficients_device, total_interpolation_points * n_terms); raft::linalg::zero(w_coefficients_device.data(), w_coefficients_device.size(), stream); DB(value_t, all_interpolated_values_device, n_terms * n_interpolation_points * n_interpolation_points * n); raft::linalg::zero( all_interpolated_values_device.data(), all_interpolated_values_device.size(), stream); DB(value_t, output_values, n_terms * n_interpolation_points * n_interpolation_points * n); raft::linalg::zero(output_values.data(), output_values.size(), stream); DB(value_t, all_interpolated_indices, n_terms * n_interpolation_points * n_interpolation_points * n); raft::linalg::zero(all_interpolated_indices.data(), all_interpolated_indices.size(), stream); DB(value_t, output_indices, n_terms * n_interpolation_points * n_interpolation_points * n); raft::linalg::zero(output_indices.data(), output_indices.size(), stream); DB(value_t, chargesQij_device, n * n_terms); raft::linalg::zero(chargesQij_device.data(), chargesQij_device.size(), stream); DB(value_t, box_lower_bounds_device, 2 * n_total_boxes); raft::linalg::zero(box_lower_bounds_device.data(), box_lower_bounds_device.size(), stream); DB(value_t, kernel_tilde_device, n_fft_coeffs * n_fft_coeffs); raft::linalg::zero(kernel_tilde_device.data(), 
kernel_tilde_device.size(), stream); DB(cufftComplex, fft_kernel_tilde_device, 2 * n_interpolation_points_1d * 2 * n_interpolation_points_1d); DB(value_t, fft_input, n_terms * n_fft_coeffs * n_fft_coeffs); raft::linalg::zero(fft_input.data(), fft_input.size(), stream); DB(cufftComplex, fft_w_coefficients, n_terms * n_fft_coeffs * (n_fft_coeffs / 2 + 1)); DB(value_t, fft_output, n_terms * n_fft_coeffs * n_fft_coeffs); raft::linalg::zero(fft_output.data(), fft_output.size(), stream); value_t h = 1.0f / n_interpolation_points; value_t y_tilde_spacings[n_interpolation_points]; y_tilde_spacings[0] = h / 2; for (value_idx i = 1; i < n_interpolation_points; i++) { y_tilde_spacings[i] = y_tilde_spacings[i - 1] + h; } value_t denominator[n_interpolation_points]; for (value_idx i = 0; i < n_interpolation_points; i++) { denominator[i] = 1; for (value_idx j = 0; j < n_interpolation_points; j++) { if (i != j) { denominator[i] *= y_tilde_spacings[i] - y_tilde_spacings[j]; } } } DB(value_t, y_tilde_spacings_device, n_interpolation_points); RAFT_CUDA_TRY(cudaMemcpyAsync(y_tilde_spacings_device.data(), y_tilde_spacings, n_interpolation_points * sizeof(value_t), cudaMemcpyHostToDevice, stream)); DB(value_t, denominator_device, n_interpolation_points); RAFT_CUDA_TRY(cudaMemcpyAsync(denominator_device.data(), denominator, n_interpolation_points * sizeof(value_t), cudaMemcpyHostToDevice, stream)); #undef DB cufftHandle plan_kernel_tilde; cufftHandle plan_dft; cufftHandle plan_idft; CUFFT_TRY(cufftCreate(&plan_kernel_tilde)); CUFFT_TRY(cufftSetStream(plan_kernel_tilde, stream)); CUFFT_TRY(cufftCreate(&plan_dft)); CUFFT_TRY(cufftSetStream(plan_dft, stream)); CUFFT_TRY(cufftCreate(&plan_idft)); CUFFT_TRY(cufftSetStream(plan_idft, stream)); size_t work_size, work_size_dft, work_size_idft; value_idx fft_dimensions[2] = {n_fft_coeffs, n_fft_coeffs}; CUFFT_TRY(cufftMakePlan2d( plan_kernel_tilde, fft_dimensions[0], fft_dimensions[1], CUFFT_R2C, &work_size)); 
CUFFT_TRY(cufft_MakePlanMany(plan_dft, 2, fft_dimensions, NULL, 1, n_fft_coeffs * n_fft_coeffs, NULL, 1, n_fft_coeffs * (n_fft_coeffs / 2 + 1), CUFFT_R2C, n_terms, &work_size_dft)); CUFFT_TRY(cufft_MakePlanMany(plan_idft, 2, fft_dimensions, NULL, 1, n_fft_coeffs * (n_fft_coeffs / 2 + 1), NULL, 1, n_fft_coeffs * n_fft_coeffs, CUFFT_C2R, n_terms, &work_size_idft)); value_t momentum = params.pre_momentum; value_t learning_rate = params.pre_learning_rate; value_t exaggeration = params.early_exaggeration; if (params.initialize_embeddings) { random_vector(Y, 0.0000f, 0.0001f, n * 2, stream, params.random_state); } value_t kl_div = 0; for (int iter = 0; iter < params.max_iter; iter++) { // Compute charges Q_ij { int num_blocks = raft::ceildiv(n, (value_idx)NTHREADS_1024); FFT::compute_chargesQij<<<num_blocks, NTHREADS_1024, 0, stream>>>( chargesQij_device.data(), Y, Y + n, n, n_terms); } if (iter == params.exaggeration_iter) { momentum = params.post_momentum; learning_rate = params.post_learning_rate; exaggeration = params.late_exaggeration; } raft::linalg::zero(w_coefficients_device.data(), w_coefficients_device.size(), stream); raft::linalg::zero(potentialsQij_device.data(), potentialsQij_device.size(), stream); // IntegrationKernel zeroes this, but if this is removed // then FITSNE runs in an indefinite loop raft::linalg::zero(attractive_forces_device.data(), attractive_forces_device.size(), stream); auto minmax_pair = min_max(Y, n * 2, stream); auto min_coord = minmax_pair.first; auto max_coord = minmax_pair.second; value_t box_width = (max_coord - min_coord) / static_cast<value_t>(n_boxes_per_dim); //// Precompute FFT // Left and right bounds of each box, first the lower bounds in the x // direction, then in the y direction { auto num_blocks = raft::ceildiv(n_total_boxes, (value_idx)NTHREADS_32); FFT::compute_bounds<<<num_blocks, NTHREADS_32, 0, stream>>>(box_lower_bounds_device.data(), box_width, min_coord, min_coord, n_boxes_per_dim, n_total_boxes); } { // Evaluate 
the kernel at the interpolation nodes and form the embedded // generating kernel vector for a circulant matrix. // Coordinates of all the equispaced interpolation points value_t h = box_width / n_interpolation_points; auto num_blocks = raft::ceildiv(n_interpolation_points_1d * n_interpolation_points_1d, (value_idx)NTHREADS_32); FFT::compute_kernel_tilde<<<num_blocks, NTHREADS_32, 0, stream>>>(kernel_tilde_device.data(), min_coord, min_coord, h, n_interpolation_points_1d, n_fft_coeffs); } { // Precompute the FFT of the kernel generating matrix CUFFT_TRY(cufftExecR2C( plan_kernel_tilde, kernel_tilde_device.data(), fft_kernel_tilde_device.data())); } { //// Run N-body FFT auto num_blocks = raft::ceildiv(n, (value_idx)NTHREADS_128); FFT::compute_point_box_idx<<<num_blocks, NTHREADS_128, 0, stream>>>( point_box_idx_device.data(), x_in_box_device.data(), y_in_box_device.data(), Y, Y + n, box_lower_bounds_device.data(), min_coord, box_width, n_boxes_per_dim, n_total_boxes, n); // Step 1: Interpolate kernel using Lagrange polynomials and compute the w // coefficients. 
// Compute the interpolated values at each real point with each Lagrange // polynomial in the `x` direction num_blocks = raft::ceildiv(n * n_interpolation_points, (value_idx)NTHREADS_128); FFT::interpolate_device<<<num_blocks, NTHREADS_128, 0, stream>>>( x_interpolated_values_device.data(), x_in_box_device.data(), y_tilde_spacings_device.data(), denominator_device.data(), n_interpolation_points, n); // ...and in the `y` direction FFT::interpolate_device<<<num_blocks, NTHREADS_128, 0, stream>>>( y_interpolated_values_device.data(), y_in_box_device.data(), y_tilde_spacings_device.data(), denominator_device.data(), n_interpolation_points, n); num_blocks = raft::ceildiv(n_terms * n_interpolation_points * n_interpolation_points * n, (value_idx)NTHREADS_128); FFT::compute_interpolated_indices<<<num_blocks, NTHREADS_128, 0, stream>>>( w_coefficients_device.data(), point_box_idx_device.data(), chargesQij_device.data(), x_interpolated_values_device.data(), y_interpolated_values_device.data(), n, n_interpolation_points, n_boxes_per_dim, n_terms); // Step 2: Compute the values v_{m, n} at the equispaced nodes, multiply // the kernel matrix with the coefficients w num_blocks = raft::ceildiv(n_terms * n_fft_coeffs_half * n_fft_coeffs_half, (value_idx)NTHREADS_128); FFT::copy_to_fft_input<<<num_blocks, NTHREADS_128, 0, stream>>>( fft_input.data(), w_coefficients_device.data(), n_fft_coeffs, n_fft_coeffs_half, n_terms); // Compute fft values at interpolated nodes CUFFT_TRY(cufftExecR2C(plan_dft, fft_input.data(), fft_w_coefficients.data())); // Take the broadcasted Hadamard product of a complex matrix and a complex // vector. { const value_idx nn = n_fft_coeffs * (n_fft_coeffs / 2 + 1); auto num_blocks = raft::ceildiv(nn * n_terms, (value_idx)NTHREADS_32); FFT::broadcast_column_vector<<<num_blocks, NTHREADS_32, 0, stream>>>( fft_w_coefficients.data(), fft_kernel_tilde_device.data(), nn, n_terms); } // Invert the computed values at the interpolated nodes. 
CUFFT_TRY(cufftExecC2R(plan_idft, fft_w_coefficients.data(), fft_output.data())); FFT::copy_from_fft_output<<<num_blocks, NTHREADS_128, 0, stream>>>( y_tilde_values.data(), fft_output.data(), n_fft_coeffs, n_fft_coeffs_half, n_terms); // Step 3: Compute the potentials \tilde{\phi} num_blocks = raft::ceildiv(n_terms * n_interpolation_points * n_interpolation_points * n, (value_idx)NTHREADS_128); FFT::compute_potential_indices<value_idx, value_t, n_terms, n_interpolation_points> <<<num_blocks, NTHREADS_128, 0, stream>>>(potentialsQij_device.data(), point_box_idx_device.data(), y_tilde_values.data(), x_interpolated_values_device.data(), y_interpolated_values_device.data(), n, n_boxes_per_dim); } value_t normalization; { // Compute repulsive forces // Make the negative term, or F_rep in the equation 3 of the paper. auto num_blocks = raft::ceildiv(n, (value_idx)NTHREADS_1024); FFT::compute_repulsive_forces_kernel<<<num_blocks, NTHREADS_1024, 0, stream>>>( repulsive_forces_device.data(), normalization_vec_device.data(), Y, Y + n, potentialsQij_device.data(), n, n_terms); auto norm_vec_thrust = thrust::device_pointer_cast(normalization_vec_device.data()); value_t sumQ = thrust::reduce(thrust_policy, norm_vec_thrust, norm_vec_thrust + normalization_vec_device.size(), 0.0f, thrust::plus<value_t>()); normalization = sumQ - n; } // Compute attractive forces { auto num_blocks = raft::ceildiv(NNZ, (value_idx)NTHREADS_1024); const float dof = fmaxf(params.dim - 1, 1); // degree of freedom if (iter == params.max_iter - 1) { // last iteration rmm::device_uvector<value_t> tmp(NNZ, stream); value_t* Qs = tmp.data(); value_t* KL_divs = tmp.data(); FFT::compute_Pij_x_Qij_kernel<<<num_blocks, NTHREADS_1024, 0, stream>>>( attractive_forces_device.data(), Qs, VAL, ROW, COL, Y, n, NNZ, dof); kl_div = compute_kl_div(VAL, Qs, KL_divs, NNZ, stream); } else { FFT::compute_Pij_x_Qij_kernel<<<num_blocks, NTHREADS_1024, 0, stream>>>( attractive_forces_device.data(), (value_t*)nullptr, VAL, ROW, 
COL, Y, n, NNZ, dof); } } // Apply Forces { auto num_blocks = mp_count * integration_kernel_factor; FFT::IntegrationKernel<<<num_blocks, NTHREADS_1024, 0, stream>>>( Y, attractive_forces_device.data(), repulsive_forces_device.data(), gains_device.data(), old_forces_device.data(), learning_rate, normalization, momentum, exaggeration, n); } auto att_forces_thrust = thrust::device_pointer_cast(attractive_forces_device.data()); auto old_forces_thrust = thrust::device_pointer_cast(old_forces_device.data()); thrust::transform(thrust_policy, old_forces_thrust, old_forces_thrust + n, att_forces_thrust, FunctionalSquare()); thrust::transform(thrust_policy, att_forces_thrust, att_forces_thrust + n, att_forces_thrust + n, att_forces_thrust, thrust::plus<value_t>()); thrust::transform(thrust_policy, att_forces_thrust, att_forces_thrust + attractive_forces_device.size(), att_forces_thrust, FunctionalSqrt()); value_t grad_norm = thrust::reduce(thrust_policy, att_forces_thrust, att_forces_thrust + attractive_forces_device.size(), 0.0f, thrust::plus<value_t>()) / attractive_forces_device.size(); if (grad_norm <= params.min_grad_norm) { CUML_LOG_DEBUG("Breaking early as `min_grad_norm` was satisfied, after %d iterations", iter); break; } } CUFFT_TRY(cufftDestroy(plan_kernel_tilde)); CUFFT_TRY(cufftDestroy(plan_dft)); CUFFT_TRY(cufftDestroy(plan_idft)); return kl_div; } } // namespace TSNE } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/exact_kernels.cuh
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <float.h> #include <math.h> #include <raft/linalg/eltwise.cuh> #include <raft/util/cudart_utils.hpp> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/reduce.h> #define restrict __restrict__ namespace ML { namespace TSNE { /****************************************/ /* Finds the best Gaussian bandwidth for each row in the dataset */ template <typename value_idx, typename value_t> __global__ void sigmas_kernel(const value_t* restrict distances, value_t* restrict P, const float perplexity, const float desired_entropy, const int epochs, const float tol, const value_idx n, const int k) { // For every item in row const auto i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i >= n) return; value_t beta_min = -INFINITY, beta_max = INFINITY; value_t beta = 1; register const auto ik = i * k; for (int step = 0; step < epochs; step++) { value_t sum_Pi = FLT_EPSILON; // Exponentiate to get Gaussian for (int j = 0; j < k; j++) { P[ik + j] = __expf(-distances[ik + j] * beta); sum_Pi += P[ik + j]; } // Normalize value_t sum_disti_Pi = 0; const value_t div = __fdividef(1.0f, sum_Pi); for (int j = 0; j < k; j++) { P[ik + j] *= div; sum_disti_Pi += distances[ik + j] * P[ik + j]; } const value_t entropy = __logf(sum_Pi) + beta * sum_disti_Pi; const value_t entropy_diff = entropy - desired_entropy; if (fabs(entropy_diff) <= tol) break; // 
Bisection search if (entropy_diff > 0) { beta_min = beta; if (isinf(beta_max)) beta *= 2.0f; else beta = (beta + beta_max) * 0.5f; } else { beta_max = beta; if (isinf(beta_min)) beta *= 0.5f; else beta = (beta + beta_min) * 0.5f; } } } /****************************************/ /* Finds the best Gaussian bandwidth for each row in the dataset */ template <typename value_idx, typename value_t> __global__ void sigmas_kernel_2d(const value_t* restrict distances, value_t* restrict P, const float perplexity, const float desired_entropy, const int epochs, const float tol, const value_idx n) { // For every item in row const auto i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i >= n) return; value_t beta_min = -INFINITY, beta_max = INFINITY; value_t beta = 1; register const auto ik = i * 2; for (int step = 0; step < epochs; step++) { // Exponentiate to get Gaussian P[ik] = __expf(-distances[ik] * beta); P[ik + 1] = __expf(-distances[ik + 1] * beta); const value_t sum_Pi = FLT_EPSILON + P[ik] + P[ik + 1]; // Normalize const value_t div = __fdividef(1.0f, sum_Pi); P[ik] *= div; P[ik + 1] *= div; const value_t sum_disti_Pi = distances[ik] * P[ik] + distances[ik + 1] * P[ik + 1]; const value_t entropy = __logf(sum_Pi) + beta * sum_disti_Pi; const value_t entropy_diff = entropy - desired_entropy; if (fabs(entropy_diff) <= tol) break; // Bisection search if (entropy_diff > 0) { beta_min = beta; if (isinf(beta_max)) beta *= 2.0f; else beta = (beta + beta_max) * 0.5f; } else { beta_max = beta; if (isinf(beta_min)) beta *= 0.5f; else beta = (beta + beta_min) * 0.5f; } } } /****************************************/ template <typename value_idx, typename value_t> void perplexity_search(const value_t* restrict distances, value_t* restrict P, const float perplexity, const int epochs, const float tol, const value_idx n, const int dim, const raft::handle_t& handle) { const float desired_entropy = logf(perplexity); cudaStream_t stream = handle.get_stream(); if (dim == 2) 
sigmas_kernel_2d<<<raft::ceildiv(n, (value_idx)1024), 1024, 0, stream>>>( distances, P, perplexity, desired_entropy, epochs, tol, n); else sigmas_kernel<<<raft::ceildiv(n, (value_idx)1024), 1024, 0, stream>>>( distances, P, perplexity, desired_entropy, epochs, tol, n, dim); RAFT_CUDA_TRY(cudaPeekAtLastError()); handle.sync_stream(stream); } /****************************************/ /* Compute attractive forces in O(uN) time. Uses only nearest neighbors */ template <typename value_idx, typename value_t> __global__ void attractive_kernel(const value_t* restrict VAL, const value_idx* restrict COL, const value_idx* restrict ROW, const value_t* restrict Y, const value_t* restrict norm, value_t* restrict attract, value_t* restrict Qs, const value_idx NNZ, const value_idx n, const value_idx dim, const value_t dof) { const auto index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= NNZ) return; const auto i = ROW[index], j = COL[index]; // Euclidean distances // TODO: can provide any distance ie cosine // #862 value_t dist = 0; for (int k = 0; k < dim; k++) dist += Y[k * n + i] * Y[k * n + j]; dist = norm[i] + norm[j] - 2.0f * dist; const value_t P = VAL[index]; const value_t Q = compute_q(dist, dof); const value_t PQ = P * Q; // Apply forces for (int k = 0; k < dim; k++) { raft::myAtomicAdd(&attract[k * n + i], PQ * (Y[k * n + i] - Y[k * n + j])); } if (Qs) { // when computing KL div Qs[index] = Q; } } /****************************************/ /* Special case when dim == 2. 
Can speed up many calculations up */ template <typename value_idx, typename value_t> __global__ void attractive_kernel_2d(const value_t* restrict VAL, const value_idx* restrict COL, const value_idx* restrict ROW, const value_t* restrict Y1, const value_t* restrict Y2, const value_t* restrict norm, value_t* restrict attract1, value_t* restrict attract2, value_t* restrict Qs, const value_idx NNZ, const value_t dof) { const auto index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= NNZ) return; const auto i = ROW[index], j = COL[index]; // Euclidean distances // TODO: can provide any distance ie cosine // #862 const value_t dist = norm[i] + norm[j] - 2.0f * (Y1[i] * Y1[j] + Y2[i] * Y2[j]); const value_t P = VAL[index]; const value_t Q = compute_q(dist, dof); const value_t PQ = P * Q; // Apply forces raft::myAtomicAdd(&attract1[i], PQ * (Y1[i] - Y1[j])); raft::myAtomicAdd(&attract2[i], PQ * (Y2[i] - Y2[j])); if (Qs) { // when computing KL div Qs[index] = Q; } } /****************************************/ template <typename value_idx, typename value_t> void attractive_forces(const value_t* restrict VAL, const value_idx* restrict COL, const value_idx* restrict ROW, const value_t* restrict Y, const value_t* restrict norm, value_t* restrict attract, value_t* restrict Qs, const value_idx NNZ, const value_idx n, const value_idx dim, const value_t dof, cudaStream_t stream) { RAFT_CUDA_TRY(cudaMemsetAsync(attract, 0, sizeof(value_t) * n * dim, stream)); // TODO: Calculate Kullback-Leibler divergence // #863 // For general embedding dimensions if (dim != 2) { attractive_kernel<<<raft::ceildiv(NNZ, (value_idx)1024), 1024, 0, stream>>>( VAL, COL, ROW, Y, norm, attract, Qs, NNZ, n, dim, dof); } // For special case dim == 2 else { attractive_kernel_2d<<<raft::ceildiv(NNZ, (value_idx)1024), 1024, 0, stream>>>( VAL, COL, ROW, Y, Y + n, norm, attract, attract + n, Qs, NNZ, dof); } RAFT_CUDA_TRY(cudaPeekAtLastError()); } /****************************************/ /* Computes 
repulsive forces in pseudo-O(N^2) time where many of the math ops are made considerably faster. */ template <typename value_idx, typename value_t> __global__ void repulsive_kernel(const value_t* restrict Y, value_t* restrict repel, const value_t* restrict norm, value_t* restrict Z_sum1, value_t* restrict Z_sum2, const value_idx n, const value_idx dim, const value_t df_power, // -(df + 1)/2) const value_t recp_df) // 1 / df { const auto j = (blockIdx.x * blockDim.x) + threadIdx.x; // for every item in row const auto i = (blockIdx.y * blockDim.y) + threadIdx.y; // for every row if (j >= i || i >= n || j >= n) return; // Euclidean distances // TODO: can provide any distance ie cosine value_t d = 0; for (int k = 0; k < dim; k++) d += Y[k * n + i] * Y[k * n + j]; const value_t euclidean_d = -2.0f * d + norm[i] + norm[j]; // Q and Q^2 const value_t Q = __powf((1.0f + euclidean_d * recp_df), df_power); const value_t Q2 = Q * Q; // Apply forces for (int k = 0; k < dim; k++) { const value_t force = Q2 * (Y[k * n + j] - Y[k * n + i]); raft::myAtomicAdd(&repel[k * n + i], force); raft::myAtomicAdd(&repel[k * n + j], force); } // Sum up Z sum if (i % 2 == 0) raft::myAtomicAdd(&Z_sum1[i], Q); else raft::myAtomicAdd(&Z_sum2[i], Q); } /****************************************/ /* Special case when dim == 2. Much faster since calculations are streamlined. 
*/ template <typename value_idx, typename value_t> __global__ void repulsive_kernel_2d(const value_t* restrict Y1, const value_t* restrict Y2, value_t* restrict repel1, value_t* restrict repel2, const value_t* restrict norm, value_t* restrict Z_sum1, value_t* restrict Z_sum2, const value_idx n) { const auto j = (blockIdx.x * blockDim.x) + threadIdx.x; // for every item in row const auto i = (blockIdx.y * blockDim.y) + threadIdx.y; // for every row if (j >= i || i >= n || j >= n) return; // Euclidean distances // TODO: can provide any distance ie cosine // #862 const value_t euclidean_d = norm[i] + norm[j] - 2.0f * (Y1[i] * Y1[j] + Y2[i] * Y2[j]); const value_t Q = __fdividef(1.0f, (1.0f + euclidean_d)); const value_t Q2 = Q * Q; const value_t force1 = Q2 * (Y1[j] - Y1[i]); const value_t force2 = Q2 * (Y2[j] - Y2[i]); // Add forces raft::myAtomicAdd(&repel1[i], force1); raft::myAtomicAdd(&repel1[j], -force1); raft::myAtomicAdd(&repel2[i], force2); raft::myAtomicAdd(&repel2[j], -force2); // Sum up Z sum if (i % 2 == 0) raft::myAtomicAdd(&Z_sum1[i], Q); else raft::myAtomicAdd(&Z_sum2[i], Q); } /****************************************/ template <typename value_idx, typename value_t, int TPB_X = 32, int TPB_Y = 32> value_t repulsive_forces(const value_t* restrict Y, value_t* restrict repel, const value_t* restrict norm, value_t* restrict Z_sum, const value_idx n, const value_idx dim, const value_t df_power, // -(df + 1)/2) const value_t recp_df, cudaStream_t stream) { RAFT_CUDA_TRY(cudaMemsetAsync(Z_sum, 0, sizeof(value_t) * 2 * n, stream)); RAFT_CUDA_TRY(cudaMemsetAsync(repel, 0, sizeof(value_t) * n * dim, stream)); const dim3 threadsPerBlock(TPB_X, TPB_Y); const dim3 numBlocks(raft::ceildiv(n, (value_idx)TPB_X), raft::ceildiv(n, (value_idx)TPB_Y)); // For general embedding dimensions if (dim != 2) { repulsive_kernel<<<numBlocks, threadsPerBlock, 0, stream>>>( Y, repel, norm, Z_sum, Z_sum + n, n, dim, df_power, recp_df); } // For special dim == 2 case else { 
repulsive_kernel_2d<<<numBlocks, threadsPerBlock, 0, stream>>>( Y, Y + n, repel, repel + n, norm, Z_sum, Z_sum + n, n); } RAFT_CUDA_TRY(cudaPeekAtLastError()); // Find sum(Z_sum) thrust::device_ptr<value_t> begin = thrust::device_pointer_cast(Z_sum); value_t Z = thrust::reduce(thrust::cuda::par.on(stream), begin, begin + 2 * n); return 1.0f / (2.0f * (Z + (value_t)n)); // Notice + n since diagonal of repulsion sums to n } /****************************************/ /* Applies or integrates all forces. Uses more gains and constrains the output for output stability */ template <typename value_idx, typename value_t> __global__ void apply_kernel(value_t* restrict Y, value_t* restrict velocity, const value_t* restrict attract, const value_t* restrict repel, value_t* restrict means, value_t* restrict gains, const float Z, // sum(Q) const float learning_rate, const float C, // constant from T-Dist Degrees of Freedom const float exaggeration, const float momentum, const value_idx SIZE, // SIZE = n*dim const value_idx n, const float min_gain, value_t* restrict gradient, const bool check_convergence) { const auto index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= SIZE) return; const value_t dy = C * (exaggeration * attract[index] + Z * repel[index]); if (check_convergence) gradient[index] = dy * dy; // Find new gain // TODO: Incorporate AadaBound (2019) or Adam // #864 if (signbit(dy) != signbit(velocity[index])) gains[index] += 0.2f; // Original TSNE is 0.2 else gains[index] *= 0.8f; // Original TSNE is 0.8 if (gains[index] < min_gain) gains[index] = min_gain; velocity[index] = momentum * velocity[index] - learning_rate * dy * gains[index]; Y[index] += velocity[index]; // Add to mean // raft::myAtomicAdd(&means[index / n], Y[index]); } /****************************************/ template <typename value_idx, typename value_t, int TPB_X = 32, int TPB_Y = 32> value_t apply_forces(value_t* restrict Y, value_t* restrict velocity, const value_t* restrict attract, const 
value_t* restrict repel, value_t* restrict means, value_t* restrict gains, const float Z, // sum(Q) const float learning_rate, const float C, // constant from T-dist const float exaggeration, const float momentum, const value_idx dim, const value_idx n, const float min_gain, value_t* restrict gradient, const bool check_convergence, cudaStream_t stream) { // cudaMemset(means, 0, sizeof(float) * dim); if (check_convergence) RAFT_CUDA_TRY(cudaMemsetAsync(gradient, 0, sizeof(value_t) * n * dim, stream)); apply_kernel<<<raft::ceildiv(n * dim, (value_idx)1024), 1024, 0, stream>>>(Y, velocity, attract, repel, means, gains, Z, learning_rate, C, exaggeration, momentum, n * dim, n, min_gain, gradient, check_convergence); RAFT_CUDA_TRY(cudaPeekAtLastError()); // Find sum of gradient norms float gradient_norm = INFINITY; if (check_convergence) { thrust::device_ptr<value_t> begin = thrust::device_pointer_cast(gradient); gradient_norm = sqrtf(thrust::reduce(thrust::cuda::par.on(stream), begin, begin + n * dim)); } // TODO: Subtract means return gradient_norm; } } // namespace TSNE } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/barnes_hut_kernels.cuh
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #define restrict __restrict__ #define THREADS1 512 #define THREADS2 512 #define THREADS3 768 #define THREADS4 128 #define THREADS5 1024 #define THREADS6 1024 #define THREADS7 1024 #define FACTOR1 3 #define FACTOR2 3 #define FACTOR3 1 #define FACTOR4 4 #define FACTOR5 2 #define FACTOR6 2 #define FACTOR7 1 #include <float.h> #include <raft/util/cudart_utils.hpp> #include <raft/util/device_atomics.cuh> namespace ML { namespace TSNE { namespace BH { /** * Initializes the states of objects. This speeds the overall kernel up. */ template <typename value_idx, typename value_t> __global__ void InitializationKernel(/*int *restrict errd, */ unsigned* restrict limiter, value_idx* restrict maxdepthd, value_t* restrict radiusd) { // errd[0] = 0; maxdepthd[0] = 1; limiter[0] = 0; radiusd[0] = 0.0f; } /** * Reset normalization back to 0. 
*/ template <typename value_idx, typename value_t> __global__ void Reset_Normalization(value_t* restrict Z_norm, value_t* restrict radiusd_squared, value_idx* restrict bottomd, const value_idx NNODES, const value_t* restrict radiusd) { Z_norm[0] = 0.0f; radiusd_squared[0] = radiusd[0] * radiusd[0]; // create root node bottomd[0] = NNODES; } /** * Find 1/Z */ template <typename value_idx, typename value_t> __global__ void Find_Normalization(value_t* restrict Z_norm, const value_idx N) { Z_norm[0] = 1.0f / (Z_norm[0] - N); } /** * Figures the bounding boxes for every point in the embedding. */ template <typename value_idx, typename value_t> __global__ __launch_bounds__(THREADS1) void BoundingBoxKernel(value_idx* restrict startd, value_idx* restrict childd, value_t* restrict massd, value_t* restrict posxd, value_t* restrict posyd, value_t* restrict maxxd, value_t* restrict maxyd, value_t* restrict minxd, value_t* restrict minyd, const value_idx FOUR_NNODES, const value_idx NNODES, const value_idx N, unsigned* restrict limiter, value_t* restrict radiusd) { value_t val, minx, maxx, miny, maxy; __shared__ value_t sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1]; // initialize with valid data (in case #bodies < #threads) minx = maxx = posxd[0]; miny = maxy = posyd[0]; // scan all bodies const auto i = threadIdx.x; const auto inc = THREADS1 * gridDim.x; for (auto j = i + blockIdx.x * THREADS1; j < N; j += inc) { val = posxd[j]; if (val < minx) minx = val; else if (val > maxx) maxx = val; val = posyd[j]; if (val < miny) miny = val; else if (val > maxy) maxy = val; } // reduction in shared memory sminx[i] = minx; smaxx[i] = maxx; sminy[i] = miny; smaxy[i] = maxy; for (auto j = THREADS1 / 2; j > i; j /= 2) { __syncthreads(); const auto k = i + j; sminx[i] = minx = fminf(minx, sminx[k]); smaxx[i] = maxx = fmaxf(maxx, smaxx[k]); sminy[i] = miny = fminf(miny, sminy[k]); smaxy[i] = maxy = fmaxf(maxy, smaxy[k]); } if (i == 0) { // write block result to global 
memory const auto k = blockIdx.x; minxd[k] = minx; maxxd[k] = maxx; minyd[k] = miny; maxyd[k] = maxy; __threadfence(); const auto inc = gridDim.x - 1; if (inc != atomicInc(limiter, inc)) return; // I'm the last block, so combine all block results for (auto j = 0; j <= inc; j++) { minx = fminf(minx, minxd[j]); maxx = fmaxf(maxx, maxxd[j]); miny = fminf(miny, minyd[j]); maxy = fmaxf(maxy, maxyd[j]); } // compute 'radius' atomicExch(radiusd, fmaxf(maxx - minx, maxy - miny) * 0.5f + 1e-5f); massd[NNODES] = -1.0f; startd[NNODES] = 0; posxd[NNODES] = (minx + maxx) * 0.5f; posyd[NNODES] = (miny + maxy) * 0.5f; #pragma unroll for (auto a = 0; a < 4; a++) childd[FOUR_NNODES + a] = -1; } } /** * Clear some of the state vectors up. */ template <typename value_idx> __global__ __launch_bounds__(1024, 1) void ClearKernel1(value_idx* restrict childd, const value_idx FOUR_NNODES, const value_idx FOUR_N) { const auto inc = blockDim.x * gridDim.x; value_idx k = (FOUR_N & -32) + threadIdx.x + blockIdx.x * blockDim.x; if (k < FOUR_N) k += inc; // iterate over all cells assigned to thread #pragma unroll for (; k < FOUR_NNODES; k += inc) childd[k] = -1; } /** * Build the actual QuadTree. 
* See: https://iss.oden.utexas.edu/Publications/Papers/burtscher11.pdf */ template <typename value_idx, typename value_t> __global__ __launch_bounds__(THREADS2) void TreeBuildingKernel(/* int *restrict errd, */ value_idx* restrict childd, const value_t* restrict posxd, const value_t* restrict posyd, const value_idx NNODES, const value_idx N, value_idx* restrict maxdepthd, value_idx* restrict bottomd, const value_t* restrict radiusd) { value_idx j, depth; value_t x, y, r; value_t px, py; value_idx ch, n, locked, patch; // cache root data const value_t radius = radiusd[0]; const value_t rootx = posxd[NNODES]; const value_t rooty = posyd[NNODES]; value_idx localmaxdepth = 1; value_idx skip = 1; const auto inc = blockDim.x * gridDim.x; value_idx i = threadIdx.x + blockIdx.x * blockDim.x; // iterate over all bodies assigned to thread while (i < N) { if (skip != 0) { // new body, so start traversing at root skip = 0; n = NNODES; depth = 1; r = radius * 0.5f; /* Select child node 'j' rootx < px rootx > px * rooty < py 1 -> 3 0 -> 2 * rooty > py 1 -> 1 0 -> 0 */ x = rootx + ((rootx < (px = posxd[i])) ? (j = 1, r) : (j = 0, -r)); y = rooty + ((rooty < (py = posyd[i])) ? (j |= 2, r) : (-r)); } // follow path to leaf cell while ((ch = childd[n * 4 + j]) >= N) { n = ch; depth++; r *= 0.5f; x += ((x < px) ? (j = 1, r) : (j = 0, -r)); y += ((y < py) ? (j |= 2, r) : (-r)); } // (ch)ild will be '-1' (nullptr), '-2' (locked), or an Integer corresponding to a body offset // in the lower [0, N) blocks of childd if (ch != -2) { // skip if child pointer was locked when we examined it, and try again later. locked = n * 4 + j; // store the locked position in case we need to patch in a cell later. if (ch == -1) { // Child is a nullptr ('-1'), so we write our body index to the leaf, and move on to the // next body. 
if (atomicCAS(&childd[locked], (value_idx)-1, i) == -1) { if (depth > localmaxdepth) localmaxdepth = depth; i += inc; // move on to next body skip = 1; } } else { // Child node isn't empty, so we store the current value of the child, lock the leaf, and // patch in a new cell if (ch == atomicCAS(&childd[locked], ch, (value_idx)-2)) { patch = -1; while (ch >= 0) { depth++; const value_idx cell = atomicAdd(bottomd, (value_idx)-1) - 1; if (cell == N) { atomicExch(reinterpret_cast<unsigned long long int*>(bottomd), (unsigned long long int)NNODES); } else if (cell < N) { depth--; continue; } if (patch != -1) childd[n * 4 + j] = cell; if (cell > patch) patch = cell; // Insert migrated child node j = (x < posxd[ch]) ? 1 : 0; if (y < posyd[ch]) j |= 2; childd[cell * 4 + j] = ch; n = cell; r *= 0.5f; x += ((x < px) ? (j = 1, r) : (j = 0, -r)); y += ((y < py) ? (j |= 2, r) : (-r)); ch = childd[n * 4 + j]; if (r <= 1e-10) { break; } } childd[n * 4 + j] = i; if (depth > localmaxdepth) localmaxdepth = depth; i += inc; // move on to next body skip = 2; } } } __threadfence(); if (skip == 2) childd[locked] = patch; } // record maximum tree depth // if (localmaxdepth >= THREADS5) // localmaxdepth = THREADS5 - 1; if (localmaxdepth > 32) localmaxdepth = 32; atomicMax(maxdepthd, localmaxdepth); } /** * Clean more state vectors. 
*/ template <typename value_idx, typename value_t> __global__ __launch_bounds__(1024, 1) void ClearKernel2(value_idx* restrict startd, value_t* restrict massd, const value_idx NNODES, const value_idx* restrict bottomd) { const auto bottom = bottomd[0]; const auto inc = blockDim.x * gridDim.x; auto k = (bottom & -32) + threadIdx.x + blockIdx.x * blockDim.x; if (k < bottom) k += inc; // iterate over all cells assigned to thread #pragma unroll for (; k < NNODES; k += inc) { massd[k] = -1.0f; startd[k] = -1; } } /** * Summarize the KD Tree via cell gathering */ template <typename value_idx, typename value_t> __global__ __launch_bounds__(THREADS3, FACTOR3) void SummarizationKernel(value_idx* restrict countd, const value_idx* restrict childd, volatile value_t* restrict massd, value_t* restrict posxd, value_t* restrict posyd, const value_idx NNODES, const value_idx N, const value_idx* restrict bottomd) { bool flag = 0; value_t cm, px, py; __shared__ value_idx child[THREADS3 * 4]; __shared__ value_t mass[THREADS3 * 4]; const auto bottom = bottomd[0]; const auto inc = blockDim.x * gridDim.x; auto k = (bottom & -32) + threadIdx.x + blockIdx.x * blockDim.x; if (k < bottom) k += inc; const auto restart = k; for (int j = 0; j < 5; j++) // wait-free pre-passes { // iterate over all cells assigned to thread while (k <= NNODES) { if (massd[k] < 0.0f) { for (int i = 0; i < 4; i++) { const auto ch = childd[k * 4 + i]; child[i * THREADS3 + threadIdx.x] = ch; if ((ch >= N) and ((mass[i * THREADS3 + threadIdx.x] = massd[ch]) < 0)) goto CONTINUE_LOOP; } // all children are ready cm = 0.0f; px = 0.0f; py = 0.0f; auto cnt = 0; #pragma unroll for (int i = 0; i < 4; i++) { const int ch = child[i * THREADS3 + threadIdx.x]; if (ch >= 0) { const value_t m = (ch >= N) ? 
(cnt += countd[ch], mass[i * THREADS3 + threadIdx.x]) : (cnt++, massd[ch]); // add child's contribution cm += m; px += posxd[ch] * m; py += posyd[ch] * m; } } countd[k] = cnt; const value_t m = 1.0f / cm; posxd[k] = px * m; posyd[k] = py * m; __threadfence(); // make sure data are visible before setting mass massd[k] = cm; } CONTINUE_LOOP: k += inc; // move on to next cell } k = restart; } int j = 0; // iterate over all cells assigned to thread while (k <= NNODES) { if (massd[k] >= 0) { k += inc; goto SKIP_LOOP; } if (j == 0) { j = 4; for (int i = 0; i < 4; i++) { const auto ch = childd[k * 4 + i]; child[i * THREADS3 + threadIdx.x] = ch; if ((ch < N) or ((mass[i * THREADS3 + threadIdx.x] = massd[ch]) >= 0)) j--; } } else { j = 4; for (int i = 0; i < 4; i++) { const auto ch = child[i * THREADS3 + threadIdx.x]; if ((ch < N) or (mass[i * THREADS3 + threadIdx.x] >= 0) or ((mass[i * THREADS3 + threadIdx.x] = massd[ch]) >= 0)) j--; } } if (j == 0) { // all children are ready cm = 0.0f; px = 0.0f; py = 0.0f; auto cnt = 0; #pragma unroll for (int i = 0; i < 4; i++) { const auto ch = child[i * THREADS3 + threadIdx.x]; if (ch >= 0) { const auto m = (ch >= N) ? 
(cnt += countd[ch], mass[i * THREADS3 + threadIdx.x]) : (cnt++, massd[ch]); // add child's contribution cm += m; px += posxd[ch] * m; py += posyd[ch] * m; } } countd[k] = cnt; const value_t m = 1.0f / cm; posxd[k] = px * m; posyd[k] = py * m; flag = 1; } SKIP_LOOP: __threadfence(); if (flag != 0) { massd[k] = cm; k += inc; flag = 0; } } } /** * Sort the cells */ template <typename value_idx> __global__ __launch_bounds__(THREADS4, FACTOR4) void SortKernel(value_idx* restrict sortd, const value_idx* restrict countd, volatile value_idx* restrict startd, value_idx* restrict childd, const value_idx NNODES, const value_idx N, const value_idx* restrict bottomd) { const value_idx bottom = bottomd[0]; const value_idx dec = blockDim.x * gridDim.x; value_idx k = NNODES + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x; value_idx start; value_idx limiter = 0; // iterate over all cells assigned to thread while (k >= bottom) { // To control possible infinite loops if (++limiter > NNODES) break; // Not a child so skip if ((start = startd[k]) < 0) continue; int j = 0; for (int i = 0; i < 4; i++) { const auto ch = childd[k * 4 + i]; if (ch >= 0) { if (i != j) { // move children to front (needed later for speed) childd[k * 4 + i] = -1; childd[k * 4 + j] = ch; } if (ch >= N) { // child is a cell startd[ch] = start; start += countd[ch]; // add #bodies in subtree } else if (start <= NNODES and start >= 0) { // child is a body sortd[start++] = ch; } j++; } } k -= dec; // move on to next cell } } /** * Calculate the repulsive forces using the KD Tree */ template <typename value_idx, typename value_t> __global__ __launch_bounds__( THREADS5, 1) void RepulsionKernel(/* int *restrict errd, */ const float theta, const float epssqd, // correction for zero distance const value_idx* restrict sortd, const value_idx* restrict childd, const value_t* restrict massd, const value_t* restrict posxd, const value_t* restrict posyd, value_t* restrict velxd, value_t* restrict velyd, value_t* restrict 
Z_norm, const value_t theta_squared, const value_idx NNODES, const value_idx FOUR_NNODES, const value_idx N, const value_t* restrict radiusd_squared, const value_idx* restrict maxdepthd) { // Return if max depth is too deep // Not possible since I limited it to 32 // if (maxdepthd[0] > 32) // { // atomicExch(errd, max_depth); // return; // } const value_t EPS_PLUS_1 = epssqd + 1.0f; __shared__ value_idx pos[THREADS5], node[THREADS5]; __shared__ value_t dq[THREADS5]; if (threadIdx.x == 0) { const auto max_depth = maxdepthd[0]; dq[0] = __fdividef(radiusd_squared[0], theta_squared); for (auto i = 1; i < max_depth; i++) { dq[i] = dq[i - 1] * 0.25f; dq[i - 1] += epssqd; } dq[max_depth - 1] += epssqd; // Add one so EPS_PLUS_1 can be compared for (auto i = 0; i < max_depth; i++) dq[i] += 1.0f; } __syncthreads(); // figure out first thread in each warp (lane 0) // const int base = threadIdx.x / 32; // const int sbase = base * 32; const int sbase = (threadIdx.x / 32) * 32; const bool SBASE_EQ_THREAD = (sbase == threadIdx.x); const int diff = threadIdx.x - sbase; // make multiple copies to avoid index calculations later // Always true // if (diff < 32) dq[diff + sbase] = dq[diff]; //__syncthreads(); __threadfence_block(); // iterate over all bodies assigned to thread const auto MAX_SIZE = FOUR_NNODES + 4; for (auto k = threadIdx.x + blockIdx.x * blockDim.x; k < N; k += blockDim.x * gridDim.x) { const auto i = sortd[k]; // get permuted/sorted index // cache position info if (i < 0 or i >= MAX_SIZE) continue; const value_t px = posxd[i]; const value_t py = posyd[i]; value_t vx = 0.0f; value_t vy = 0.0f; value_t normsum = 0.0f; // initialize iteration stack, i.e., push root node onto stack int depth = sbase; if (SBASE_EQ_THREAD == true) { pos[sbase] = 0; node[sbase] = FOUR_NNODES; } do { // stack is not empty auto pd = pos[depth]; auto nd = node[depth]; while (pd < 4) { const auto index = nd + pd++; if (index < 0 or index >= MAX_SIZE) break; const auto n = childd[index]; // 
load child pointer // Non child if (n < 0 or n > NNODES) break; const value_t dx = px - posxd[n]; const value_t dy = py - posyd[n]; const value_t dxy1 = dx * dx + dy * dy + EPS_PLUS_1; if ((n < N) or __all_sync(__activemask(), dxy1 >= dq[depth])) { const value_t tdist_2 = __fdividef(massd[n], dxy1 * dxy1); normsum += tdist_2 * dxy1; vx += dx * tdist_2; vy += dy * tdist_2; } else { // push cell onto stack if (SBASE_EQ_THREAD == true) { pos[depth] = pd; node[depth] = nd; } depth++; pd = 0; nd = n * 4; } } } while (--depth >= sbase); // done with this level // update velocity velxd[i] += vx; velyd[i] += vy; atomicAdd(Z_norm, normsum); } } /** * Fast attractive kernel. Uses COO matrix. */ template <typename value_idx, typename value_t> __global__ void attractive_kernel_bh(const value_t* restrict VAL, const value_idx* restrict COL, const value_idx* restrict ROW, const value_t* restrict Y1, const value_t* restrict Y2, value_t* restrict attract1, value_t* restrict attract2, value_t* restrict Qs, const value_idx NNZ, const value_t dof) { const auto index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= NNZ) return; const auto i = ROW[index]; const auto j = COL[index]; const value_t y1d = Y1[i] - Y1[j]; const value_t y2d = Y2[i] - Y2[j]; value_t dist = y1d * y1d + y2d * y2d; // As a sum of squares, SED is mathematically >= 0. There might be a source of // NaNs upstream though, so until we find and fix them, enforce that trait. if (!(dist >= 0)) dist = 0.0f; const value_t P = VAL[index]; const value_t Q = compute_q(dist, dof); const value_t PQ = P * Q; // Apply forces atomicAdd(&attract1[i], PQ * y1d); atomicAdd(&attract2[i], PQ * y2d); if (Qs) { // when computing KL div Qs[index] = Q; } // TODO: Convert attractive forces to CSR format } /** * Apply gradient updates. 
*/ template <typename value_idx, typename value_t> __global__ __launch_bounds__(THREADS6, 1) void IntegrationKernel(const float eta, const float momentum, const float exaggeration, value_t* restrict Y1, value_t* restrict Y2, const value_t* restrict attract1, const value_t* restrict attract2, const value_t* restrict repel1, const value_t* restrict repel2, value_t* restrict gains1, value_t* restrict gains2, value_t* restrict old_forces1, value_t* restrict old_forces2, const value_t* restrict Z, const value_idx N) { value_t ux, uy, gx, gy; // iterate over all bodies assigned to thread const auto inc = blockDim.x * gridDim.x; const value_t Z_norm = Z[0]; for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += inc) { const value_t dx = exaggeration * attract1[i] - Z_norm * repel1[i]; const value_t dy = exaggeration * attract2[i] - Z_norm * repel2[i]; if (signbit(dx) != signbit(ux = old_forces1[i])) gx = gains1[i] + 0.2f; else gx = gains1[i] * 0.8f; if (gx < 0.01f) gx = 0.01f; if (signbit(dy) != signbit(uy = old_forces2[i])) gy = gains2[i] + 0.2f; else gy = gains2[i] * 0.8f; if (gy < 0.01f) gy = 0.01f; gains1[i] = gx; gains2[i] = gy; old_forces1[i] = ux = momentum * ux - eta * gx * dx; old_forces2[i] = uy = momentum * uy - eta * gy * dy; Y1[i] += ux; Y2[i] += uy; } } } // namespace BH } // namespace TSNE } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsne/tsne.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "tsne_runner.cuh" #include <cuml/manifold/tsne.h> #include <raft/core/handle.hpp> #include <raft/distance/distance_types.hpp> namespace ML { template <typename tsne_input, typename value_idx, typename value_t> value_t _fit(const raft::handle_t& handle, tsne_input& input, knn_graph<value_idx, value_t>& k_graph, TSNEParams& params) { TSNE_runner<tsne_input, value_idx, value_t> runner(handle, input, k_graph, params); return runner.run(); // returns the Kullback–Leibler divergence } void TSNE_fit(const raft::handle_t& handle, float* X, float* Y, int n, int p, int64_t* knn_indices, float* knn_dists, TSNEParams& params, float* kl_div) { ASSERT(n > 0 && p > 0 && params.dim > 0 && params.n_neighbors > 0 && X != NULL && Y != NULL, "Wrong input args"); manifold_dense_inputs_t<float> input(X, Y, n, p); knn_graph<int64_t, float> k_graph(n, params.n_neighbors, knn_indices, knn_dists); float kl_div_v = _fit<manifold_dense_inputs_t<float>, knn_indices_dense_t, float>( handle, input, k_graph, params); if (kl_div) { *kl_div = kl_div_v; } } void TSNE_fit_sparse(const raft::handle_t& handle, int* indptr, int* indices, float* data, float* Y, int nnz, int n, int p, int* knn_indices, float* knn_dists, TSNEParams& params, float* kl_div) { ASSERT(n > 0 && p > 0 && params.dim > 0 && params.n_neighbors > 0 && indptr != NULL && indices != NULL && data != NULL && Y != NULL, "Wrong input args"); 
manifold_sparse_inputs_t<int, float> input(indptr, indices, data, Y, nnz, n, p); knn_graph<int, float> k_graph(n, params.n_neighbors, knn_indices, knn_dists); float kl_div_v = _fit<manifold_sparse_inputs_t<int, float>, knn_indices_sparse_t, float>( handle, input, k_graph, params); if (kl_div) { *kl_div = kl_div_v; } } } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/tsne
rapidsai_public_repos/cuml/cpp/src/tsne/cannylab/bh.cu
/* ECL-BH v4.5: Simulation of the gravitational forces in a star cluster using the Barnes-Hut n-body algorithm. Copyright (c) 2010-2020 Texas State University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Authors: Martin Burtscher and Sahar Azimi URL: The latest version of this code is available at https://userweb.cs.txstate.edu/~burtscher/research/ECL-BH/. Publication: This work is described in detail in the following paper. Martin Burtscher and Keshav Pingali. An Efficient CUDA Implementation of the Tree-based Barnes Hut n-Body Algorithm. Chapter 6 in GPU Computing Gems Emerald Edition, pp. 75-92. 
January 2011. */ #include <cuda.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> // threads per block #define THREADS1 1024 /* must be a power of 2 */ #define THREADS2 1024 #define THREADS3 768 /* shared-memory limited on some devices */ #define THREADS4 1024 #define THREADS5 1024 #define THREADS6 1024 // block count = factor * #SMs #define FACTOR1 2 #define FACTOR2 2 #define FACTOR3 1 /* must all be resident at the same time */ #define FACTOR4 1 /* must all be resident at the same time */ #define FACTOR5 2 #define FACTOR6 2 #define WARPSIZE 32 #define MAXDEPTH 32 __device__ volatile int stepd, bottomd; __device__ unsigned int blkcntd; __device__ volatile float radiusd; /******************************************************************************/ /*** initialize memory ********************************************************/ /******************************************************************************/ __global__ void InitializationKernel() { stepd = -1; blkcntd = 0; } /******************************************************************************/ /*** compute center and radius ************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS1, FACTOR1) void BoundingBoxKernel(const int nnodesd, const int nbodiesd, int* const __restrict__ startd, int* const __restrict__ childd, float4* const __restrict__ posMassd, float3* const __restrict__ maxd, float3* const __restrict__ mind) { int i, j, k, inc; float val; __shared__ volatile float sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1], sminz[THREADS1], smaxz[THREADS1]; float3 min, max; // initialize with valid data (in case #bodies < #threads) const float4 p0 = posMassd[0]; min.x = max.x = p0.x; min.y = max.y = p0.y; min.z = max.z = p0.z; // scan all bodies i = threadIdx.x; inc = THREADS1 * gridDim.x; for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) 
{ const float4 p = posMassd[j]; val = p.x; min.x = fminf(min.x, val); max.x = fmaxf(max.x, val); val = p.y; min.y = fminf(min.y, val); max.y = fmaxf(max.y, val); val = p.z; min.z = fminf(min.z, val); max.z = fmaxf(max.z, val); } // reduction in shared memory sminx[i] = min.x; smaxx[i] = max.x; sminy[i] = min.y; smaxy[i] = max.y; sminz[i] = min.z; smaxz[i] = max.z; for (j = THREADS1 / 2; j > 0; j /= 2) { __syncthreads(); if (i < j) { k = i + j; sminx[i] = min.x = fminf(min.x, sminx[k]); smaxx[i] = max.x = fmaxf(max.x, smaxx[k]); sminy[i] = min.y = fminf(min.y, sminy[k]); smaxy[i] = max.y = fmaxf(max.y, smaxy[k]); sminz[i] = min.z = fminf(min.z, sminz[k]); smaxz[i] = max.z = fmaxf(max.z, smaxz[k]); } } // write block result to global memory if (i == 0) { k = blockIdx.x; mind[k] = min; maxd[k] = max; __threadfence(); inc = gridDim.x - 1; if (inc == atomicInc(&blkcntd, inc)) { // I'm the last block, so combine all block results for (j = 0; j <= inc; j++) { float3 minp = mind[j]; float3 maxp = maxd[j]; min.x = fminf(min.x, minp.x); max.x = fmaxf(max.x, maxp.x); min.y = fminf(min.y, minp.y); max.y = fmaxf(max.y, maxp.y); min.z = fminf(min.z, minp.z); max.z = fmaxf(max.z, maxp.z); } // compute radius val = fmaxf(max.x - min.x, max.y - min.y); radiusd = fmaxf(val, max.z - min.z) * 0.5f; // create root node k = nnodesd; bottomd = k; startd[k] = 0; float4 p; p.x = (min.x + max.x) * 0.5f; p.y = (min.y + max.y) * 0.5f; p.z = (min.z + max.z) * 0.5f; p.w = -1.0f; posMassd[k] = p; k *= 8; for (i = 0; i < 8; i++) childd[k + i] = -1; stepd++; } } } /******************************************************************************/ /*** build tree ***************************************************************/ /******************************************************************************/ __global__ __launch_bounds__(1024, 1) void ClearKernel1(const int nnodesd, const int nbodiesd, int* const __restrict__ childd) { int k, inc, top, bottom; top = 8 * nnodesd; bottom = 8 * nbodiesd; 
inc = blockDim.x * gridDim.x; k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size if (k < bottom) k += inc; // iterate over all cells assigned to thread while (k < top) { childd[k] = -1; k += inc; } } __global__ __launch_bounds__(THREADS2, FACTOR2) void TreeBuildingKernel( const int nnodesd, const int nbodiesd, volatile int* const __restrict__ childd, const float4* const __restrict__ posMassd) { int i, j, depth, skip, inc; float x, y, z, r; float dx, dy, dz; int ch, n, cell, locked, patch; float radius; // cache root data radius = radiusd * 0.5f; const float4 root = posMassd[nnodesd]; skip = 1; inc = blockDim.x * gridDim.x; i = threadIdx.x + blockIdx.x * blockDim.x; // iterate over all bodies assigned to thread while (i < nbodiesd) { const float4 p = posMassd[i]; if (skip != 0) { // new body, so start traversing at root skip = 0; n = nnodesd; depth = 1; r = radius; dx = dy = dz = -r; j = 0; // determine which child to follow if (root.x < p.x) { j = 1; dx = r; } if (root.y < p.y) { j |= 2; dy = r; } if (root.z < p.z) { j |= 4; dz = r; } x = root.x + dx; y = root.y + dy; z = root.z + dz; } // follow path to leaf cell ch = childd[n * 8 + j]; while (ch >= nbodiesd) { n = ch; depth++; r *= 0.5f; dx = dy = dz = -r; j = 0; // determine which child to follow if (x < p.x) { j = 1; dx = r; } if (y < p.y) { j |= 2; dy = r; } if (z < p.z) { j |= 4; dz = r; } x += dx; y += dy; z += dz; ch = childd[n * 8 + j]; } if (ch != -2) { // skip if child pointer is locked and try again later locked = n * 8 + j; if (ch == -1) { if (-1 == atomicCAS((int*)&childd[locked], -1, i)) { // if null, just insert the new body i += inc; // move on to next body skip = 1; } } else { // there already is a body at this position if (ch == atomicCAS((int*)&childd[locked], ch, -2)) { // try to lock patch = -1; const float4 chp = posMassd[ch]; // create new cell(s) and insert the old and new bodies do { depth++; if (depth > MAXDEPTH) { printf("ERROR: maximum depth 
exceeded (bodies are too close together)\n"); asm("trap;"); } cell = atomicSub((int*)&bottomd, 1) - 1; if (cell <= nbodiesd) { printf("ERROR: out of cell memory\n"); asm("trap;"); } if (patch != -1) { childd[n * 8 + j] = cell; } patch = max(patch, cell); j = 0; if (x < chp.x) j = 1; if (y < chp.y) j |= 2; if (z < chp.z) j |= 4; childd[cell * 8 + j] = ch; n = cell; r *= 0.5f; dx = dy = dz = -r; j = 0; if (x < p.x) { j = 1; dx = r; } if (y < p.y) { j |= 2; dy = r; } if (z < p.z) { j |= 4; dz = r; } x += dx; y += dy; z += dz; ch = childd[n * 8 + j]; // repeat until the two bodies are different children } while (ch >= 0); childd[n * 8 + j] = i; i += inc; // move on to next body skip = 2; } } } __syncthreads(); // optional barrier for performance __threadfence(); if (skip == 2) { childd[locked] = patch; } } } __global__ __launch_bounds__(1024, 1) void ClearKernel2(const int nnodesd, int* const __restrict__ startd, float4* const __restrict__ posMassd) { int k, inc, bottom; bottom = bottomd; inc = blockDim.x * gridDim.x; k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size if (k < bottom) k += inc; // iterate over all cells assigned to thread while (k < nnodesd) { posMassd[k].w = -1.0f; startd[k] = -1; k += inc; } } /******************************************************************************/ /*** compute center of mass ***************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS3, FACTOR3) void SummarizationKernel( const int nnodesd, const int nbodiesd, volatile int* const __restrict__ countd, const int* const __restrict__ childd, volatile float4* const __restrict__ posMassd) { int i, j, k, ch, inc, cnt, bottom; float m, cm, px, py, pz; __shared__ int child[THREADS3 * 8]; __shared__ float mass[THREADS3 * 8]; bottom = bottomd; inc = blockDim.x * gridDim.x; k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; 
// align to warp size if (k < bottom) k += inc; int restart = k; for (j = 0; j < 3; j++) { // wait-free pre-passes // iterate over all cells assigned to thread while (k <= nnodesd) { if (posMassd[k].w < 0.0f) { for (i = 0; i < 8; i++) { ch = childd[k * 8 + i]; child[i * THREADS3 + threadIdx.x] = ch; // cache children if ((ch >= nbodiesd) && ((mass[i * THREADS3 + threadIdx.x] = posMassd[ch].w) < 0.0f)) { break; } } if (i == 8) { // all children are ready cm = 0.0f; px = 0.0f; py = 0.0f; pz = 0.0f; cnt = 0; for (i = 0; i < 8; i++) { ch = child[i * THREADS3 + threadIdx.x]; if (ch >= 0) { // four reads due to missing copy constructor for "volatile float4" const float chx = posMassd[ch].x; const float chy = posMassd[ch].y; const float chz = posMassd[ch].z; const float chw = posMassd[ch].w; if (ch >= nbodiesd) { // count bodies (needed later) m = mass[i * THREADS3 + threadIdx.x]; cnt += countd[ch]; } else { m = chw; cnt++; } // add child's contribution cm += m; px += chx * m; py += chy * m; pz += chz * m; } } countd[k] = cnt; m = 1.0f / cm; // four writes due to missing copy constructor for "volatile float4" posMassd[k].x = px * m; posMassd[k].y = py * m; posMassd[k].z = pz * m; __threadfence(); posMassd[k].w = cm; } } k += inc; // move on to next cell } k = restart; } j = 0; // iterate over all cells assigned to thread while (k <= nnodesd) { if (posMassd[k].w >= 0.0f) { k += inc; } else { if (j == 0) { j = 8; for (i = 0; i < 8; i++) { ch = childd[k * 8 + i]; child[i * THREADS3 + threadIdx.x] = ch; // cache children if ((ch < nbodiesd) || ((mass[i * THREADS3 + threadIdx.x] = posMassd[ch].w) >= 0.0f)) { j--; } } } else { j = 8; for (i = 0; i < 8; i++) { ch = child[i * THREADS3 + threadIdx.x]; if ((ch < nbodiesd) || (mass[i * THREADS3 + threadIdx.x] >= 0.0f) || ((mass[i * THREADS3 + threadIdx.x] = posMassd[ch].w) >= 0.0f)) { j--; } } } if (j == 0) { // all children are ready cm = 0.0f; px = 0.0f; py = 0.0f; pz = 0.0f; cnt = 0; for (i = 0; i < 8; i++) { ch = child[i * 
THREADS3 + threadIdx.x]; if (ch >= 0) { // four reads due to missing copy constructor for "volatile float4" const float chx = posMassd[ch].x; const float chy = posMassd[ch].y; const float chz = posMassd[ch].z; const float chw = posMassd[ch].w; if (ch >= nbodiesd) { // count bodies (needed later) m = mass[i * THREADS3 + threadIdx.x]; cnt += countd[ch]; } else { m = chw; cnt++; } // add child's contribution cm += m; px += chx * m; py += chy * m; pz += chz * m; } } countd[k] = cnt; m = 1.0f / cm; // four writes due to missing copy constructor for "volatile float4" posMassd[k].x = px * m; posMassd[k].y = py * m; posMassd[k].z = pz * m; __threadfence(); posMassd[k].w = cm; k += inc; } } } } /******************************************************************************/ /*** sort bodies **************************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS4, FACTOR4) void SortKernel(const int nnodesd, const int nbodiesd, int* const __restrict__ sortd, const int* const __restrict__ countd, volatile int* const __restrict__ startd, int* const __restrict__ childd) { int i, j, k, ch, dec, start, bottom; bottom = bottomd; dec = blockDim.x * gridDim.x; k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x; // iterate over all cells assigned to thread while (k >= bottom) { start = startd[k]; if (start >= 0) { j = 0; for (i = 0; i < 8; i++) { ch = childd[k * 8 + i]; if (ch >= 0) { if (i != j) { // move children to front (needed later for speed) childd[k * 8 + i] = -1; childd[k * 8 + j] = ch; } j++; if (ch >= nbodiesd) { // child is a cell startd[ch] = start; // set start ID of child start += countd[ch]; // add #bodies in subtree } else { // child is a body sortd[start] = ch; // record body in 'sorted' array start++; } } } k -= dec; // move on to next cell } __syncthreads(); // optional barrier for performance } } 
/******************************************************************************/ /*** compute force ************************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS5, FACTOR5) void ForceCalculationKernel( const int nnodesd, const int nbodiesd, const float dthfd, const float itolsqd, const float epssqd, const int* const __restrict__ sortd, const int* const __restrict__ childd, const float4* const __restrict__ posMassd, float2* const __restrict__ veld, float4* const __restrict__ accVeld) { int i, j, k, n, depth, base, sbase, diff, pd, nd; float ax, ay, az, dx, dy, dz, tmp; __shared__ volatile int pos[MAXDEPTH * THREADS5 / WARPSIZE], node[MAXDEPTH * THREADS5 / WARPSIZE]; __shared__ float dq[MAXDEPTH * THREADS5 / WARPSIZE]; if (0 == threadIdx.x) { tmp = radiusd * 2; // precompute values that depend only on tree level dq[0] = tmp * tmp * itolsqd; for (i = 1; i < MAXDEPTH; i++) { dq[i] = dq[i - 1] * 0.25f; dq[i - 1] += epssqd; } dq[i - 1] += epssqd; } __syncthreads(); // figure out first thread in each warp (lane 0) base = threadIdx.x / WARPSIZE; sbase = base * WARPSIZE; j = base * MAXDEPTH; diff = threadIdx.x - sbase; // make multiple copies to avoid index calculations later if (diff < MAXDEPTH) { dq[diff + j] = dq[diff]; } __syncthreads(); // iterate over all bodies assigned to thread for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) { i = sortd[k]; // get permuted/sorted index // cache position info const float4 pi = posMassd[i]; ax = 0.0f; ay = 0.0f; az = 0.0f; // initialize iteration stack, i.e., push root node onto stack depth = j; if (sbase == threadIdx.x) { pos[j] = 0; node[j] = nnodesd * 8; } do { // stack is not empty pd = pos[depth]; nd = node[depth]; while (pd < 8) { // node on top of stack has more children to process n = childd[nd + pd]; // load child pointer pd++; if (n >= 0) { const float4 pn = 
posMassd[n]; dx = pn.x - pi.x; dy = pn.y - pi.y; dz = pn.z - pi.z; tmp = dx * dx + (dy * dy + (dz * dz + epssqd)); // compute distance squared (plus softening) if ((n < nbodiesd) || __all_sync(0xffffffff, tmp >= dq[depth])) { // check if all threads agree that cell // is far enough away (or is a body) tmp = rsqrtf(tmp); // compute distance tmp = pn.w * tmp * tmp * tmp; ax += dx * tmp; ay += dy * tmp; az += dz * tmp; } else { // push cell onto stack if (sbase == threadIdx.x) { pos[depth] = pd; node[depth] = nd; } depth++; pd = 0; nd = n * 8; } } else { pd = 8; // early out because all remaining children are also zero } } depth--; // done with this level } while (depth >= j); float4 acc = accVeld[i]; if (stepd > 0) { // update velocity float2 v = veld[i]; v.x += (ax - acc.x) * dthfd; v.y += (ay - acc.y) * dthfd; acc.w += (az - acc.z) * dthfd; veld[i] = v; } // save computed acceleration acc.x = ax; acc.y = ay; acc.z = az; accVeld[i] = acc; } } /******************************************************************************/ /*** advance bodies ***********************************************************/ /******************************************************************************/ __global__ __launch_bounds__(THREADS6, FACTOR6) void IntegrationKernel(const int nbodiesd, const float dtimed, const float dthfd, float4* const __restrict__ posMass, float2* const __restrict__ veld, float4* const __restrict__ accVeld) { int i, inc; float dvelx, dvely, dvelz; float velhx, velhy, velhz; // iterate over all bodies assigned to thread inc = blockDim.x * gridDim.x; for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) { // integrate float4 acc = accVeld[i]; dvelx = acc.x * dthfd; dvely = acc.y * dthfd; dvelz = acc.z * dthfd; float2 v = veld[i]; velhx = v.x + dvelx; velhy = v.y + dvely; velhz = acc.w + dvelz; float4 p = posMass[i]; p.x += velhx * dtimed; p.y += velhy * dtimed; p.z += velhz * dtimed; posMass[i] = p; v.x = velhx + dvelx; v.y = velhy + dvely; acc.w 
= velhz + dvelz; veld[i] = v; accVeld[i] = acc; } } /******************************************************************************/ static void CudaTest(const char* const msg) { cudaError_t e; cudaDeviceSynchronize(); if (cudaSuccess != (e = cudaGetLastError())) { fprintf(stderr, "%s: %d\n", msg, e); fprintf(stderr, "%s\n", cudaGetErrorString(e)); exit(-1); } } /******************************************************************************/ // random number generator (based on SPLASH-2 code at // https://github.com/staceyson/splash2/blob/master/codes/apps/barnes/util.C) static int randx = 7; static double drnd() { const int lastrand = randx; randx = (1103515245 * randx + 12345) & 0x7FFFFFFF; return (double)lastrand / 2147483648.0; } /******************************************************************************/ int main(int argc, char* argv[]) { int i, run, blocks; int nnodes, nbodies, step, timesteps; double runtime; float dtime, dthf, epssq, itolsq; float time, timing[7]; cudaEvent_t start, stop; float4* accVel; float2* vel; int *sortl, *childl, *countl, *startl; float4* accVell; float2* vell; float3 *maxl, *minl; float4* posMassl; float4* posMass; double rsc, vsc, r, v, x, y, z, sq, scale; // perform some checks printf("ECL-BH v4.5\n"); printf("Copyright (c) 2010-2020 Texas State University\n"); fflush(stdout); if (argc != 4) { fprintf(stderr, "\n"); fprintf(stderr, "arguments: number_of_bodies number_of_timesteps device\n"); exit(-1); } int deviceCount; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "There is no device supporting CUDA\n"); exit(-1); } const int dev = atoi(argv[3]); if ((dev < 0) || (deviceCount <= dev)) { fprintf(stderr, "There is no device %d\n", dev); exit(-1); } cudaSetDevice(dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) { fprintf(stderr, "There is no CUDA capable device\n"); exit(-1); } if (deviceProp.major < 3) { 
fprintf(stderr, "Need at least compute capability 3.0\n"); exit(-1); } if (deviceProp.warpSize != WARPSIZE) { fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize); exit(-1); } blocks = deviceProp.multiProcessorCount; const int mTSM = deviceProp.maxThreadsPerMultiProcessor; printf("gpu: %s with %d SMs and %d mTpSM (%.1f MHz and %.1f MHz)\n", deviceProp.name, blocks, mTSM, deviceProp.clockRate * 0.001, deviceProp.memoryClockRate * 0.001); if ((WARPSIZE <= 0) || (WARPSIZE & (WARPSIZE - 1) != 0)) { fprintf(stderr, "Warp size must be greater than zero and a power of two\n"); exit(-1); } if (MAXDEPTH > WARPSIZE) { fprintf(stderr, "MAXDEPTH must be less than or equal to WARPSIZE\n"); exit(-1); } if ((THREADS1 <= 0) || (THREADS1 & (THREADS1 - 1) != 0)) { fprintf(stderr, "THREADS1 must be greater than zero and a power of two\n"); exit(-1); } // set L1/shared memory configuration cudaFuncSetCacheConfig(BoundingBoxKernel, cudaFuncCachePreferShared); cudaFuncSetCacheConfig(TreeBuildingKernel, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(ClearKernel1, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(ClearKernel2, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(SummarizationKernel, cudaFuncCachePreferShared); cudaFuncSetCacheConfig(SortKernel, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(ForceCalculationKernel, cudaFuncCachePreferEqual); cudaFuncSetCacheConfig(IntegrationKernel, cudaFuncCachePreferL1); cudaGetLastError(); // reset error value for (run = 0; run < 1; run++) { // in case multiple runs are desired for timing for (i = 0; i < 7; i++) timing[i] = 0.0f; nbodies = atoi(argv[1]); if (nbodies < 1) { fprintf(stderr, "nbodies is too small: %d\n", nbodies); exit(-1); } if (nbodies > (1 << 30)) { fprintf(stderr, "nbodies is too large: %d\n", nbodies); exit(-1); } nnodes = nbodies * 2; if (nnodes < 1024 * blocks) nnodes = 1024 * blocks; while ((nnodes & (WARPSIZE - 1)) != 0) nnodes++; nnodes--; timesteps = atoi(argv[2]); dtime = 0.025; dthf = dtime * 0.5f; epssq = 
0.05 * 0.05; itolsq = 1.0f / (0.5 * 0.5); // allocate memory if (run == 0) { printf("configuration: %d bodies, %d time steps\n", nbodies, timesteps); accVel = (float4*)malloc(sizeof(float4) * nbodies); if (accVel == NULL) { fprintf(stderr, "cannot allocate accVel\n"); exit(-1); } vel = (float2*)malloc(sizeof(float2) * nbodies); if (vel == NULL) { fprintf(stderr, "cannot allocate vel\n"); exit(-1); } posMass = (float4*)malloc(sizeof(float4) * nbodies); if (posMass == NULL) { fprintf(stderr, "cannot allocate posMass\n"); exit(-1); } if (cudaSuccess != cudaMalloc((void**)&childl, sizeof(int) * (nnodes + 1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd"); if (cudaSuccess != cudaMalloc((void**)&vell, sizeof(float2) * (nnodes + 1))) fprintf(stderr, "could not allocate veld\n"); CudaTest("couldn't allocate veld"); if (cudaSuccess != cudaMalloc((void**)&accVell, sizeof(float4) * (nnodes + 1))) fprintf(stderr, "could not allocate accVeld\n"); CudaTest("couldn't allocate accVeld"); if (cudaSuccess != cudaMalloc((void**)&countl, sizeof(int) * (nnodes + 1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd"); if (cudaSuccess != cudaMalloc((void**)&startl, sizeof(int) * (nnodes + 1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd"); if (cudaSuccess != cudaMalloc((void**)&sortl, sizeof(int) * (nnodes + 1))) fprintf(stderr, "could not allocate sortd\n"); CudaTest("couldn't allocate sortd"); if (cudaSuccess != cudaMalloc((void**)&posMassl, sizeof(float4) * (nnodes + 1))) fprintf(stderr, "could not allocate posMassd\n"); CudaTest("couldn't allocate posMassd"); if (cudaSuccess != cudaMalloc((void**)&maxl, sizeof(float3) * blocks * FACTOR1)) fprintf(stderr, "could not allocate maxd\n"); CudaTest("couldn't allocate maxd"); if (cudaSuccess != cudaMalloc((void**)&minl, sizeof(float3) * blocks * FACTOR1)) fprintf(stderr, "could not allocate mind\n"); CudaTest("couldn't 
allocate mind"); } // generate input (based on SPLASH-2 code at // https://github.com/staceyson/splash2/blob/master/codes/apps/barnes/code.C) rsc = (3 * 3.1415926535897932384626433832795) / 16; vsc = sqrt(1.0 / rsc); for (i = 0; i < nbodies; i++) { float4 p; p.w = 1.0 / nbodies; r = 1.0 / sqrt(pow(drnd() * 0.999, -2.0 / 3.0) - 1); do { x = drnd() * 2.0 - 1.0; y = drnd() * 2.0 - 1.0; z = drnd() * 2.0 - 1.0; sq = x * x + y * y + z * z; } while (sq > 1.0); scale = rsc * r / sqrt(sq); p.x = x * scale; p.y = y * scale; p.z = z * scale; posMass[i] = p; do { x = drnd(); y = drnd() * 0.1; } while (y > x * x * pow(1 - x * x, 3.5)); v = x * sqrt(2.0 / sqrt(1 + r * r)); do { x = drnd() * 2.0 - 1.0; y = drnd() * 2.0 - 1.0; z = drnd() * 2.0 - 1.0; sq = x * x + y * y + z * z; } while (sq > 1.0); scale = vsc * v / sqrt(sq); float2 v; v.x = x * scale; v.y = y * scale; accVel[i].w = z * scale; vel[i] = v; } if (cudaSuccess != cudaMemcpy(accVell, accVel, sizeof(float4) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of vel to device failed\n"); CudaTest("vel copy to device failed"); if (cudaSuccess != cudaMemcpy(vell, vel, sizeof(float2) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of vel to device failed\n"); CudaTest("vel copy to device failed"); if (cudaSuccess != cudaMemcpy(posMassl, posMass, sizeof(float4) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posMass to device failed\n"); CudaTest("posMass copy to device failed"); // run timesteps (launch GPU kernels) cudaEventCreate(&start); cudaEventCreate(&stop); struct timeval starttime, endtime; gettimeofday(&starttime, NULL); cudaEventRecord(start, 0); InitializationKernel<<<1, 1>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[0] += time; // CudaTest("kernel 0 launch failed"); for (step = 0; step < timesteps; step++) { cudaEventRecord(start, 0); BoundingBoxKernel<<<blocks * FACTOR1, THREADS1>>>( nnodes, nbodies, startl, 
childl, posMassl, maxl, minl); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[1] += time; // CudaTest("kernel 1 launch failed"); cudaEventRecord(start, 0); ClearKernel1<<<blocks * 1, 1024>>>(nnodes, nbodies, childl); TreeBuildingKernel<<<blocks * FACTOR2, THREADS2>>>(nnodes, nbodies, childl, posMassl); ClearKernel2<<<blocks * 1, 1024>>>(nnodes, startl, posMassl); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[2] += time; // CudaTest("kernel 2 launch failed"); cudaEventRecord(start, 0); SummarizationKernel<<<blocks * FACTOR3, THREADS3>>>( nnodes, nbodies, countl, childl, posMassl); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[3] += time; // CudaTest("kernel 3 launch failed"); cudaEventRecord(start, 0); SortKernel<<<blocks * FACTOR4, THREADS4>>>(nnodes, nbodies, sortl, countl, startl, childl); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[4] += time; // CudaTest("kernel 4 launch failed"); cudaEventRecord(start, 0); ForceCalculationKernel<<<blocks * FACTOR5, THREADS5>>>( nnodes, nbodies, dthf, itolsq, epssq, sortl, childl, posMassl, vell, accVell); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[5] += time; // CudaTest("kernel 5 launch failed"); cudaEventRecord(start, 0); IntegrationKernel<<<blocks * FACTOR6, THREADS6>>>( nbodies, dtime, dthf, posMassl, vell, accVell); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); timing[6] += time; // CudaTest("kernel 6 launch failed"); } CudaTest("kernel launch failed"); cudaEventDestroy(start); cudaEventDestroy(stop); gettimeofday(&endtime, NULL); runtime = (endtime.tv_sec + endtime.tv_usec / 1000000.0 - starttime.tv_sec - starttime.tv_usec / 1000000.0); printf("runtime: %.4lf s (", runtime); time = 0; for 
(i = 1; i < 7; i++) { printf(" %.1f ", timing[i]); time += timing[i]; } printf(") = %.1f ms\n", time); } // transfer final result back to CPU if (cudaSuccess != cudaMemcpy(accVel, accVell, sizeof(float4) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of accVel from device failed\n"); CudaTest("vel copy from device failed"); if (cudaSuccess != cudaMemcpy(vel, vell, sizeof(float2) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of vel from device failed\n"); CudaTest("vel copy from device failed"); if (cudaSuccess != cudaMemcpy(posMass, posMassl, sizeof(float4) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posMass from device failed\n"); CudaTest("posMass copy from device failed"); // print output i = 0; // for (i = 0; i < nbodies; i++) { printf("%.2e %.2e %.2e\n", posMass[i].x, posMass[i].y, posMass[i].z); // } free(vel); free(accVel); free(posMass); cudaFree(childl); cudaFree(vell); cudaFree(accVell); cudaFree(countl); cudaFree(startl); cudaFree(sortl); cudaFree(posMassl); cudaFree(maxl); cudaFree(minl); return 0; }
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/spectral/spectral.cu
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <raft/sparse/coo.hpp> #include <raft/core/handle.hpp> #include <raft/sparse/linalg/spectral.cuh> namespace raft { class handle_t; } namespace ML { namespace Spectral { /** * Given a COO formatted (symmetric) knn graph, this function * computes the spectral embeddings (lowest n_components * eigenvectors), using Lanczos min cut algorithm. * @param rows source vertices of knn graph (size nnz) * @param cols destination vertices of knn graph (size nnz) * @param vals edge weights connecting vertices of knn graph (size nnz) * @param nnz size of rows/cols/vals * @param n number of samples in X * @param n_neighbors the number of neighbors to query for knn graph construction * @param n_components the number of components to project the X into * @param out output array for embedding (size n*n_comonents) */ void fit_embedding(const raft::handle_t& handle, int* rows, int* cols, float* vals, int nnz, int n, int n_components, float* out, unsigned long long seed) { raft::sparse::spectral::fit_embedding(handle, rows, cols, vals, nnz, n, n_components, out, seed); } } // namespace Spectral } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/dbscan/dbscan.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/cluster/dbscan.hpp> #include "dbscan.cuh" #include <raft/util/cudart_utils.hpp> namespace ML { namespace Dbscan { void fit(const raft::handle_t& handle, float* input, int n_rows, int n_cols, float eps, int min_pts, raft::distance::DistanceType metric, int* labels, int* core_sample_indices, float* sample_weight, size_t max_bytes_per_batch, int verbosity, bool opg) { if (opg) dbscanFitImpl<float, int, true>(handle, input, n_rows, n_cols, eps, min_pts, metric, labels, core_sample_indices, sample_weight, max_bytes_per_batch, handle.get_stream(), verbosity); else dbscanFitImpl<float, int, false>(handle, input, n_rows, n_cols, eps, min_pts, metric, labels, core_sample_indices, sample_weight, max_bytes_per_batch, handle.get_stream(), verbosity); } void fit(const raft::handle_t& handle, double* input, int n_rows, int n_cols, double eps, int min_pts, raft::distance::DistanceType metric, int* labels, int* core_sample_indices, double* sample_weight, size_t max_bytes_per_batch, int verbosity, bool opg) { if (opg) dbscanFitImpl<double, int, true>(handle, input, n_rows, n_cols, eps, min_pts, metric, labels, core_sample_indices, sample_weight, max_bytes_per_batch, handle.get_stream(), verbosity); else dbscanFitImpl<double, int, false>(handle, input, n_rows, n_cols, eps, min_pts, metric, labels, core_sample_indices, sample_weight, max_bytes_per_batch, handle.get_stream(), 
verbosity); } void fit(const raft::handle_t& handle, float* input, int64_t n_rows, int64_t n_cols, float eps, int min_pts, raft::distance::DistanceType metric, int64_t* labels, int64_t* core_sample_indices, float* sample_weight, size_t max_bytes_per_batch, int verbosity, bool opg) { if (opg) dbscanFitImpl<float, int64_t, true>(handle, input, n_rows, n_cols, eps, min_pts, metric, labels, core_sample_indices, sample_weight, max_bytes_per_batch, handle.get_stream(), verbosity); else dbscanFitImpl<float, int64_t, false>(handle, input, n_rows, n_cols, eps, min_pts, metric, labels, core_sample_indices, sample_weight, max_bytes_per_batch, handle.get_stream(), verbosity); } void fit(const raft::handle_t& handle, double* input, int64_t n_rows, int64_t n_cols, double eps, int min_pts, raft::distance::DistanceType metric, int64_t* labels, int64_t* core_sample_indices, double* sample_weight, size_t max_bytes_per_batch, int verbosity, bool opg) { if (opg) dbscanFitImpl<double, int64_t, true>(handle, input, n_rows, n_cols, eps, min_pts, metric, labels, core_sample_indices, sample_weight, max_bytes_per_batch, handle.get_stream(), verbosity); else dbscanFitImpl<double, int64_t, false>(handle, input, n_rows, n_cols, eps, min_pts, metric, labels, core_sample_indices, sample_weight, max_bytes_per_batch, handle.get_stream(), verbosity); } } // namespace Dbscan } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/dbscan/dbscan_api.cpp
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/cluster/dbscan_api.h> #include <common/cumlHandle.hpp> #include <cuml/cluster/dbscan.hpp> #include <cuml/cuml_api.h> extern "C" { cumlError_t cumlSpDbscanFit(cumlHandle_t handle, float* input, int n_rows, int n_cols, float eps, int min_pts, int* labels, int* core_sample_indices, size_t max_bytes_per_batch, int verbosity) { cumlError_t status; raft::handle_t* handle_ptr; std::tie(handle_ptr, status) = ML::handleMap.lookupHandlePointer(handle); if (status == CUML_SUCCESS) { try { ML::Dbscan::fit(*handle_ptr, input, n_rows, n_cols, eps, min_pts, raft::distance::L2SqrtUnexpanded, labels, core_sample_indices, NULL, max_bytes_per_batch, verbosity); } // TODO: Implement this // catch (const MLCommon::Exception& e) //{ // //log e.what()? // status = e.getErrorCode(); //} catch (...) 
{ status = CUML_ERROR_UNKNOWN; } } return status; } cumlError_t cumlDpDbscanFit(cumlHandle_t handle, double* input, int n_rows, int n_cols, double eps, int min_pts, int* labels, int* core_sample_indices, size_t max_bytes_per_batch, int verbosity) { cumlError_t status; raft::handle_t* handle_ptr; std::tie(handle_ptr, status) = ML::handleMap.lookupHandlePointer(handle); if (status == CUML_SUCCESS) { try { ML::Dbscan::fit(*handle_ptr, input, n_rows, n_cols, eps, min_pts, raft::distance::L2SqrtUnexpanded, labels, core_sample_indices, NULL, max_bytes_per_batch, verbosity); } // TODO: Implement this // catch (const MLCommon::Exception& e) //{ // //log e.what()? // status = e.getErrorCode(); //} catch (...) { status = CUML_ERROR_UNKNOWN; } } return status; } }
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/dbscan/runner.cuh
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "adjgraph/runner.cuh" #include "corepoints/compute.cuh" #include "corepoints/exchange.cuh" #include "mergelabels/runner.cuh" #include "mergelabels/tree_reduction.cuh" #include "vertexdeg/runner.cuh" #include <common/nvtx.hpp> #include <raft/core/nvtx.hpp> #include <raft/label/classlabels.cuh> #include <raft/sparse/csr.hpp> #include <raft/util/cudart_utils.hpp> #include <cuml/common/logger.hpp> #include <raft/core/nvtx.hpp> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/fill.h> #include <thrust/iterator/counting_iterator.h> #include <cstddef> namespace ML { namespace Dbscan { static const int TPB = 256; /** * Adjust labels from weak_cc primitive to match sklearn: * 1. Turn any labels matching MAX_LABEL into -1 * 2. Subtract 1 from all other labels. */ template <typename Index_ = int> __global__ void relabelForSkl(Index_* labels, Index_ N, Index_ MAX_LABEL) { Index_ tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid < N) { if (labels[tid] == MAX_LABEL) { labels[tid] = -1; } else { --labels[tid]; } } } /** * Turn the non-monotonic labels from weak_cc primitive into * an array of labels drawn from a monotonically increasing set. 
*/ template <typename Index_ = int> void final_relabel(Index_* db_cluster, Index_ N, cudaStream_t stream) { Index_ MAX_LABEL = std::numeric_limits<Index_>::max(); raft::label::make_monotonic( db_cluster, db_cluster, N, stream, [MAX_LABEL] __device__(Index_ val) { return val == MAX_LABEL; }); } /** * Run the DBSCAN algorithm (common code for single-GPU and multi-GPU) * @tparam opg Whether we are running in a multi-node multi-GPU context * @param[in] handle raft handle * @param[in] x Input data (N*D row-major device array, or N*N for precomputed) * @param[in] N Number of points * @param[in] D Dimensionality of the points * @param[in] start_row Index of the offset for this node * @param[in] n_owned_rows Number of rows (points) owned by this node * @param[in] eps Epsilon neighborhood criterion * @param[in] min_pts Core points criterion * @param[out] labels Output labels (device array of length N) * @param[out] core_indices If not nullptr, the indices of core points are written in this array * @param[in] algo_vd Algorithm used for the vertex degrees * @param[in] algo_adj Algorithm used for the adjacency graph * @param[in] algo_ccl Algorithm used for the final relabel * @param[in] workspace Temporary global memory buffer used to store intermediate computations * If nullptr, then this function will return the workspace size needed. * It is the responsibility of the user to allocate and free this buffer! * @param[in] batch_size Batch size * @param[in] stream The CUDA stream where to launch the kernels * @return In case the workspace pointer is null, this returns the size needed. 
 */
template <typename Type_f, typename Index_ = int, bool opg = false>
std::size_t run(const raft::handle_t& handle,
                const Type_f* x,
                Index_ N,
                Index_ D,
                Index_ start_row,
                Index_ n_owned_rows,
                Type_f eps,
                Index_ min_pts,
                Index_* labels,
                Index_* core_indices,
                const Type_f* sample_weight,
                int algo_vd,
                int algo_adj,
                int algo_ccl,
                void* workspace,
                std::size_t batch_size,
                cudaStream_t stream,
                raft::distance::DistanceType metric)
{
  // Byte alignment used for every sub-buffer carved out of `workspace` below.
  const std::size_t align = 256;
  // Number of batches needed to cover the rows owned by this worker.
  Index_ n_batches = raft::ceildiv((std::size_t)n_owned_rows, batch_size);

  // Rank of this worker (0 in the single-GPU case, where opg == false).
  int my_rank;
  if (opg) {
    const auto& comm = handle.get_comms();
    my_rank          = comm.get_rank();
  } else
    my_rank = 0;

  /**
   * Note on coupling between data types:
   * - adjacency graph has a worst case size of N * batch_size elements. Thus,
   *   if N is very close to being greater than the maximum 32-bit IdxType type used, a
   *   64-bit IdxType should probably be used instead.
   * - exclusive scan is the CSR row index for the adjacency graph and its values have a
   *   risk of overflowing when N * batch_size becomes larger what can be stored in IdxType
   * - the vertex degree array has a worst case of each element having all other
   *   elements in their neighborhood, so any IdxType can be safely used, so long as N doesn't
   *   overflow.
   */
  // Aligned sizes (in bytes) of each temporary buffer:
  //  - adj:          batch_size x N boolean adjacency matrix
  //  - core_pts:     per-point core-point mask
  //  - m:            single "work done" flag for weak-CC / label merging
  //  - vd:           per-row vertex degrees, plus one trailing slot for their sum
  //  - ex_scan:      exclusive scan of vd (CSR row offsets)
  //  - row_cnt:      per-row counters used by the adj->CSR conversion
  //  - labels:       one full label array (allocated twice: labels_temp + work_buffer)
  //  - wght_sum:     per-row weighted degrees, only when sample weights are given
  std::size_t adj_size      = raft::alignTo<std::size_t>(sizeof(bool) * N * batch_size, align);
  std::size_t core_pts_size = raft::alignTo<std::size_t>(sizeof(bool) * N, align);
  std::size_t m_size        = raft::alignTo<std::size_t>(sizeof(bool), align);
  std::size_t vd_size       = raft::alignTo<std::size_t>(sizeof(Index_) * (batch_size + 1), align);
  std::size_t ex_scan_size  = raft::alignTo<std::size_t>(sizeof(Index_) * batch_size, align);
  std::size_t row_cnt_size  = raft::alignTo<std::size_t>(sizeof(Index_) * batch_size, align);
  std::size_t labels_size   = raft::alignTo<std::size_t>(sizeof(Index_) * N, align);
  std::size_t wght_sum_size =
    sample_weight != nullptr ? raft::alignTo<std::size_t>(sizeof(Type_f) * batch_size, align) : 0;

  Index_ MAX_LABEL = std::numeric_limits<Index_>::max();

  // Guard against overflow of Index_ when addressing the batch_size x N adjacency matrix.
  ASSERT(N * batch_size < static_cast<std::size_t>(MAX_LABEL),
         "An overflow occurred with the current choice of precision "
         "and the number of samples. (Max allowed batch size is %ld, but was %ld). "
         "Consider using double precision for the output labels.",
         (unsigned long)(MAX_LABEL / N),
         (unsigned long)batch_size);

  // Two-phase protocol: when called with workspace == NULL, only return the
  // number of workspace bytes required; the caller allocates and calls again.
  if (workspace == NULL) {
    auto size = adj_size + core_pts_size + m_size + vd_size + ex_scan_size + row_cnt_size +
                2 * labels_size + wght_sum_size;
    return size;
  }

  // partition the temporary workspace needed for different stages of dbscan.
  Index_ maxadjlen  = 0;
  Index_ curradjlen = 0;
  char* temp        = (char*)workspace;
  bool* adj         = (bool*)temp;
  temp += adj_size;
  bool* core_pts = (bool*)temp;
  temp += core_pts_size;
  bool* m = (bool*)temp;
  temp += m_size;
  Index_* vd = (Index_*)temp;
  temp += vd_size;
  Index_* ex_scan = (Index_*)temp;
  temp += ex_scan_size;
  Index_* row_counters = (Index_*)temp;
  temp += row_cnt_size;
  Index_* labels_temp = (Index_*)temp;
  temp += labels_size;
  Index_* work_buffer = (Index_*)temp;
  temp += labels_size;
  Type_f* wght_sum = nullptr;
  if (sample_weight != nullptr) {
    wght_sum = (Type_f*)temp;
    temp += wght_sum_size;
  }

  // Compute the mask
  // 1. Compute the part owned by this worker (reversed order of batches to
  // keep the batch 0 in memory)
  for (int i = n_batches - 1; i >= 0; i--) {
    Index_ start_vertex_id = start_row + i * batch_size;
    Index_ n_points        = min(n_owned_rows - i * batch_size, batch_size);

    CUML_LOG_DEBUG(
      "- Batch %d / %ld (%ld samples)", i + 1, (unsigned long)n_batches, (unsigned long)n_points);

    CUML_LOG_DEBUG("--> Computing vertex degrees");
    raft::common::nvtx::push_range("Trace::Dbscan::VertexDeg");
    VertexDeg::run<Type_f, Index_>(handle,
                                   adj,
                                   vd,
                                   wght_sum,
                                   x,
                                   sample_weight,
                                   eps,
                                   N,
                                   D,
                                   algo_vd,
                                   start_vertex_id,
                                   n_points,
                                   stream,
                                   metric);
    raft::common::nvtx::pop_range();

    CUML_LOG_DEBUG("--> Computing core point mask");
    raft::common::nvtx::push_range("Trace::Dbscan::CorePoints");
    // With sample weights, the core-point criterion compares the weighted
    // degree; otherwise the plain (integer) vertex degree is used.
    if (wght_sum != nullptr) {
      CorePoints::compute<Type_f, Index_>(
        handle, wght_sum, core_pts, min_pts, start_vertex_id, n_points, stream);
    } else {
      CorePoints::compute<Index_, Index_>(
        handle, vd, core_pts, min_pts, start_vertex_id, n_points, stream);
    }
    raft::common::nvtx::pop_range();
  }
  // 2. Exchange with the other workers
  if (opg) CorePoints::exchange(handle, core_pts, N, start_row, stream);

  // Compute the labelling for the owned part of the graph
  raft::sparse::WeakCCState state(m);
  rmm::device_uvector<Index_> adj_graph(0, stream);

  for (int i = 0; i < n_batches; i++) {
    Index_ start_vertex_id = start_row + i * batch_size;
    Index_ n_points        = min(n_owned_rows - i * batch_size, batch_size);
    if (n_points <= 0) break;

    CUML_LOG_DEBUG(
      "- Batch %d / %ld (%ld samples)", i + 1, (unsigned long)n_batches, (unsigned long)n_points);

    // i==0 -> adj and vd for batch 0 already in memory
    if (i > 0) {
      CUML_LOG_DEBUG("--> Computing vertex degrees");
      raft::common::nvtx::push_range("Trace::Dbscan::VertexDeg");
      // No weighted degrees needed here: the core-point mask is already final.
      VertexDeg::run<Type_f, Index_>(handle,
                                     adj,
                                     vd,
                                     nullptr,
                                     x,
                                     nullptr,
                                     eps,
                                     N,
                                     D,
                                     algo_vd,
                                     start_vertex_id,
                                     n_points,
                                     stream,
                                     metric);
      raft::common::nvtx::pop_range();
    }
    // vd[n_points] holds the total number of adjacency nnz for this batch.
    raft::update_host(&curradjlen, vd + n_points, 1, stream);
    handle.sync_stream(stream);

    CUML_LOG_DEBUG("--> Computing adjacency graph with %ld nnz.", (unsigned long)curradjlen);
    raft::common::nvtx::push_range("Trace::Dbscan::AdjGraph");
    // Grow the CSR column-index buffer only when the batch needs more space.
    if (curradjlen > maxadjlen || adj_graph.data() == NULL) {
      maxadjlen = curradjlen;
      adj_graph.resize(maxadjlen, stream);
    }

    AdjGraph::run<Index_>(handle,
                          adj,
                          vd,
                          adj_graph.data(),
                          curradjlen,
                          ex_scan,
                          N,
                          algo_adj,
                          n_points,
                          row_counters,
                          stream);
    raft::common::nvtx::pop_range();

    CUML_LOG_DEBUG("--> Computing connected components");
    raft::common::nvtx::push_range("Trace::Dbscan::WeakCC");
    // Batch 0 labels directly into the output; later batches go to labels_temp
    // and are merged below.
    raft::sparse::weak_cc_batched<Index_>(
      i == 0 ? labels : labels_temp,
      ex_scan,
      adj_graph.data(),
      curradjlen,
      N,
      start_vertex_id,
      n_points,
      &state,
      stream,
      [core_pts, N] __device__(Index_ global_id) {
        // Only core points may propagate labels; byte-load via __ldg.
        return global_id < N ? __ldg((char*)core_pts + global_id) : 0;
      });
    raft::common::nvtx::pop_range();

    if (i > 0) {
      // The labels_temp array contains the labelling for the neighborhood
      // graph of the current batch. This needs to be merged with the labelling
      // created by the previous batches.
      // Using the labelling from the previous batches as initial value for
      // weak_cc_batched and skipping the merge step would lead to incorrect
      // results as described in #3094.
      CUML_LOG_DEBUG("--> Accumulating labels");
      raft::common::nvtx::push_range("Trace::Dbscan::MergeLabels");
      MergeLabels::run<Index_>(handle, labels, labels_temp, core_pts, work_buffer, m, N, stream);
      raft::common::nvtx::pop_range();
    }
  }

  // Combine the results in the multi-node multi-GPU case
  if (opg)
    MergeLabels::tree_reduction(handle, labels, labels_temp, core_pts, work_buffer, m, N, stream);

  /// TODO: optional minimalization step for border points

  // Final relabel
  if (my_rank == 0) {
    raft::common::nvtx::push_range("Trace::Dbscan::FinalRelabel");
    if (algo_ccl == 2) final_relabel(labels, N, stream);
    std::size_t nblks = raft::ceildiv<std::size_t>(N, TPB);
    // Map internal labels to scikit-learn convention (0..k-1, noise = -1).
    relabelForSkl<Index_><<<nblks, TPB, 0, stream>>>(labels, N, MAX_LABEL);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    raft::common::nvtx::pop_range();

    // Calculate the core_indices only if an array was passed in
    if (core_indices != nullptr) {
      raft::common::nvtx::range fun_scope("Trace::Dbscan::CoreSampleIndices");

      // Create the execution policy
      auto thrust_exec_policy = handle.get_thrust_policy();

      // Get wrappers for the device ptrs
      thrust::device_ptr<bool> dev_core_pts       = thrust::device_pointer_cast(core_pts);
      thrust::device_ptr<Index_> dev_core_indices = thrust::device_pointer_cast(core_indices);

      // First fill the core_indices with -1 which will be used if core_point_count < N
      thrust::fill_n(thrust_exec_policy, dev_core_indices, N, (Index_)-1);

      auto index_iterator = thrust::counting_iterator<Index_>(0);

      // Perform stream reduction on the core points. The core_pts acts as the stencil and we use
      // thrust::counting_iterator to return the index
      thrust::copy_if(thrust_exec_policy,
                      index_iterator,
                      index_iterator + N,
                      dev_core_pts,
                      dev_core_indices,
                      [=] __device__(const bool is_core_point) { return is_core_point; });
    }
  }

  CUML_LOG_DEBUG("Done.");
  return (std::size_t)0;
}

}  // namespace Dbscan
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/dbscan/dbscan.cuh
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "runner.cuh"

#include <raft/core/nvtx.hpp>

#include <cuml/cluster/dbscan.hpp>
#include <cuml/common/logger.hpp>

#include <algorithm>
#include <cstddef>

namespace ML {
namespace Dbscan {

/**
 * Estimate the largest batch size (number of rows processed at once) that fits
 * in the given memory budget, and report the resulting memory estimate.
 *
 * @param[out] estimated_memory  Estimated total temporary memory (bytes) for the
 *                               chosen batch size
 * @param[in]  n_rows            Total number of points in the dataset
 * @param[in]  n_owned_rows      Number of rows owned by this worker (upper bound
 *                               for the batch size)
 * @param[in]  max_mbytes_per_batch  Memory budget in megabytes (0 means the
 *                               caller did not override it; see dbscanFitImpl)
 * @param[in]  neigh_per_row     Estimated neighbors per row used to size the
 *                               sparse adjacency; <= 0 falls back to the
 *                               worst case n_rows
 * @return The chosen batch size (rows per batch)
 */
template <typename Index_ = int>
size_t compute_batch_size(size_t& estimated_memory,
                          Index_ n_rows,
                          Index_ n_owned_rows,
                          size_t max_mbytes_per_batch = 0,
                          Index_ neigh_per_row        = 0)
{
  // In real applications, it's unlikely that the sparse adjacency matrix
  // comes even close to the worst-case memory usage, because if epsilon
  // is so large that all points are connected to 10% or even more of other
  // points, the clusters would probably not be interesting/relevant anymore
  ///@todo: expose `neigh_per_row` to the user

  if (neigh_per_row <= 0) neigh_per_row = n_rows;

  /* Memory needed per batch row:
   *  - Dense adj matrix: n_rows (bool)
   *  - Sparse adj matrix: neigh_per_row (Index_)
   *  - Vertex degrees: 1 (Index_)
   *  - Ex scan: 1 (Index_)
   */
  size_t est_mem_per_row = n_rows * sizeof(bool) + (neigh_per_row + 2) * sizeof(Index_);
  /* Memory needed regardless of the batch size:
   *  - Temporary labels: n_rows (Index_)
   *  - Core point mask: n_rows (bool)
   */
  size_t est_mem_fixed = n_rows * (sizeof(Index_) + sizeof(bool));
  // The rest will be so small that it should fit into what we have left over
  // from the over-estimation of the sparse adjacency matrix

  // Batch size determined based on available memory
  ASSERT(est_mem_per_row > 0, "Estimated memory per row is 0 for DBSCAN");
  // NOTE(review): if max_mbytes_per_batch * 1000000 < est_mem_fixed this
  // unsigned subtraction wraps around and yields a huge batch size candidate;
  // the min() below then clamps it to n_owned_rows, so the result stays valid,
  // but the budget is effectively ignored — TODO confirm intended.
  size_t batch_size = (max_mbytes_per_batch * 1000000 - est_mem_fixed) / est_mem_per_row;

  // Limit batch size to number of owned rows
  batch_size = std::min((size_t)n_owned_rows, batch_size);

  // To avoid overflow, we need: batch_size <= MAX_LABEL / n_rows (floor div)
  Index_ MAX_LABEL = std::numeric_limits<Index_>::max();
  if (batch_size > static_cast<std::size_t>(MAX_LABEL / n_rows)) {
    Index_ new_batch_size = MAX_LABEL / n_rows;
    CUML_LOG_WARN(
      "Batch size limited by the chosen integer type (%d bytes). %d -> %d. "
      "Using the larger integer type might result in better performance",
      (int)sizeof(Index_),
      (int)batch_size,
      (int)new_batch_size);
    batch_size = new_batch_size;
  }

  // Warn when a smaller index type could be used
  if ((sizeof(Index_) > sizeof(int)) &&
      (batch_size < std::numeric_limits<int>::max() / static_cast<std::size_t>(n_rows))) {
    CUML_LOG_WARN(
      "You are using an index type of size (%d bytes) but a smaller index "
      "type (%d bytes) would be sufficient. Using the smaller integer type "
      "might result in better performance.",
      (int)sizeof(Index_),
      (int)sizeof(int));
  }

  estimated_memory = batch_size * est_mem_per_row + est_mem_fixed;
  return batch_size;
}

/**
 * Top-level single-process DBSCAN fit: chooses algorithms and batch size,
 * sizes the workspace via a first dry-run call to Dbscan::run, allocates it,
 * then runs the batched clustering.
 *
 * @param[in]  handle               raft handle
 * @param[in]  input                Feature matrix or (if metric is Precomputed)
 *                                  a dense n_rows x n_rows distance matrix
 * @param[in]  n_rows               Number of points
 * @param[in]  n_cols               Number of features
 * @param[in]  eps                  Epsilon neighborhood radius
 * @param[in]  min_pts              Core point criterion
 * @param[in]  metric               Distance metric
 * @param[out] labels               Output cluster labels (size n_rows)
 * @param[out] core_sample_indices  Optional output core sample indices (may be null)
 * @param[in]  sample_weight        Optional per-sample weights (may be null)
 * @param[in]  max_mbytes_per_batch Memory budget in MB; 0 -> estimate from device memory
 * @param[in]  stream               CUDA stream
 * @param[in]  verbosity            Logging level
 */
template <typename T, typename Index_ = int, bool opg = false>
void dbscanFitImpl(const raft::handle_t& handle,
                   T* input,
                   Index_ n_rows,
                   Index_ n_cols,
                   T eps,
                   Index_ min_pts,
                   raft::distance::DistanceType metric,
                   Index_* labels,
                   Index_* core_sample_indices,
                   T* sample_weight,
                   size_t max_mbytes_per_batch,
                   cudaStream_t stream,
                   int verbosity)
{
  raft::common::nvtx::range fun_scope("ML::Dbscan::Fit");
  ML::Logger::get().setLevel(verbosity);
  // XXX: for algo_vd and algo_adj, 0 (naive) is no longer an option and has
  // been removed.
  int algo_vd  = (metric == raft::distance::Precomputed) ? 2 : 1;
  int algo_adj = 1;
  int algo_ccl = 2;

  int my_rank{0};
  int n_rank{1};
  Index_ start_row{0};
  Index_ n_owned_rows{n_rows};

  ASSERT(n_rows > 0, "No rows in the input array. DBSCAN cannot be fitted!");

  if (opg) {
    // Split the rows evenly between workers; this rank owns
    // [start_row, start_row + n_owned_rows).
    const auto& comm     = handle.get_comms();
    my_rank              = comm.get_rank();
    n_rank               = comm.get_size();
    Index_ rows_per_rank = raft::ceildiv<Index_>(n_rows, n_rank);
    start_row            = my_rank * rows_per_rank;
    Index_ end_row       = min((my_rank + 1) * rows_per_rank, n_rows);
    n_owned_rows         = max(Index_(0), end_row - start_row);
    // Note: it is possible for a node to have no work in theory. It won't
    // happen in practice (because n_rows is much greater than n_rank)
  }

  CUML_LOG_DEBUG("#%d owns %ld rows", (int)my_rank, (unsigned long)n_owned_rows);

  // Estimate available memory per batch
  // Note: we can't rely on the reported free memory.
  if (max_mbytes_per_batch == 0) {
    // Query memory information to get the total memory on the device
    size_t free_memory, total_memory;
    RAFT_CUDA_TRY(cudaMemGetInfo(&free_memory, &total_memory));

    // X can either be a feature matrix or distance matrix
    size_t dataset_memory = (metric == raft::distance::Precomputed)
                              ? ((size_t)n_rows * (size_t)n_rows * sizeof(T))
                              : ((size_t)n_rows * (size_t)n_cols * sizeof(T));

    // The estimate is: 80% * total - dataset
    max_mbytes_per_batch = (80 * total_memory / 100 - dataset_memory) / 1e6;

    CUML_LOG_DEBUG("Dataset memory: %ld MB", (unsigned long long)(dataset_memory / 1e6));

    CUML_LOG_DEBUG("Estimated available memory: %ld / %ld MB",
                   (unsigned long long)max_mbytes_per_batch,
                   (unsigned long long)(total_memory / 1e6));
  }

  size_t estimated_memory;
  size_t batch_size =
    compute_batch_size<Index_>(estimated_memory, n_rows, n_owned_rows, max_mbytes_per_batch);

  CUML_LOG_DEBUG("Running batched training (batch size: %ld, estimated: %lf MB)",
                 (unsigned long)batch_size,
                 (double)estimated_memory * 1e-6);

  // First call with a NULL workspace returns the required workspace size.
  size_t workspaceSize = Dbscan::run<T, Index_, opg>(handle,
                                                     input,
                                                     n_rows,
                                                     n_cols,
                                                     start_row,
                                                     n_owned_rows,
                                                     eps,
                                                     min_pts,
                                                     labels,
                                                     core_sample_indices,
                                                     sample_weight,
                                                     algo_vd,
                                                     algo_adj,
                                                     algo_ccl,
                                                     NULL,
                                                     batch_size,
                                                     stream,
                                                     metric);

  CUML_LOG_DEBUG("Workspace size: %lf MB", (double)workspaceSize * 1e-6);

  rmm::device_uvector<char> workspace(workspaceSize, stream);
  Dbscan::run<T, Index_, opg>(handle,
                              input,
                              n_rows,
                              n_cols,
                              start_row,
                              n_owned_rows,
                              eps,
                              min_pts,
                              labels,
                              core_sample_indices,
                              sample_weight,
                              algo_vd,
                              algo_adj,
                              algo_ccl,
                              workspace.data(),
                              batch_size,
                              stream,
                              metric);
}

}  // namespace Dbscan
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/dbscan
rapidsai_public_repos/cuml/cpp/src/dbscan/vertexdeg/algo.cuh
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "pack.h"

#include <cuda_runtime.h>
#include <math.h>

#include <raft/neighbors/epsilon_neighborhood.cuh>

#include "pack.h"
#include <raft/linalg/coalesced_reduction.cuh>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/util/device_atomics.cuh>

#include <rmm/device_uvector.hpp>

namespace ML {
namespace Dbscan {
namespace VertexDeg {
namespace Algo {

/**
 * Calculates the vertex degree array and the epsilon neighborhood adjacency matrix for the batch.
 *
 * Outputs written (all device memory, ordered on `stream`):
 *  - data.adj:        batch_size x N boolean adjacency matrix (row-major)
 *  - data.vd:         per-row neighbor counts; data.vd[batch_size] accumulates
 *                     the total nnz of the batch
 *  - data.weight_sum: per-row weighted neighbor counts (only when both
 *                     data.weight_sum and data.sample_weight are non-null)
 */
template <typename value_t, typename index_t = int>
void launcher(const raft::handle_t& handle,
              Pack<value_t, index_t> data,
              index_t start_vertex_id,
              index_t batch_size,
              cudaStream_t stream,
              raft::distance::DistanceType metric)
{
  // The last position of data.vd is the sum of all elements in this array
  // (excluding it). Hence, its length is one more than the number of points
  // Initialize it to zero.
  index_t* d_nnz = data.vd + batch_size;
  RAFT_CUDA_TRY(cudaMemsetAsync(d_nnz, 0, sizeof(index_t), stream));

  ASSERT(sizeof(index_t) == 4 || sizeof(index_t) == 8, "index_t should be 4 or 8 bytes");

  index_t m = data.N;
  index_t n = min(data.N - start_vertex_id, batch_size);  // rows in this batch
  index_t k = data.D;
  value_t eps2;

  // Compute adjacency matrix `adj` using Cosine or L2 metric.
  if (metric == raft::distance::DistanceType::CosineExpanded) {
    rmm::device_uvector<value_t> rowNorms(m, stream);

    // NOTE(review): sqrtf is the single-precision sqrt; for value_t == double
    // this truncates the norm to float precision — TODO confirm intended.
    raft::linalg::rowNorm(rowNorms.data(),
                          data.x,
                          k,
                          m,
                          raft::linalg::NormType::L2Norm,
                          true,
                          stream,
                          [] __device__(value_t in) { return sqrtf(in); });

    /* Cast away constness because the output matrix for normalization cannot be of const type.
     * Input matrix will be modified due to normalization.
     */
    raft::linalg::matrixVectorOp(
      const_cast<value_t*>(data.x),
      data.x,
      rowNorms.data(),
      k,
      m,
      true,
      true,
      [] __device__(value_t mat_in, value_t vec_in) { return mat_in / vec_in; },
      stream);

    // For unit-norm rows, squared L2 distance = 2 * (1 - cosine similarity),
    // so a cosine-distance threshold eps maps to a squared-L2 threshold 2*eps.
    eps2 = 2 * data.eps;

    raft::neighbors::epsilon_neighborhood::epsUnexpL2SqNeighborhood<value_t, index_t>(
      data.adj, nullptr, data.x + start_vertex_id * k, data.x, n, m, k, eps2, stream);

    /**
     * Restoring the input matrix after normalization.
     */
    raft::linalg::matrixVectorOp(
      const_cast<value_t*>(data.x),
      data.x,
      rowNorms.data(),
      k,
      m,
      true,
      true,
      [] __device__(value_t mat_in, value_t vec_in) { return mat_in * vec_in; },
      stream);
  } else {
    // Plain L2: compare squared distances against eps^2 to avoid sqrt.
    eps2 = data.eps * data.eps;

    // 1. The output matrix adj is now an n x m matrix (row-major order)
    // 2. Do not compute the vertex degree in epsUnexpL2SqNeighborhood (pass a
    // nullptr)
    raft::neighbors::epsilon_neighborhood::epsUnexpL2SqNeighborhood<value_t, index_t>(
      data.adj, nullptr, data.x + start_vertex_id * k, data.x, n, m, k, eps2, stream);
  }

  // Reduction of adj to compute the vertex degrees
  raft::linalg::coalescedReduction<bool, index_t, index_t>(
    data.vd,
    data.adj,
    data.N,
    batch_size,
    (index_t)0,
    stream,
    false,
    [] __device__(bool adj_ij, index_t idx) { return static_cast<index_t>(adj_ij); },
    raft::Sum<index_t>(),
    // Final op also accumulates the batch total into d_nnz via atomicAdd.
    [d_nnz] __device__(index_t degree) {
      atomicAdd(d_nnz, degree);
      return degree;
    });
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  if (data.weight_sum != nullptr && data.sample_weight != nullptr) {
    const value_t* sample_weight = data.sample_weight;
    // Reduction of adj to compute the weighted vertex degrees
    raft::linalg::coalescedReduction<bool, value_t, index_t>(
      data.weight_sum,
      data.adj,
      data.N,
      batch_size,
      (value_t)0,
      stream,
      false,
      [sample_weight] __device__(bool adj_ij, index_t j) {
        return adj_ij ? sample_weight[j] : (value_t)0;
      },
      raft::Sum<value_t>());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
}

}  // namespace Algo
}  // end namespace VertexDeg
}  // end namespace Dbscan
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/dbscan
rapidsai_public_repos/cuml/cpp/src/dbscan/vertexdeg/pack.h
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace ML { namespace Dbscan { namespace VertexDeg { template <typename Type, typename Index_> struct Pack { /** * vertex degree array * Last position is the sum of all elements in this array (excluding it) * Hence, its length is one more than the number of points */ Index_* vd; /** weighted vertex degree */ Type* weight_sum; /** the adjacency matrix */ bool* adj; /** input dataset */ const Type* x; /** weighted vertex degree */ const Type* sample_weight; /** epsilon neighborhood thresholding param */ Type eps; /** number of points in the dataset */ Index_ N; /** dataset dimensionality */ Index_ D; /** * @brief reset the output array before calling the actual kernel * @param stream cuda stream where to perform this operation * @param vdlen length of the vertex degree array */ void resetArray(cudaStream_t stream, Index_ vdlen) { RAFT_CUDA_TRY(cudaMemsetAsync(vd, 0, sizeof(Index_) * vdlen, stream)); } }; } // namespace VertexDeg } // namespace Dbscan } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/dbscan
rapidsai_public_repos/cuml/cpp/src/dbscan/vertexdeg/precomputed.cuh
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <math.h>
#include <raft/linalg/coalesced_reduction.cuh>
#include <raft/linalg/reduce.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/device_atomics.cuh>

#include "pack.h"

namespace ML {
namespace Dbscan {
namespace VertexDeg {
namespace Precomputed {

/**
 * Calculates the vertex degree array and the epsilon neighborhood adjacency matrix for the batch.
 *
 * Here data.x is a precomputed N x N distance matrix; the adjacency is a simple
 * elementwise threshold against eps (no distance computation needed).
 */
template <typename value_t, typename index_t = int>
void launcher(const raft::handle_t& handle,
              Pack<value_t, index_t> data,
              index_t start_vertex_id,
              index_t batch_size,
              cudaStream_t stream)
{
  // Regarding index types, a special index type is used here for indices in
  // the distance matrix due to its dimensions (that are independent of the
  // batch size)
  using long_index_t = long long int;

  // The last position of data.vd is the sum of all elements in this array
  // (excluding it). Hence, its length is one more than the number of points
  // Initialize it to zero.
  index_t* d_nnz = data.vd + batch_size;
  RAFT_CUDA_TRY(cudaMemsetAsync(d_nnz, 0, sizeof(index_t), stream));

  long_index_t N              = data.N;
  long_index_t cur_batch_size = min(data.N - start_vertex_id, batch_size);
  const value_t& eps          = data.eps;

  // Threshold the batch's rows of the precomputed distance matrix into adj.
  raft::linalg::unaryOp<value_t>(
    data.adj,
    data.x + (long_index_t)start_vertex_id * N,
    cur_batch_size * N,
    [eps] __device__(value_t dist) { return (dist <= eps); },
    stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  // Reduction of adj to compute the vertex degrees
  raft::linalg::coalescedReduction<bool, index_t, long_index_t>(
    data.vd,
    data.adj,
    data.N,
    batch_size,
    (index_t)0,
    stream,
    false,
    [] __device__(bool adj_ij, long_index_t idx) { return static_cast<index_t>(adj_ij); },
    raft::Sum<index_t>(),
    // Final op also accumulates the batch total into d_nnz via atomicAdd.
    [d_nnz] __device__(index_t degree) {
      atomicAdd(d_nnz, degree);
      return degree;
    });
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  if (data.weight_sum != nullptr && data.sample_weight != nullptr) {
    const value_t* sample_weight = data.sample_weight;
    // Reduction of adj to compute the weighted vertex degrees
    raft::linalg::coalescedReduction<bool, value_t, long_index_t>(
      data.weight_sum,
      data.adj,
      data.N,
      batch_size,
      (value_t)0,
      stream,
      false,
      [sample_weight] __device__(bool adj_ij, long_index_t j) {
        return adj_ij ? sample_weight[j] : (value_t)0;
      },
      raft::Sum<value_t>());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
}

}  // namespace Precomputed
}  // end namespace VertexDeg
}  // end namespace Dbscan
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/dbscan
rapidsai_public_repos/cuml/cpp/src/dbscan/vertexdeg/runner.cuh
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "algo.cuh" #include "pack.h" #include "precomputed.cuh" namespace ML { namespace Dbscan { namespace VertexDeg { template <typename Type_f, typename Index_ = int> void run(const raft::handle_t& handle, bool* adj, Index_* vd, Type_f* wght_sum, const Type_f* x, const Type_f* sample_weight, Type_f eps, Index_ N, Index_ D, int algo, Index_ start_vertex_id, Index_ batch_size, cudaStream_t stream, raft::distance::DistanceType metric) { Pack<Type_f, Index_> data = {vd, wght_sum, adj, x, sample_weight, eps, N, D}; switch (algo) { case 0: ASSERT( false, "Incorrect algo '%d' passed! Naive version of vertexdeg has been removed.", algo); case 1: Algo::launcher<Type_f, Index_>(handle, data, start_vertex_id, batch_size, stream, metric); break; case 2: Precomputed::launcher<Type_f, Index_>(handle, data, start_vertex_id, batch_size, stream); break; default: ASSERT(false, "Incorrect algo passed! '%d'", algo); } } } // namespace VertexDeg } // namespace Dbscan } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/dbscan
rapidsai_public_repos/cuml/cpp/src/dbscan/mergelabels/tree_reduction.cuh
/*
 * Copyright (c) 2020-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "runner.cuh"

#include <raft/core/nvtx.hpp>

#include <cuml/common/logger.hpp>

namespace ML {
namespace Dbscan {
namespace MergeLabels {

/**
 * Merges two label arrays according to a given core point mask, combining the
 * partial labellings of all workers with a binary tree reduction: at step s,
 * ranks at odd multiples of s send their labels to the rank s below them,
 * which merges them. After log2(n_rank) steps, rank 0 holds the final labels.
 *
 * @param[in]    handle      raft handle
 * @param[inout] labels      Labels computed by this node
 * @param[in]    labels_temp Buffer to receive labels from another node
 * @param[in]    mask        Boolean core point mask
 * @param[in]    work_buffer Working buffer (for R)
 * @param[in]    m           Working flag
 * @param[in]    N           Number of points in the dataset
 * @param[in]    stream      CUDA stream
 */
template <typename Index_ = int>
void tree_reduction(const raft::handle_t& handle,
                    Index_* labels,
                    Index_* labels_temp,
                    const bool* mask,
                    Index_* work_buffer,
                    bool* m,
                    Index_ N,
                    cudaStream_t stream)
{
  const auto& comm = handle.get_comms();
  int my_rank      = comm.get_rank();
  int n_rank       = comm.get_size();

  raft::comms::request_t request;
  int s = 1;
  while (s < n_rank) {
    // Fix: the original format string lacked a conversion specifier for s.
    CUML_LOG_DEBUG("Tree reduction, s=%d", s);

    // Find out whether the node is a receiver / sender / passive
    bool receiver = my_rank % (2 * s) == 0 && my_rank + s < n_rank;
    bool sender   = my_rank % (2 * s) == s;

    if (receiver) {
      CUML_LOG_DEBUG("--> Receive labels (from %d)", my_rank + s);
      comm.irecv(labels_temp, N, my_rank + s, 0, &request);
    } else if (sender) {
      // Fix: the labels are sent *to* rank my_rank - s (message said "from").
      CUML_LOG_DEBUG("--> Send labels (to %d)", my_rank - s);
      comm.isend(labels, N, my_rank - s, 0, &request);
    }

    // Fix: only wait when a request was actually posted. Passive ranks
    // previously called waitall on an uninitialized request handle.
    if (receiver || sender) {
      try {
        comm.waitall(1, &request);
      } catch (raft::exception& e) {
        CUML_LOG_DEBUG("Communication failure");
      }
    }

    if (receiver) {
      CUML_LOG_DEBUG("--> Merge labels");
      raft::common::nvtx::range fun_scope("Trace::Dbscan::MergeLabels");
      MergeLabels::run<Index_>(handle, labels, labels_temp, mask, work_buffer, m, N, stream);
    }

    s *= 2;
  }
}

}  // namespace MergeLabels
}  // namespace Dbscan
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/dbscan
rapidsai_public_repos/cuml/cpp/src/dbscan/mergelabels/runner.cuh
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/label/merge_labels.cuh> #include <raft/core/handle.hpp> namespace ML { namespace Dbscan { namespace MergeLabels { /** * Merges to label arrays according to a given core point mask * @param[in] handle raft handle * @param[inout] labels_a First input, and output label array (in-place) * @param[in] labels_b Second input label array * @param[in] mask Core point mask * @param[in] work_buffer Working buffer (for R) * @param[in] m Working flag * @param[in] N Number of points in the dataset * @param[in] stream CUDA stream */ template <typename Index_ = int, int TPB_X = 256> void run(const raft::handle_t& handle, Index_* labels_a, const Index_* labels_b, const bool* mask, Index_* work_buffer, bool* m, Index_ N, cudaStream_t stream) { raft::label::merge_labels<Index_, TPB_X>(labels_a, labels_b, mask, work_buffer, m, N, stream); } } // namespace MergeLabels } // namespace Dbscan } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/dbscan
rapidsai_public_repos/cuml/cpp/src/dbscan/adjgraph/algo.cuh
/*
 * Copyright (c) 2018-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cooperative_groups.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>

#include "pack.h"

#include <raft/core/handle.hpp>
#include <raft/sparse/convert/csr.cuh>
#include <raft/util/cuda_utils.cuh>

#include <rmm/device_uvector.hpp>

namespace ML {
namespace Dbscan {
namespace AdjGraph {
namespace Algo {

/**
 * @brief Converts a boolean adjacency matrix into CSR format.
 *
 * The exclusive prefix sum of the vertex degrees (data.vd) yields the CSR row
 * offsets (data.ex_scan); adj_to_csr then fills the column indices
 * (data.adj_graph).
 *
 * @tparam Index_ indexing arithmetic type
 *
 * @param[in]     handle       raft::handle_t
 * @param[in,out] data         Struct containing the adjacency matrix, its
 *                             number of columns, and the vertex degrees
 * @param[in]     batch_size   Number of rows of the adjacency matrix data.adj
 * @param[in,out] row_counters Pre-allocated device buffer of at least
 *                             `batch_size` elements
 * @param[in]     stream       CUDA stream
 */
template <typename Index_ = int>
void launcher(const raft::handle_t& handle,
              Pack<Index_> data,
              Index_ batch_size,
              Index_* row_counters,
              cudaStream_t stream)
{
  // CSR row offsets = exclusive scan of the per-row degrees.
  auto degrees_first = thrust::device_pointer_cast(data.vd);
  auto offsets_first = thrust::device_pointer_cast(data.ex_scan);
  thrust::exclusive_scan(
    handle.get_thrust_policy(), degrees_first, degrees_first + batch_size, offsets_first);

  // Fill the CSR column indices from the batch_size x N row-major boolean
  // adjacency matrix.
  raft::sparse::convert::adj_to_csr(
    handle, data.adj, data.ex_scan, batch_size, data.N, row_counters, data.adj_graph);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

}  // namespace Algo
}  // namespace AdjGraph
}  // namespace Dbscan
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/dbscan
rapidsai_public_repos/cuml/cpp/src/dbscan/adjgraph/pack.h
/* * Copyright (c) 2018-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace ML { namespace Dbscan { namespace AdjGraph { template <typename Index_ = int> struct Pack { /** * vertex degree array * Last position is the sum of all elements in this array (excluding it) * Hence, its length is one more than the number of poTypes */ Index_* vd; /** the adjacency matrix */ bool* adj; /** the adjacency graph */ Index_* adj_graph; Index_ adjnnz; /** exculusive scan generated from vd */ Index_* ex_scan; /** number of points in the dataset */ Index_ N; }; } // namespace AdjGraph } // namespace Dbscan } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/dbscan
rapidsai_public_repos/cuml/cpp/src/dbscan/adjgraph/runner.cuh
/* * Copyright (c) 2018-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "algo.cuh" #include "pack.h" namespace ML { namespace Dbscan { namespace AdjGraph { template <typename Index_ = int> void run(const raft::handle_t& handle, bool* adj, Index_* vd, Index_* adj_graph, Index_ adjnnz, Index_* ex_scan, Index_ N, int algo, Index_ batch_size, Index_* row_counters, cudaStream_t stream) { Pack<Index_> data = {vd, adj, adj_graph, adjnnz, ex_scan, N}; switch (algo) { case 0: ASSERT( false, "Incorrect algo '%d' passed! Naive version of adjgraph has been removed.", algo); case 1: Algo::launcher<Index_>(handle, data, batch_size, row_counters, stream); break; default: ASSERT(false, "Incorrect algo passed! '%d'", algo); } } } // namespace AdjGraph } // namespace Dbscan } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/dbscan
rapidsai_public_repos/cuml/cpp/src/dbscan/corepoints/compute.cuh
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/handle.hpp> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> namespace ML { namespace Dbscan { namespace CorePoints { /** * Compute the core points from the vertex degrees and min_pts criterion * @param[in] handle cuML handle * @param[in] vd Vertex degrees (optionally weighted) * @param[out] mask Boolean core point mask * @param[in] min_pts Core point criterion * @param[in] start_vertex_id First point of the batch * @param[in] batch_size Batch size * @param[in] stream CUDA stream */ template <typename Values_ = int, typename Index_ = int> void compute(const raft::handle_t& handle, const Values_* vd, bool* mask, Index_ min_pts, Index_ start_vertex_id, Index_ batch_size, cudaStream_t stream) { auto counting = thrust::make_counting_iterator<Index_>(0); thrust::for_each( handle.get_thrust_policy(), counting, counting + batch_size, [=] __device__(Index_ idx) { mask[idx + start_vertex_id] = (Index_)vd[idx] >= min_pts; }); } } // namespace CorePoints } // namespace Dbscan } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/dbscan
rapidsai_public_repos/cuml/cpp/src/dbscan/corepoints/exchange.cuh
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <raft/core/handle.hpp>

namespace ML {
namespace Dbscan {
namespace CorePoints {

/**
 * Exchange the core point mask between all workers so that every rank ends up
 * holding the complete mask (all-gather of each rank's contiguous slice).
 *
 * @tparam Index_ integral index type
 * @param[in]    handle    cuML handle (must carry an initialized communicator)
 * @param[inout] mask      Boolean core point mask of all N points; on input this
 *                         rank's rows starting at start_row are valid, on output
 *                         the whole mask is populated
 * @param[in]    N         Number of points
 * @param[in]    start_row Offset of this node's contribution
 * @param[in]    stream    CUDA stream
 */
template <typename Index_ = int>
void exchange(
  const raft::handle_t& handle, bool* mask, Index_ N, Index_ start_row, cudaStream_t stream)
{
  const auto& comm = handle.get_comms();
  int my_rank      = comm.get_rank();
  int n_rank       = comm.get_size();

  // Array with the size of the contribution of each worker.
  // Ranks 0..n_rank-2 contribute ceil(N / n_rank) rows; the last rank takes
  // whatever remains (which can be smaller).
  Index_ rows_per_rank          = raft::ceildiv<Index_>(N, n_rank);
  std::vector<size_t> recvcounts = std::vector<size_t>(n_rank, rows_per_rank);
  recvcounts[n_rank - 1]         = N - (n_rank - 1) * rows_per_rank;

  // Array with the displacement of each part
  std::vector<size_t> displs = std::vector<size_t>(n_rank);
  for (int i = 0; i < n_rank; i++)
    displs[i] = i * rows_per_rank;

  // All-gather operation with variable contribution length. The bool mask is
  // reinterpreted as bytes since the comms layer gathers raw char buffers.
  comm.allgatherv<char>(
    (char*)mask + start_row, (char*)mask, recvcounts.data(), displs.data(), stream);

  // Fixed: the message below was previously split by a raw newline inside the
  // string literal (invalid C++); reconstructed as adjacent-literal concatenation.
  ASSERT(comm.sync_stream(stream) == raft::comms::status_t::SUCCESS,
         "An error occurred in the distributed operation. This can result from "
         "a failed rank");
}

}  // namespace CorePoints
}  // namespace Dbscan
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/hdbscan/condensed_hierarchy.cu
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <raft/label/classlabels.cuh>

#include <cub/cub.cuh>
#include <cuml/common/logger.hpp>
#include <raft/util/cudart_utils.hpp>

#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>

#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/op/sort.cuh>

#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>

#include <cuml/cluster/hdbscan.hpp>

namespace ML {
namespace HDBSCAN {
namespace Common {

// Lexicographic comparator for zipped (parent, child, third-key) tuples, used
// to canonically order the condensed-hierarchy edge list before CSR-style
// consumption.
struct TupleComp {
  template <typename one, typename two>
  __host__ __device__ bool operator()(const one& t1, const two& t2)
  {
    // sort first by each parent,
    if (thrust::get<0>(t1) < thrust::get<0>(t2)) return true;
    if (thrust::get<0>(t1) > thrust::get<0>(t2)) return false;

    // within each parent, sort by each child,
    if (thrust::get<1>(t1) < thrust::get<1>(t2)) return true;
    if (thrust::get<1>(t1) > thrust::get<1>(t2)) return false;

    // finally break ties on the third key. NOTE(review): `<` orders this key
    // ascending, even though an earlier comment claimed descending order.
    return thrust::get<2>(t1) < thrust::get<2>(t2);
  }
};

// Empty-hierarchy constructor: records the leaf count and leaves all edge
// arrays with zero capacity, to be populated later by condense().
template <typename value_idx, typename value_t>
CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(const raft::handle_t& handle_,
                                                           size_t n_leaves_)
  : handle(handle_),
    n_leaves(n_leaves_),
    parents(0, handle.get_stream()),
    children(0, handle.get_stream()),
    lambdas(0, handle.get_stream()),
    sizes(0, handle.get_stream())
{
}

// Copying constructor from raw device arrays: takes ownership-by-copy of the
// edge list, derives n_clusters from the parent id range, and sorts the edges
// into canonical (parent, child, size) order with lambdas permuted alongside.
template <typename value_idx, typename value_t>
CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(const raft::handle_t& handle_,
                                                           size_t n_leaves_,
                                                           int n_edges_,
                                                           value_idx* parents_,
                                                           value_idx* children_,
                                                           value_t* lambdas_,
                                                           value_idx* sizes_)
  : handle(handle_),
    n_leaves(n_leaves_),
    n_edges(n_edges_),
    parents(n_edges_, handle.get_stream()),
    children(n_edges_, handle.get_stream()),
    lambdas(n_edges_, handle.get_stream()),
    sizes(n_edges_, handle.get_stream())
{
  raft::copy(parents.begin(), parents_, n_edges_, handle.get_stream());
  raft::copy(children.begin(), children_, n_edges_, handle.get_stream());
  raft::copy(lambdas.begin(), lambdas_, n_edges_, handle.get_stream());
  raft::copy(sizes.begin(), sizes_, n_edges_, handle.get_stream());

  // Cluster ids are assumed contiguous: the number of clusters is the width of
  // the [min parent, max parent] range.
  auto parents_ptr = thrust::device_pointer_cast(parents.data());

  auto parents_min_max = thrust::minmax_element(
    thrust::cuda::par.on(handle.get_stream()), parents_ptr, parents_ptr + n_edges);
  auto min_cluster = *parents_min_max.first;
  auto max_cluster = *parents_min_max.second;

  n_clusters = max_cluster - min_cluster + 1;

  // Canonical edge ordering: keys are (parent, child, size); the lambda values
  // follow the same permutation.
  auto sort_keys =
    thrust::make_zip_iterator(thrust::make_tuple(parents.begin(), children.begin(), sizes.begin()));
  auto sort_values = thrust::make_zip_iterator(thrust::make_tuple(lambdas.begin()));

  thrust::sort_by_key(thrust::cuda::par.on(handle.get_stream()),
                      sort_keys,
                      sort_keys + n_edges,
                      sort_values,
                      TupleComp());
}

// Move constructor: adopts already-built device buffers without copying or
// re-sorting; caller guarantees the arrays are in the expected canonical order.
template <typename value_idx, typename value_t>
CondensedHierarchy<value_idx, value_t>::CondensedHierarchy(
  const raft::handle_t& handle_,
  size_t n_leaves_,
  int n_edges_,
  int n_clusters_,
  rmm::device_uvector<value_idx>&& parents_,
  rmm::device_uvector<value_idx>&& children_,
  rmm::device_uvector<value_t>&& lambdas_,
  rmm::device_uvector<value_idx>&& sizes_)
  : handle(handle_),
    n_leaves(n_leaves_),
    n_edges(n_edges_),
    n_clusters(n_clusters_),
    parents(std::move(parents_)),
    children(std::move(children_)),
    lambdas(std::move(lambdas_)),
    sizes(std::move(sizes_))
{
}

/**
 * Populates the condensed hierarchy object with the output
 * from Condense::condense_hierarchy. Empty slots in the raw arrays are marked
 * with a size of -1; this compacts them away, relabels cluster ids into a
 * monotonic range, and sorts the resulting edge list canonically.
 *
 * @param full_parents  raw parents array (may contain -1-sized empty slots)
 * @param full_children raw children array
 * @param full_lambdas  raw lambda values
 * @param full_sizes    raw sizes; -1 marks an unused slot
 * @param size          length of the raw arrays; -1 means use the default
 *                      capacity 4 * (n_leaves - 1) + 2
 */
template <typename value_idx, typename value_t>
void CondensedHierarchy<value_idx, value_t>::condense(value_idx* full_parents,
                                                      value_idx* full_children,
                                                      value_t* full_lambdas,
                                                      value_idx* full_sizes,
                                                      value_idx size)
{
  auto stream = handle.get_stream();

  if (size == -1) size = 4 * (n_leaves - 1) + 2;

  // Count the valid (size != -1) entries to learn the compacted edge count.
  n_edges = thrust::transform_reduce(
    thrust::cuda::par.on(stream),
    full_sizes,
    full_sizes + size,
    [=] __device__(value_idx a) { return a != -1; },
    0,
    thrust::plus<value_idx>());

  parents.resize(n_edges, stream);
  children.resize(n_edges, stream);
  lambdas.resize(n_edges, stream);
  sizes.resize(n_edges, stream);

  // Stream-compact all four arrays in lockstep, keeping only valid edges.
  auto in = thrust::make_zip_iterator(
    thrust::make_tuple(full_parents, full_children, full_lambdas, full_sizes));
  auto out = thrust::make_zip_iterator(
    thrust::make_tuple(parents.data(), children.data(), lambdas.data(), sizes.data()));

  thrust::copy_if(thrust::cuda::par.on(stream),
                  in,
                  in + size,
                  out,
                  [=] __device__(thrust::tuple<value_idx, value_idx, value_t, value_idx> tup) {
                    return thrust::get<3>(tup) != -1;
                  });

  // TODO: Avoid the copies here by updating kernel
  // Children and parents are concatenated into one buffer so a single
  // make_monotonic pass relabels both consistently.
  rmm::device_uvector<value_idx> parent_child(n_edges * 2, stream);

  raft::copy_async(parent_child.begin(), children.begin(), n_edges, stream);
  raft::copy_async(parent_child.begin() + n_edges, parents.begin(), n_edges, stream);

  // find n_clusters
  auto parents_ptr = thrust::device_pointer_cast(parents.data());
  auto max_parent =
    *(thrust::max_element(thrust::cuda::par.on(stream), parents_ptr, parents_ptr + n_edges));

  // now invert labels: ids >= n_leaves are cluster ids and get mirrored around
  // max_parent so the root maps to the smallest cluster id; leaf ids (< n_leaves)
  // pass through untouched.
  auto invert_op = [max_parent, n_leaves = n_leaves] __device__(auto& x) {
    return x >= n_leaves ? max_parent - x + n_leaves : x;
  };

  thrust::transform(thrust::cuda::par.on(stream),
                    parent_child.begin(),
                    parent_child.end(),
                    parent_child.begin(),
                    invert_op);

  // Relabel ids into a dense monotonic range (in place).
  raft::label::make_monotonic(
    parent_child.data(), parent_child.data(), parent_child.size(), stream, true);

  raft::copy_async(children.begin(), parent_child.begin(), n_edges, stream);
  raft::copy_async(parents.begin(), parent_child.begin() + n_edges, n_edges, stream);

  auto parents_min_max =
    thrust::minmax_element(thrust::cuda::par.on(stream), parents_ptr, parents_ptr + n_edges);
  auto min_cluster = *parents_min_max.first;
  auto max_cluster = *parents_min_max.second;

  n_clusters = max_cluster - min_cluster + 1;

  // Final canonical ordering, same scheme as the raw-array constructor.
  auto sort_keys =
    thrust::make_zip_iterator(thrust::make_tuple(parents.begin(), children.begin(), sizes.begin()));
  auto sort_values = thrust::make_zip_iterator(thrust::make_tuple(lambdas.begin()));

  thrust::sort_by_key(
    thrust::cuda::par.on(stream), sort_keys, sort_keys + n_edges, sort_values, TupleComp());
}

};  // namespace Common
};  // namespace HDBSCAN
};  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/hdbscan/prediction_data.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "detail/utils.h" #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <cuml/cluster/hdbscan.hpp> #include <raft/sparse/convert/csr.cuh> #include <raft/sparse/op/sort.cuh> #include <algorithm> #include <cmath> #include <thrust/copy.h> #include <thrust/count.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> namespace ML { namespace HDBSCAN { namespace Common { template <typename value_idx, typename value_t> void PredictionData<value_idx, value_t>::allocate(const raft::handle_t& handle, value_idx n_exemplars_, value_idx n_selected_clusters_, value_idx n_edges_) { this->n_exemplars = n_exemplars_; this->n_selected_clusters = n_selected_clusters_; exemplar_idx.resize(n_exemplars, handle.get_stream()); exemplar_label_offsets.resize(n_selected_clusters + 1, handle.get_stream()); selected_clusters.resize(n_selected_clusters, handle.get_stream()); index_into_children.resize(n_edges_ + 1, handle.get_stream()); } /** * Builds an index into the children array of the CondensedHierarchy object. This is useful for * constant time lookups during bottom-up tree traversals in prediction algorithms. It is therefore * an important feature for speed-up in comparison with Scikit-learn Contrib. 
This is intended for * internal use only and users are not expected to invoke this method. * * @tparam value_idx * @param[in] handle raft handle for resource reuse * @param[in] children children array of condensed hierarchy * @param[in] n_edges number of edges in children array * @param[out] index_into_children index into the children array (size n_edges + 1) */ template <typename value_idx> void build_index_into_children(const raft::handle_t& handle, value_idx* children, value_idx n_edges, value_idx* index_into_children) { auto exec_policy = handle.get_thrust_policy(); auto counting = thrust::make_counting_iterator<value_idx>(0); auto index_op = [index_into_children] __device__(auto t) { index_into_children[thrust::get<0>(t)] = thrust::get<1>(t); return; }; thrust::for_each( exec_policy, thrust::make_zip_iterator(thrust::make_tuple(children, counting)), thrust::make_zip_iterator(thrust::make_tuple(children + n_edges, counting + n_edges)), index_op); } /** * Populates the PredictionData container object. Computes and stores: the indices of exemplar * points sorted by their cluster labels, cluster label offsets of the exemplars and the set of * clusters selected from the cluster tree. 
* * @param[in] handle raft handle for resource reuse * @param[in] condensed_tree a condensed hierarchy * @param[in] labels Final normalized labels * @param[in] inverse_label_map map of normalized labels to original labels (size n_clusters) * @param[in] n_selected_clusters number of clusters in the final clustering * @param[in] prediction_data PreditionData object */ void generate_prediction_data(const raft::handle_t& handle, CondensedHierarchy<int, float>& condensed_tree, int* labels, int* inverse_label_map, int n_selected_clusters, PredictionData<int, float>& prediction_data) { auto stream = handle.get_stream(); auto exec_policy = handle.get_thrust_policy(); auto counting = thrust::make_counting_iterator<int>(0); auto parents = condensed_tree.get_parents(); auto children = condensed_tree.get_children(); auto lambdas = condensed_tree.get_lambdas(); auto n_edges = condensed_tree.get_n_edges(); auto n_clusters = condensed_tree.get_n_clusters(); auto n_leaves = condensed_tree.get_n_leaves(); auto sizes = condensed_tree.get_sizes(); // first compute the death of each cluster in the condensed hierarchy rmm::device_uvector<int> sorted_parents(n_edges, stream); raft::copy_async(sorted_parents.data(), parents, n_edges, stream); rmm::device_uvector<int> sorted_parents_offsets(n_clusters + 1, stream); detail::Utils::parent_csr( handle, condensed_tree, sorted_parents.data(), sorted_parents_offsets.data()); prediction_data.set_n_clusters(handle, n_clusters); // this is to find maximum lambdas of all children under a parent detail::Utils::cub_segmented_reduce( lambdas, prediction_data.get_deaths(), n_clusters, sorted_parents_offsets.data(), stream, cub::DeviceSegmentedReduce::Max<const float*, float*, const int*, const int*>); rmm::device_uvector<int> is_leaf_cluster(n_clusters, stream); thrust::fill(exec_policy, is_leaf_cluster.begin(), is_leaf_cluster.end(), 1); auto leaf_cluster_op = [is_leaf_cluster = is_leaf_cluster.data(), parents, sizes, n_leaves] __device__(auto idx) { 
if (sizes[idx] > 1) { is_leaf_cluster[parents[idx] - n_leaves] = 0; } return; }; thrust::for_each(exec_policy, counting, counting + n_edges, leaf_cluster_op); rmm::device_uvector<int> is_exemplar(n_leaves, stream); rmm::device_uvector<int> exemplar_idx(n_leaves, stream); rmm::device_uvector<int> exemplar_label_offsets(n_selected_clusters + 1, stream); // classify whether or not a point is an exemplar point using the death values auto exemplar_op = [is_exemplar = is_exemplar.data(), lambdas, is_leaf_cluster = is_leaf_cluster.data(), parents, children, n_leaves, labels, deaths = prediction_data.get_deaths()] __device__(auto idx) { if (children[idx] < n_leaves) { is_exemplar[children[idx]] = (labels[children[idx]] != -1 && is_leaf_cluster[parents[idx] - n_leaves] && lambdas[idx] == deaths[parents[idx] - n_leaves]); return; } }; thrust::for_each(exec_policy, counting, counting + n_edges, exemplar_op); int n_exemplars = thrust::count_if( exec_policy, is_exemplar.begin(), is_exemplar.end(), [] __device__(auto idx) { return idx; }); prediction_data.allocate(handle, n_exemplars, n_selected_clusters, n_edges); auto exemplar_idx_end_ptr = thrust::copy_if( exec_policy, counting, counting + n_leaves, prediction_data.get_exemplar_idx(), [is_exemplar = is_exemplar.data()] __device__(auto idx) { return is_exemplar[idx]; }); // use the exemplar labels to fetch the set of selected clusters from the condensed hierarchy rmm::device_uvector<int> exemplar_labels(n_exemplars, stream); // this uses the original, pre-normalized label by // using the inverse label_map to lookup the original labels from final labels thrust::transform(exec_policy, prediction_data.get_exemplar_idx(), prediction_data.get_exemplar_idx() + n_exemplars, exemplar_labels.data(), [labels, inverse_label_map] __device__(auto idx) { auto label = labels[idx]; if (label != -1) { return inverse_label_map[label]; } return -1; }); thrust::sort_by_key(exec_policy, exemplar_labels.data(), exemplar_labels.data() + n_exemplars, 
prediction_data.get_exemplar_idx()); // this uses the final, converted values of the labels rmm::device_uvector<int> converted_exemplar_labels(n_exemplars, stream); thrust::transform(exec_policy, prediction_data.get_exemplar_idx(), prediction_data.get_exemplar_idx() + n_exemplars, converted_exemplar_labels.data(), [labels] __device__(auto idx) { return labels[idx]; }); if (n_exemplars > 0) { raft::sparse::convert::sorted_coo_to_csr(converted_exemplar_labels.data(), n_exemplars, prediction_data.get_exemplar_label_offsets(), n_selected_clusters + 1, stream); thrust::transform(exec_policy, prediction_data.get_exemplar_label_offsets(), prediction_data.get_exemplar_label_offsets() + n_selected_clusters, prediction_data.get_selected_clusters(), [exemplar_labels = exemplar_labels.data(), n_leaves] __device__(auto idx) { return exemplar_labels[idx] + n_leaves; }); // build the index into the children array for constant time lookups build_index_into_children(handle, children, n_edges, prediction_data.get_index_into_children()); } } }; // end namespace Common }; // end namespace HDBSCAN }; // end namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/hdbscan/runner.h
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/resource/thrust_policy.hpp> #include <raft/util/cudart_utils.hpp> #include <raft/core/handle.hpp> #include <raft/core/kvp.hpp> #include <rmm/device_uvector.hpp> #include <cuml/common/logger.hpp> #include <raft/cluster/detail/agglomerative.cuh> #include <raft/cluster/detail/mst.cuh> #include <raft/sparse/coo.hpp> #include "detail/condense.cuh" #include "detail/extract.cuh" #include "detail/reachability.cuh" #include "detail/soft_clustering.cuh" #include <cuml/cluster/hdbscan.hpp> #include <thrust/device_ptr.h> #include <thrust/extrema.h> #include <thrust/gather.h> #include <thrust/scatter.h> #include <thrust/transform.h> namespace ML { namespace HDBSCAN { /** * Functor with reduction ops for performing fused 1-nn * computation and guaranteeing only cross-component * neighbors are considered. 
* @tparam value_idx * @tparam value_t */ template <typename value_idx, typename value_t> struct FixConnectivitiesRedOp { value_t* core_dists; value_idx m; DI FixConnectivitiesRedOp() : m(0) {} FixConnectivitiesRedOp(value_t* core_dists_, value_idx m_) : core_dists(core_dists_), m(m_){}; typedef typename raft::KeyValuePair<value_idx, value_t> KVP; DI void operator()(value_idx rit, KVP* out, const KVP& other) const { if (rit < m && other.value < std::numeric_limits<value_t>::max()) { value_t core_dist_rit = core_dists[rit]; value_t core_dist_other = max(core_dist_rit, max(core_dists[other.key], other.value)); value_t core_dist_out; if (out->key > -1) { core_dist_out = max(core_dist_rit, max(core_dists[out->key], out->value)); } else { core_dist_out = out->value; } bool smaller = core_dist_other < core_dist_out; out->key = smaller ? other.key : out->key; out->value = smaller ? core_dist_other : core_dist_out; } } DI KVP operator()(value_idx rit, const KVP& a, const KVP& b) const { if (rit < m && a.key > -1) { value_t core_dist_rit = core_dists[rit]; value_t core_dist_a = max(core_dist_rit, max(core_dists[a.key], a.value)); value_t core_dist_b; if (b.key > -1) { core_dist_b = max(core_dist_rit, max(core_dists[b.key], b.value)); } else { core_dist_b = b.value; } return core_dist_a < core_dist_b ? 
KVP(a.key, core_dist_a) : KVP(b.key, core_dist_b); } return b; } DI void init(value_t* out, value_t maxVal) const { *out = maxVal; } DI void init(KVP* out, value_t maxVal) const { out->key = -1; out->value = maxVal; } DI void init_key(value_t& out, value_idx idx) const { return; } DI void init_key(KVP& out, value_idx idx) const { out.key = idx; } DI value_t get_value(KVP& out) const { return out.value; } DI value_t get_value(value_t& out) const { return out; } void gather(const raft::resources& handle, value_idx* map) { auto tmp_core_dists = raft::make_device_vector<value_t>(handle, m); thrust::gather(raft::resource::get_thrust_policy(handle), map, map + m, core_dists, tmp_core_dists.data_handle()); raft::copy_async( core_dists, tmp_core_dists.data_handle(), m, raft::resource::get_cuda_stream(handle)); } void scatter(const raft::resources& handle, value_idx* map) { auto tmp_core_dists = raft::make_device_vector<value_t>(handle, m); thrust::scatter(raft::resource::get_thrust_policy(handle), core_dists, core_dists + m, map, tmp_core_dists.data_handle()); raft::copy_async( core_dists, tmp_core_dists.data_handle(), m, raft::resource::get_cuda_stream(handle)); } }; /** * Constructs a linkage by computing mutual reachability, mst, and * dendrogram. This is shared by HDBSCAN and Robust Single Linkage * since the two algorithms differ only in the cluster * selection and extraction. 
* @tparam value_idx * @tparam value_t * @param[in] handle raft handle for resource reuse * @param[in] X data points (size m * n) * @param[in] m number of rows * @param[in] n number of columns * @param[in] metric distance metric to use * @param[in] params hyper parameters * @param[in] core_dists buffer for storing core distances (size m) * @param[out] out output container object */ template <typename value_idx = int64_t, typename value_t = float> void build_linkage(const raft::handle_t& handle, const value_t* X, size_t m, size_t n, raft::distance::DistanceType metric, Common::HDBSCANParams& params, value_t* core_dists, Common::robust_single_linkage_output<value_idx, value_t>& out) { auto stream = handle.get_stream(); /** * Mutual reachability graph */ rmm::device_uvector<value_idx> mutual_reachability_indptr(m + 1, stream); // Note that (min_samples+1) is parsed while allocating space for the COO matrix and to the // mutual_reachability_graph function. This was done to account for self-loops in the knn graph // and be consistent with Scikit learn Contrib. 
raft::sparse::COO<value_t, value_idx> mutual_reachability_coo(stream, (params.min_samples + 1) * m * 2); detail::Reachability::mutual_reachability_graph(handle, X, (size_t)m, (size_t)n, metric, params.min_samples + 1, params.alpha, mutual_reachability_indptr.data(), core_dists, mutual_reachability_coo); /** * Construct MST sorted by weights */ rmm::device_uvector<value_idx> color(m, stream); FixConnectivitiesRedOp<value_idx, value_t> red_op(core_dists, m); // during knn graph connection raft::cluster::detail::build_sorted_mst(handle, X, mutual_reachability_indptr.data(), mutual_reachability_coo.cols(), mutual_reachability_coo.vals(), m, n, out.get_mst_src(), out.get_mst_dst(), out.get_mst_weights(), color.data(), mutual_reachability_coo.nnz, red_op, metric, (size_t)10); /** * Perform hierarchical labeling */ size_t n_edges = m - 1; raft::cluster::detail::build_dendrogram_host(handle, out.get_mst_src(), out.get_mst_dst(), out.get_mst_weights(), n_edges, out.get_children(), out.get_deltas(), out.get_sizes()); } template <typename value_idx = int64_t, typename value_t = float> void _fit_hdbscan(const raft::handle_t& handle, const value_t* X, size_t m, size_t n, raft::distance::DistanceType metric, Common::HDBSCANParams& params, value_idx* labels, value_t* core_dists, Common::hdbscan_output<value_idx, value_t>& out) { auto stream = handle.get_stream(); auto exec_policy = handle.get_thrust_policy(); int min_cluster_size = params.min_cluster_size; build_linkage(handle, X, m, n, metric, params, core_dists, out); /** * Condense branches of tree according to min cluster size */ detail::Condense::build_condensed_hierarchy(handle, out.get_children(), out.get_deltas(), out.get_sizes(), min_cluster_size, m, out.get_condensed_tree()); /** * Extract labels from stability */ rmm::device_uvector<value_t> tree_stabilities(out.get_condensed_tree().get_n_clusters(), handle.get_stream()); rmm::device_uvector<value_idx> label_map(out.get_condensed_tree().get_n_clusters(), 
handle.get_stream()); value_idx n_selected_clusters = detail::Extract::extract_clusters(handle, out.get_condensed_tree(), m, labels, tree_stabilities.data(), out.get_probabilities(), label_map.data(), params.cluster_selection_method, out._get_inverse_label_map(), params.allow_single_cluster, params.max_cluster_size, params.cluster_selection_epsilon); out.set_n_clusters(n_selected_clusters); auto lambdas_ptr = thrust::device_pointer_cast(out.get_condensed_tree().get_lambdas()); value_t max_lambda = *(thrust::max_element( exec_policy, lambdas_ptr, lambdas_ptr + out.get_condensed_tree().get_n_edges())); detail::Stability::get_stability_scores(handle, labels, tree_stabilities.data(), out.get_condensed_tree().get_n_clusters(), max_lambda, m, out.get_stabilities(), label_map.data()); /** * Normalize labels so they are drawn from a monotonically increasing set * starting at 0 even in the presence of noise (-1) */ thrust::transform(exec_policy, labels, labels + m, out.get_labels(), [label_map = label_map.data()] __device__(value_idx label) { if (label != -1) return label_map[label]; return -1; }); } }; // end namespace HDBSCAN }; // end namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/hdbscan/hdbscan.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "detail/condense.cuh" #include "detail/predict.cuh" #include <cuml/cluster/hdbscan.hpp> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include "runner.h" namespace ML { void hdbscan(const raft::handle_t& handle, const float* X, size_t m, size_t n, raft::distance::DistanceType metric, HDBSCAN::Common::HDBSCANParams& params, HDBSCAN::Common::hdbscan_output<int, float>& out, float* core_dists) { rmm::device_uvector<int> labels(m, handle.get_stream()); HDBSCAN::_fit_hdbscan(handle, X, m, n, metric, params, labels.data(), core_dists, out); } void build_condensed_hierarchy(const raft::handle_t& handle, const int* children, const float* delta, const int* sizes, int min_cluster_size, int n_leaves, HDBSCAN::Common::CondensedHierarchy<int, float>& condensed_tree) { HDBSCAN::detail::Condense::build_condensed_hierarchy( handle, children, delta, sizes, min_cluster_size, n_leaves, condensed_tree); } void _extract_clusters(const raft::handle_t& handle, size_t n_leaves, int n_edges, int* parents, int* children, float* lambdas, int* sizes, int* labels, float* probabilities, HDBSCAN::Common::CLUSTER_SELECTION_METHOD cluster_selection_method, bool allow_single_cluster, int max_cluster_size, float cluster_selection_epsilon) { HDBSCAN::Common::CondensedHierarchy condensed_tree( handle, n_leaves, n_edges, parents, children, lambdas, sizes); rmm::device_uvector<float> 
stabilities(condensed_tree.get_n_clusters(), handle.get_stream()); rmm::device_uvector<int> label_map(condensed_tree.get_n_clusters(), handle.get_stream()); rmm::device_uvector<int> inverse_label_map(0, handle.get_stream()); HDBSCAN::detail::Extract::extract_clusters(handle, condensed_tree, n_leaves, labels, stabilities.data(), probabilities, label_map.data(), cluster_selection_method, inverse_label_map, allow_single_cluster, max_cluster_size, cluster_selection_epsilon); } void compute_all_points_membership_vectors( const raft::handle_t& handle, HDBSCAN::Common::CondensedHierarchy<int, float>& condensed_tree, HDBSCAN::Common::PredictionData<int, float>& prediction_data, const float* X, raft::distance::DistanceType metric, float* membership_vec, size_t batch_size) { HDBSCAN::detail::Predict::all_points_membership_vectors( handle, condensed_tree, prediction_data, X, metric, membership_vec, batch_size); } void compute_membership_vector(const raft::handle_t& handle, HDBSCAN::Common::CondensedHierarchy<int, float>& condensed_tree, HDBSCAN::Common::PredictionData<int, float>& prediction_data, const float* X, const float* points_to_predict, size_t n_prediction_points, int min_samples, raft::distance::DistanceType metric, float* membership_vec, size_t batch_size) { // Note that (min_samples+1) is parsed to the approximate_predict function. This was done for the // core distance computation to consistent with Scikit learn Contrib. 
HDBSCAN::detail::Predict::membership_vector(handle, condensed_tree, prediction_data, X, points_to_predict, n_prediction_points, metric, min_samples + 1, membership_vec, batch_size); } void out_of_sample_predict(const raft::handle_t& handle, HDBSCAN::Common::CondensedHierarchy<int, float>& condensed_tree, HDBSCAN::Common::PredictionData<int, float>& prediction_data, const float* X, int* labels, const float* points_to_predict, size_t n_prediction_points, raft::distance::DistanceType metric, int min_samples, int* out_labels, float* out_probabilities) { // Note that (min_samples+1) is parsed to the approximate_predict function. This was done for the // core distance computation to consistent with Scikit learn Contrib. HDBSCAN::detail::Predict::approximate_predict(handle, condensed_tree, prediction_data, X, labels, points_to_predict, n_prediction_points, metric, min_samples + 1, out_labels, out_probabilities); } namespace HDBSCAN::HELPER { void compute_core_dists(const raft::handle_t& handle, const float* X, float* core_dists, size_t m, size_t n, raft::distance::DistanceType metric, int min_samples) { HDBSCAN::detail::Reachability::_compute_core_dists<int, float>( handle, X, core_dists, m, n, metric, min_samples); } void compute_inverse_label_map(const raft::handle_t& handle, HDBSCAN::Common::CondensedHierarchy<int, float>& condensed_tree, size_t n_leaves, HDBSCAN::Common::CLUSTER_SELECTION_METHOD cluster_selection_method, rmm::device_uvector<int>& inverse_label_map, bool allow_single_cluster, int max_cluster_size, float cluster_selection_epsilon) { HDBSCAN::detail::Extract::_compute_inverse_label_map(handle, condensed_tree, n_leaves, cluster_selection_method, inverse_label_map, allow_single_cluster, max_cluster_size, cluster_selection_epsilon); } } // end namespace HDBSCAN::HELPER }; // end namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/select.cuh
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "kernels/select.cuh"
#include "utils.h"

#include <cub/cub.cuh>

#include <raft/util/cudart_utils.hpp>

#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/op/sort.cuh>

#include <cuml/cluster/hdbscan.hpp>

#include <raft/label/classlabels.cuh>

#include <algorithm>

#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>

#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>

namespace ML {
namespace HDBSCAN {
namespace detail {
namespace Select {

/**
 * Given an initial frontier, runs a level-synchronous breadth-first
 * search over the cluster tree, invoking `bfs_kernel` once per level
 * until no nodes remain to process.
 *
 * @tparam value_idx index type of the tree arrays
 * @tparam Bfs_Kernel kernel functor type launched at each level
 * @tparam tpb threads per block for the kernel launches
 * @param[in] handle raft handle for resource reuse
 * @param[in] indptr CSR indptr of children array (size n_clusters+1)
 * @param[in] children children hierarchy array (size n_clusters)
 * @param[inout] frontier dense 0/1 array marking which nodes are processed
 *               at the current level (size n_clusters)
 * @param[inout] is_cluster array of cluster selections / deselections (size n_clusters)
 * @param[in] n_clusters number of clusters
 * @param[in] bfs_kernel kernel accepting indptr, children, frontier,
 *            next_frontier, is_cluster, and n_clusters
 */
template <typename value_idx, typename Bfs_Kernel, int tpb = 256>
void perform_bfs(const raft::handle_t& handle,
                 const value_idx* indptr,
                 const value_idx* children,
                 int* frontier,
                 int* is_cluster,
                 int n_clusters,
                 Bfs_Kernel bfs_kernel)
{
  auto stream = handle.get_stream();
  auto policy = handle.get_thrust_policy();

  // Scratch buffer the kernel writes each level's output frontier into.
  rmm::device_uvector<int> level_output(n_clusters, stream);
  thrust::fill(policy, level_output.begin(), level_output.end(), 0);

  // Number of nodes still flagged for processing; the frontier is a dense
  // 0/1 mask so a sum counts the active entries.
  value_idx remaining = thrust::reduce(policy, frontier, frontier + n_clusters, 0);

  // TODO: Investigate whether it's worth gathering the sparse frontier into
  // a dense form for purposes of uniform workload/thread scheduling

  size_t n_blocks = raft::ceildiv(n_clusters, tpb);

  // Process one BFS level per iteration until the frontier drains.
  while (remaining > 0) {
    bfs_kernel<<<n_blocks, tpb, 0, stream>>>(
      indptr, children, frontier, level_output.data(), is_cluster, n_clusters);

    // Promote this level's output to the next input frontier, then reset
    // the scratch buffer for the following level.
    thrust::copy(policy, level_output.begin(), level_output.end(), frontier);
    thrust::fill(policy, level_output.begin(), level_output.end(), 0);

    remaining = thrust::reduce(policy, frontier, frontier + n_clusters, 0);

    handle.sync_stream(stream);
  }
}

/**
 * Computes a CSR index of parents of cluster tree.
CSR index is * created by sorting parents by (children, sizes) * @tparam value_idx * @tparam value_t * @param[in] handle raft handle for resource reuse * @param[inout] cluster_tree cluster tree (condensed hierarchy with all nodes of size > 1) * @param[in] n_clusters number of clusters * @param[out] indptr CSR indptr of parents array after sort */ template <typename value_idx, typename value_t> void parent_csr(const raft::handle_t& handle, Common::CondensedHierarchy<value_idx, value_t>& cluster_tree, value_idx* indptr) { auto stream = handle.get_stream(); auto parents = cluster_tree.get_parents(); auto children = cluster_tree.get_children(); auto sizes = cluster_tree.get_sizes(); auto cluster_tree_edges = cluster_tree.get_n_edges(); auto n_clusters = cluster_tree.get_n_clusters(); if (cluster_tree_edges > 0) { raft::sparse::op::coo_sort(0, 0, cluster_tree_edges, parents, children, sizes, stream); raft::sparse::convert::sorted_coo_to_csr( parents, cluster_tree_edges, indptr, n_clusters + 1, stream); } else { thrust::fill(handle.get_thrust_policy(), indptr, indptr + n_clusters + 1, 0); } } /** * Computes the excess of mass. This is a cluster selection * strategy that iterates upwards from the leaves of the cluster * tree toward the root, selecting clusters based on stabilities and size. 
* @tparam value_idx * @tparam value_t * @tparam tpb * @param[in] handle raft handle for resource reuse * @param[inout] cluster_tree condensed hierarchy containing only nodes of size > 1 * @param[in] stability an array of nodes from the cluster tree and their * corresponding stabilities * @param[out] is_cluster array of cluster selections / deselections (size n_clusters) * @param[in] n_clusters number of clusters in cluster tree * @param[in] max_cluster_size max number of points in a cluster before * it will be deselected (and children selected) */ template <typename value_idx, typename value_t, int tpb = 256> void excess_of_mass(const raft::handle_t& handle, Common::CondensedHierarchy<value_idx, value_t>& cluster_tree, value_t* stability, int* is_cluster, int n_clusters, value_idx max_cluster_size, bool allow_single_cluster) { auto stream = handle.get_stream(); auto exec_policy = handle.get_thrust_policy(); auto cluster_tree_edges = cluster_tree.get_n_edges(); auto parents = cluster_tree.get_parents(); auto children = cluster_tree.get_children(); auto lambdas = cluster_tree.get_lambdas(); auto sizes = cluster_tree.get_sizes(); rmm::device_uvector<value_idx> cluster_sizes(n_clusters, stream); thrust::fill(exec_policy, cluster_sizes.data(), cluster_sizes.data() + cluster_sizes.size(), 0); value_idx* cluster_sizes_ptr = cluster_sizes.data(); auto out = thrust::make_zip_iterator(thrust::make_tuple(parents, children, sizes)); thrust::for_each(exec_policy, out, out + cluster_tree_edges, [=] __device__(const thrust::tuple<value_idx, value_idx, value_idx>& tup) { // if parent is root (0), add to cluster_sizes_ptr if (thrust::get<0>(tup) == 0) cluster_sizes_ptr[0] += thrust::get<2>(tup); cluster_sizes_ptr[thrust::get<1>(tup)] = thrust::get<2>(tup); }); /** * 2. Iterate through each level from leaves back to root. 
Use the cluster * tree CSR and warp-level reduction to sum stabilities and test whether * or not current cluster should continue to be its own */ std::vector<int> is_cluster_h(n_clusters, true); // setting the selection of root is_cluster_h[0] = allow_single_cluster; std::vector<int> frontier_h(n_clusters, false); std::vector<value_idx> cluster_sizes_h(n_clusters); rmm::device_uvector<value_idx> indptr(n_clusters + 1, stream); parent_csr(handle, cluster_tree, indptr.data()); raft::update_host(cluster_sizes_h.data(), cluster_sizes.data(), cluster_sizes.size(), stream); std::vector<value_idx> indptr_h(indptr.size(), 0); if (cluster_tree_edges > 0) raft::update_host(indptr_h.data(), indptr.data(), indptr.size(), stream); handle.sync_stream(stream); // Loop through stabilities in "reverse topological order" (e.g. reverse sorted order) value_idx tree_top = allow_single_cluster ? 0 : 1; for (value_idx node = n_clusters - 1; node >= tree_top; node--) { value_t node_stability = 0.0; raft::update_host(&node_stability, stability + node, 1, stream); value_t subtree_stability = 0.0; if (indptr_h[node + 1] - indptr_h[node] > 0) { subtree_stability = thrust::transform_reduce( exec_policy, children + indptr_h[node], children + indptr_h[node + 1], [=] __device__(value_idx a) { return stability[a]; }, 0.0, thrust::plus<value_t>()); } if (subtree_stability > node_stability || cluster_sizes_h[node] > max_cluster_size) { // Deselect / merge cluster with children raft::update_device(stability + node, &subtree_stability, 1, stream); is_cluster_h[node] = false; } else { // Mark children to be deselected frontier_h[node] = true; } } /** * 3. 
Perform BFS through is_cluster, propagating cluster * "deselection" through subtrees */ rmm::device_uvector<int> cluster_propagate(n_clusters, stream); rmm::device_uvector<int> frontier(n_clusters, stream); raft::update_device(is_cluster, is_cluster_h.data(), n_clusters, stream); raft::update_device(frontier.data(), frontier_h.data(), n_clusters, stream); perform_bfs(handle, indptr.data(), children, frontier.data(), is_cluster, n_clusters, propagate_cluster_negation_kernel<value_idx>); } /** * Uses the leaves of the cluster tree as final cluster selections * @tparam value_idx * @tparam value_t * @tparam tpb * @param[in] handle raft handle for resource reuse * @param[inout] cluster_tree condensed hierarchy containing only nodes of size > 1 * @param[out] is_cluster array of cluster selections / deselections (size n_clusters) * @param[in] n_clusters number of clusters in cluster tree */ template <typename value_idx, typename value_t, int tpb = 256> void leaf(const raft::handle_t& handle, Common::CondensedHierarchy<value_idx, value_t>& cluster_tree, int* is_cluster, int n_clusters) { auto stream = handle.get_stream(); auto exec_policy = handle.get_thrust_policy(); auto parents = cluster_tree.get_parents(); auto children = cluster_tree.get_children(); auto n_edges = cluster_tree.get_n_edges(); rmm::device_uvector<int> is_parent(n_clusters, stream); thrust::fill(exec_policy, is_parent.begin(), is_parent.end(), false); auto is_parent_op = [is_parent = is_parent.data()] __device__(auto& p) { is_parent[p] = true; }; thrust::for_each(exec_policy, parents, parents + n_edges, is_parent_op); auto is_cluster_op = [is_parent = is_parent.data(), is_cluster = is_cluster] __device__(auto& c) { if (!is_parent[c]) { is_cluster[c] = true; } }; thrust::for_each(exec_policy, children, children + n_edges, is_cluster_op); } /** * Selects clusters based on distance threshold. 
* @tparam value_idx * @tparam value_t * @tparam tpb * @param[in] handle raft handle for resource reuse * @param[in] cluster_tree condensed hierarchy with nodes of size > 1 * @param[out] is_cluster array of cluster selections / deselections (size n_clusters) * @param[in] n_clusters number of clusters in cluster tree * @param[in] cluster_selection_epsilon distance threshold * @param[in] allow_single_cluster allows a single cluster with noisy datasets * @param[in] n_selected_clusters number of cluster selections in is_cluster */ template <typename value_idx, typename value_t, int tpb = 256> void cluster_epsilon_search(const raft::handle_t& handle, Common::CondensedHierarchy<value_idx, value_t>& cluster_tree, int* is_cluster, const int n_clusters, const value_t cluster_selection_epsilon, const bool allow_single_cluster, const int n_selected_clusters) { auto stream = handle.get_stream(); auto thrust_policy = handle.get_thrust_policy(); auto parents = cluster_tree.get_parents(); auto children = cluster_tree.get_children(); auto lambdas = cluster_tree.get_lambdas(); auto cluster_tree_edges = cluster_tree.get_n_edges(); rmm::device_uvector<int> selected_clusters(n_selected_clusters, stream); // copying selected clusters by index thrust::copy_if(thrust_policy, thrust::make_counting_iterator(value_idx(0)), thrust::make_counting_iterator(n_clusters), is_cluster, selected_clusters.data(), [] __device__(auto cluster) { return cluster; }); // sort lambdas and parents by children for epsilon search auto start = thrust::make_zip_iterator(thrust::make_tuple(parents, lambdas)); thrust::sort_by_key(thrust_policy, children, children + cluster_tree_edges, start); rmm::device_uvector<value_t> eps(cluster_tree_edges, stream); thrust::transform( thrust_policy, lambdas, lambdas + cluster_tree_edges, eps.begin(), [] __device__(auto x) { return 1 / x; }); // declare frontier and search rmm::device_uvector<int> frontier(n_clusters, stream); thrust::fill(thrust_policy, frontier.begin(), 
frontier.end(), false); auto nblocks = raft::ceildiv(n_selected_clusters, tpb); cluster_epsilon_search_kernel<<<nblocks, tpb, 0, stream>>>(selected_clusters.data(), n_selected_clusters, parents, children, lambdas, cluster_tree_edges, is_cluster, frontier.data(), n_clusters, cluster_selection_epsilon, allow_single_cluster); rmm::device_uvector<value_idx> indptr(n_clusters + 1, stream); parent_csr(handle, cluster_tree, indptr.data()); perform_bfs(handle, indptr.data(), children, frontier.data(), is_cluster, n_clusters, propagate_cluster_negation_kernel<value_idx>); } /** * Entry point for end-to-end cluster selection logic * @tparam value_idx * @tparam value_t * @param[in] handle raft handle for resource reuse * @param[in] condensed_tree condensed hierarchy * @param[in] tree_stabilities stabilities array (size n_leaves from condensed hierarchy) * @param[out] is_cluster array of cluster selections / deselections (size n_clusters from condensed * hierarchy) * @param[in] cluster_selection_method method to use for selecting clusters * @param[in] allow_single_cluster whether a single cluster can be selected in noisy conditions * @param[in] max_cluster_size max size cluster to select before selecting children * @param[in] cluster_selection_epsilon distance threshold (0.0 disables distance selection) */ template <typename value_idx, typename value_t> void select_clusters(const raft::handle_t& handle, Common::CondensedHierarchy<value_idx, value_t>& condensed_tree, value_t* tree_stabilities, int* is_cluster, Common::CLUSTER_SELECTION_METHOD cluster_selection_method, bool allow_single_cluster, int max_cluster_size, float cluster_selection_epsilon) { auto stream = handle.get_stream(); auto thrust_policy = handle.get_thrust_policy(); auto n_clusters = condensed_tree.get_n_clusters(); auto cluster_tree = Utils::make_cluster_tree(handle, condensed_tree); if (cluster_selection_method == Common::CLUSTER_SELECTION_METHOD::EOM) { Select::excess_of_mass(handle, cluster_tree, 
tree_stabilities, is_cluster, n_clusters, max_cluster_size, allow_single_cluster); } else { thrust::fill(thrust_policy, is_cluster, is_cluster + n_clusters, false); if (cluster_tree.get_n_edges() > 0) { Select::leaf(handle, cluster_tree, is_cluster, n_clusters); } } auto n_selected_clusters = thrust::reduce(thrust_policy, is_cluster, is_cluster + n_clusters); // this variable is only used when cluster_selection_epsilon != 0.0 auto epsilon_search = true; if (cluster_selection_method == Common::CLUSTER_SELECTION_METHOD::LEAF) { // TODO: re-enable to match reference implementation // It's a confirmed bug https://github.com/scikit-learn-contrib/hdbscan/issues/476 // if no cluster leaves were found, declare root as cluster // if (n_selected_clusters == 0 && allow_single_cluster) { // constexpr int root_is_cluster = true; // raft::update_device(is_cluster, &root_is_cluster, 1, stream); // epsilon_search = false; // } } if (cluster_selection_epsilon != 0.0 && cluster_tree.get_n_edges() > 0) { // no epsilon search if no clusters were selected if (n_selected_clusters == 0) { epsilon_search = false; } // this is to check when eom finds root as only cluster // in which case, epsilon search is cancelled if (cluster_selection_method == Common::CLUSTER_SELECTION_METHOD::EOM) { if (n_selected_clusters == 1) { int is_root_only_cluster = false; raft::update_host(&is_root_only_cluster, is_cluster, 1, stream); if (is_root_only_cluster && allow_single_cluster) { epsilon_search = false; } } } if (epsilon_search) { Select::cluster_epsilon_search(handle, cluster_tree, is_cluster, n_clusters, cluster_selection_epsilon, allow_single_cluster, n_selected_clusters); } } } }; // namespace Select }; // namespace detail }; // namespace HDBSCAN }; // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/extract.cuh
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "membership.cuh" #include "select.cuh" #include "stabilities.cuh" #include "utils.h" #include <raft/label/classlabels.cuh> #include <cuml/cluster/hdbscan.hpp> #include <raft/sparse/convert/csr.cuh> #include <raft/sparse/op/sort.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <thrust/for_each.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <cub/cub.cuh> #include <algorithm> #include <cstddef> namespace ML { namespace HDBSCAN { namespace detail { namespace Extract { /** * Union-rank data structure with path compression for * labeling data points based on their farthest ancestors * under root. * * For correctness, it is important that all children are * visited before their parents. 
 * @tparam value_idx
 */
template <typename value_idx>
class TreeUnionFind {
 public:
  // Flat layout: data[i*2] is i's parent pointer, data[i*2+1] is i's rank.
  // Each element starts as its own root with rank 0.
  TreeUnionFind(value_idx size_) : size(size_), data(size_ * 2, 0)
  {
    for (int i = 0; i < size; i++) {
      data[i * 2] = i;
    }
  }

  // Union by rank: attach the lower-rank root beneath the higher-rank root;
  // on a tie, y's root goes under x's root and x's rank increments.
  void perform_union(value_idx x, value_idx y)
  {
    value_idx x_root = find(x);
    value_idx y_root = find(y);

    if (data[x_root * 2 + 1] < data[y_root * 2 + 1])
      data[x_root * 2] = y_root;
    else if (data[x_root * 2 + 1] > data[y_root * 2 + 1])
      data[y_root * 2] = x_root;
    else {
      data[y_root * 2] = x_root;
      data[x_root * 2 + 1] += 1;
    }
  }

  // Find with full path compression (recursive): every node on the path
  // is re-pointed directly at the root.
  value_idx find(value_idx x)
  {
    if (data[x * 2] != x) { data[x * 2] = find(data[x * 2]); }

    return data[x * 2];
  }

  // Raw access to the underlying parent/rank array.
  value_idx* get_data() { return data.data(); }

 private:
  value_idx size;
  std::vector<value_idx> data;
};

/**
 * Assigns a final cluster label to each point on the host. Copies the
 * condensed-tree edges to host memory, unions every child into its parent
 * unless the child is a selected cluster (so each point resolves to the
 * nearest selected ancestor), then maps each leaf's root to a label:
 * -1 (noise) or (cluster - n_leaves). The root-cluster case applies the
 * single-cluster / epsilon rules below. Results are copied back to `labels`
 * on device.
 */
template <typename value_idx, typename value_t>
void do_labelling_on_host(const raft::handle_t& handle,
                          Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                          std::set<value_idx>& clusters,
                          value_idx n_leaves,
                          bool allow_single_cluster,
                          value_idx* labels,
                          value_t cluster_selection_epsilon)
{
  auto stream = handle.get_stream();

  std::vector<value_idx> children_h(condensed_tree.get_n_edges());
  std::vector<value_t> lambda_h(condensed_tree.get_n_edges());
  std::vector<value_idx> parent_h(condensed_tree.get_n_edges());

  // Pull the edge arrays to host; sync before any host-side reads.
  raft::update_host(
    children_h.data(), condensed_tree.get_children(), condensed_tree.get_n_edges(), stream);
  raft::update_host(
    parent_h.data(), condensed_tree.get_parents(), condensed_tree.get_n_edges(), stream);
  raft::update_host(
    lambda_h.data(), condensed_tree.get_lambdas(), condensed_tree.get_n_edges(), stream);

  handle.sync_stream(stream);

  auto parents       = thrust::device_pointer_cast(condensed_tree.get_parents());
  auto thrust_policy = handle.get_thrust_policy();

  // Largest node id referenced as a parent; sizes the union-find and
  // the per-parent lambda table.
  value_idx size = *thrust::max_element(thrust_policy, parents, parents + condensed_tree.get_n_edges());

  std::vector<value_idx> result(n_leaves);
  std::vector<value_t> parent_lambdas(size + 1, 0);

  auto union_find = TreeUnionFind<value_idx>(size + 1);

  for (int i = 0; i < condensed_tree.get_n_edges(); i++) {
    value_idx child  = children_h[i];
    value_idx parent = parent_h[i];

    // Selected clusters stay their own roots; everything else collapses
    // into its parent so leaves resolve to the nearest selected ancestor.
    if (clusters.find(child) == clusters.end()) union_find.perform_union(parent, child);

    // Track the maximum lambda seen at each parent.
    parent_lambdas[parent_h[i]] = max(parent_lambdas[parent_h[i]], lambda_h[i]);
  }

  // Only initialized (and read) when an epsilon threshold is in effect.
  value_t inverse_cluster_selection_epsilon;
  if (cluster_selection_epsilon != 0.0) {
    inverse_cluster_selection_epsilon = 1 / cluster_selection_epsilon;
  }

  for (int i = 0; i < n_leaves; i++) {
    value_idx cluster = union_find.find(i);

    if (cluster < n_leaves)
      result[i] = -1;
    else if (cluster == n_leaves) {
      // Point resolved to the root cluster.
      // TODO: Implement the cluster_selection_epsilon / epsilon_search
      if (clusters.size() == 1 && allow_single_cluster) {
        // Look up this point's own lambda via its child entry.
        auto it              = std::find(children_h.begin(), children_h.end(), i);
        auto child_idx       = std::distance(children_h.begin(), it);
        value_t child_lambda = lambda_h[child_idx];
        if (cluster_selection_epsilon != 0) {
          // Keep the point only if it joined at distance <= epsilon
          // (lambda is compared against 1/epsilon).
          if (child_lambda >= inverse_cluster_selection_epsilon) {
            result[i] = cluster - n_leaves;
          } else {
            result[i] = -1;
          }
        } else if (child_lambda >= parent_lambdas[cluster])
          result[i] = cluster - n_leaves;
        else
          result[i] = -1;
      } else {
        result[i] = -1;
      }
    } else {
      result[i] = cluster - n_leaves;
    }
  }

  raft::update_device(labels, result.data(), n_leaves, stream);
}

/* @brief Internal function to compute inverse_label_map for CPU/GPU interop.
 * Runs stability computation and cluster selection, then builds the mapping
 * from final label ids back to condensed-tree cluster ids. */
template <typename value_idx, typename value_t>
void _compute_inverse_label_map(const raft::handle_t& handle,
                                Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                                size_t n_leaves,
                                Common::CLUSTER_SELECTION_METHOD cluster_selection_method,
                                rmm::device_uvector<value_idx>& inverse_label_map,
                                bool allow_single_cluster,
                                value_idx max_cluster_size,
                                value_t cluster_selection_epsilon)
{
  auto stream = handle.get_stream();
  rmm::device_uvector<value_t> tree_stabilities(condensed_tree.get_n_clusters(),
                                                handle.get_stream());
  Stability::compute_stabilities(handle, condensed_tree, tree_stabilities.data());
  rmm::device_uvector<int> is_cluster(condensed_tree.get_n_clusters(), handle.get_stream());

  // max_cluster_size <= 0 disables the size cap (use the whole dataset size).
  if (max_cluster_size <= 0) max_cluster_size = n_leaves;

  Select::select_clusters(handle,
                          condensed_tree,
                          tree_stabilities.data(),
                          is_cluster.data(),
                          cluster_selection_method,
                          allow_single_cluster,
                          max_cluster_size,
                          cluster_selection_epsilon);

  std::vector<int> is_cluster_h(is_cluster.size());
  raft::update_host(is_cluster_h.data(), is_cluster.data(), is_cluster_h.size(), stream);
  handle.sync_stream(stream);

  // Selected cluster ids, offset by n_leaves into condensed-tree node space.
  std::set<value_idx> clusters;
  for (std::size_t i = 0; i < is_cluster_h.size(); i++) {
    if (is_cluster_h[i] != 0) { clusters.insert(i + n_leaves); }
  }

  std::vector<value_idx> inverse_label_map_h(clusters.size(), -1);
  value_idx i = 0;
  // creating inverse index between
  // original and final labels
  for (const value_idx cluster : clusters) {
    inverse_label_map_h[i] = cluster - n_leaves;
    i++;
  }

  // resizing is to n_clusters
  inverse_label_map.resize(clusters.size(), stream);
  raft::copy(
    inverse_label_map.data(), inverse_label_map_h.data(), inverse_label_map_h.size(), stream);
}

/**
 * Compute cluster stabilities, perform cluster selection, and
 * label the resulting clusters. In addition, probabilities
 * are computed and stabilities are normalized into scores.
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle for resource management
 * @param[in] condensed_tree a condensed hierarchy
 * @param[in] n_leaves number of data samples
 * @param[out] labels array of labels on device (size n_leaves)
 * @param[out] tree_stabilities array of stabilities on device (size n_clusters)
 * @param[out] probabilities array of probabilities on device (size n_leaves)
 * @param[out] label_map array mapping condensed label ids to selected label ids (size
 * n_condensed_trees)
 * @param[in] cluster_selection_method method to use for cluster selection
 * @param[out] inverse_label_map array mapping final label ids to condensed label ids, used for
 * prediction APIs (size n_clusters)
 * @param[in] allow_single_cluster allows a single cluster to be returned (rather than just noise)
 * @param[in] max_cluster_size maximum number of points that can be considered in a cluster before
 * it is split into multiple sub-clusters.
 * @param[in] cluster_selection_epsilon a distance threshold. clusters below this value will be
 * merged.
 * @return the number of selected clusters
 */
template <typename value_idx, typename value_t>
value_idx extract_clusters(const raft::handle_t& handle,
                           Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                           size_t n_leaves,
                           value_idx* labels,
                           value_t* tree_stabilities,
                           value_t* probabilities,
                           value_idx* label_map,
                           Common::CLUSTER_SELECTION_METHOD cluster_selection_method,
                           rmm::device_uvector<value_idx>& inverse_label_map,
                           bool allow_single_cluster      = false,
                           value_idx max_cluster_size     = 0,
                           value_t cluster_selection_epsilon = 0.0)
{
  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();

  Stability::compute_stabilities(handle, condensed_tree, tree_stabilities);
  rmm::device_uvector<int> is_cluster(condensed_tree.get_n_clusters(), handle.get_stream());

  // max_cluster_size <= 0 disables the size cap (use the whole dataset size).
  if (max_cluster_size <= 0) max_cluster_size = n_leaves;

  Select::select_clusters(handle,
                          condensed_tree,
                          tree_stabilities,
                          is_cluster.data(),
                          cluster_selection_method,
                          allow_single_cluster,
                          max_cluster_size,
                          cluster_selection_epsilon);

  std::vector<int> is_cluster_h(is_cluster.size());
  raft::update_host(is_cluster_h.data(), is_cluster.data(), is_cluster_h.size(), stream);
  handle.sync_stream(stream);

  // Selected cluster ids, offset by n_leaves into condensed-tree node space.
  std::set<value_idx> clusters;
  for (std::size_t i = 0; i < is_cluster_h.size(); i++) {
    if (is_cluster_h[i] != 0) { clusters.insert(i + n_leaves); }
  }

  std::vector<value_idx> label_map_h(condensed_tree.get_n_clusters(), -1);
  std::vector<value_idx> inverse_label_map_h(clusters.size(), -1);
  value_idx i = 0;
  // creating forward and inverse index between
  // original and final labels
  for (const value_idx cluster : clusters) {
    label_map_h[cluster - n_leaves] = i;
    inverse_label_map_h[i]          = cluster - n_leaves;
    i++;
  }

  // resizing is to n_clusters
  inverse_label_map.resize(clusters.size(), stream);
  raft::copy(
    inverse_label_map.data(), inverse_label_map_h.data(), inverse_label_map_h.size(), stream);
  raft::copy(label_map, label_map_h.data(), label_map_h.size(), stream);

  // Resolve a final label per point on host, then compute per-point
  // membership probabilities on device.
  do_labelling_on_host<value_idx, value_t>(handle,
                                           condensed_tree,
                                           clusters,
                                           n_leaves,
                                           allow_single_cluster,
                                           labels,
                                           cluster_selection_epsilon);

  Membership::get_probabilities<value_idx, value_t>(handle, condensed_tree, labels, probabilities);

  return clusters.size();
}

};  // end namespace Extract
};  // end namespace detail
};  // end namespace HDBSCAN
};  // end namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/soft_clustering.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "kernels/soft_clustering.cuh" #include "select.cuh" #include "utils.h" #include <cuml/common/logger.hpp> #include <cub/cub.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <raft/sparse/convert/csr.cuh> #include <raft/sparse/op/sort.cuh> #include <cuml/cluster/hdbscan.hpp> #include <raft/core/device_mdspan.hpp> #include <raft/distance/distance.cuh> #include <raft/distance/distance_types.hpp> #include <raft/label/classlabels.cuh> #include <raft/linalg/matrix_vector_op.cuh> #include <raft/matrix/argmax.cuh> #include <raft/matrix/matrix.cuh> #include <raft/util/fast_int_div.cuh> #include <algorithm> #include <cmath> #include <limits> #include <thrust/execution_policy.h> #include <thrust/transform.h> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> namespace ML { namespace HDBSCAN { namespace detail { namespace Predict { // Computing distance based membership for points in the original clustering on which the clusterer // was trained and new points outside of the training data. 
/**
 * Compute distance-based soft membership of each query point to each selected cluster,
 * using the minimum distance from the query to the exemplar points of that cluster.
 * Queries are processed in batches to bound the size of the pairwise-distance buffer.
 *
 * @param[in]  X                      training points (row-major, n columns)
 * @param[in]  query                  query points (row-major, n columns)
 * @param[in]  n_queries              number of query rows
 * @param[in]  n                      number of feature columns
 * @param[in]  n_exemplars            total number of exemplar points across all clusters
 * @param[in]  n_selected_clusters    number of selected clusters
 * @param[in]  exemplar_idx           row indices into X of the exemplar points
 * @param[in]  exemplar_label_offsets CSR-style offsets delimiting each cluster's exemplars
 * @param[out] dist_membership_vec    output memberships (n_queries x n_selected_clusters),
 *                                    row-normalized to sum to 1
 * @param[in]  metric                 distance metric (L2SqrtExpanded, L1, or CosineExpanded)
 * @param[in]  batch_size             number of queries per batch
 * @param[in]  softmax                if true, transform min distances with exp(1/dist)
 *                                    instead of 1/dist before normalizing
 */
template <typename value_idx, typename value_t>
void dist_membership_vector(const raft::handle_t& handle,
                            const value_t* X,
                            const value_t* query,
                            size_t n_queries,
                            size_t n,
                            size_t n_exemplars,
                            value_idx n_selected_clusters,
                            value_idx* exemplar_idx,
                            value_idx* exemplar_label_offsets,
                            value_t* dist_membership_vec,
                            raft::distance::DistanceType metric,
                            size_t batch_size,
                            bool softmax = false)
{
  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();

  rmm::device_uvector<value_t> exemplars_dense(n_exemplars * n, stream);

  // use the exemplar point indices to obtain the exemplar points as a dense array
  raft::matrix::copyRows<value_t, value_idx, size_t>(
    X, n_exemplars, n, exemplars_dense.data(), exemplar_idx, n_exemplars, stream, true);

  // compute the number of batches based on the batch size
  value_idx n_batches;
  n_batches = raft::ceildiv((int)n_queries, (int)batch_size);

  for (value_idx bid = 0; bid < n_batches; bid++) {
    value_idx batch_offset = bid * batch_size;
    // last batch may be smaller than batch_size
    value_idx samples_per_batch = min((value_idx)batch_size, (value_idx)n_queries - batch_offset);
    rmm::device_uvector<value_t> dist(samples_per_batch * n_exemplars, stream);

    // compute the distances using raft API
    switch (metric) {
      case raft::distance::DistanceType::L2SqrtExpanded:
        raft::distance::
          distance<raft::distance::DistanceType::L2SqrtExpanded, value_t, value_t, value_t, int>(
            handle,
            query + batch_offset * n,
            exemplars_dense.data(),
            dist.data(),
            samples_per_batch,
            n_exemplars,
            n,
            true);
        break;
      case raft::distance::DistanceType::L1:
        raft::distance::distance<raft::distance::DistanceType::L1, value_t, value_t, value_t, int>(
          handle,
          query + batch_offset * n,
          exemplars_dense.data(),
          dist.data(),
          samples_per_batch,
          n_exemplars,
          n,
          true);
        break;
      case raft::distance::DistanceType::CosineExpanded:
        raft::distance::
          distance<raft::distance::DistanceType::CosineExpanded, value_t, value_t, value_t, int>(
            handle,
            query + batch_offset * n,
            exemplars_dense.data(),
            dist.data(),
            samples_per_batch,
            n_exemplars,
            n,
            true);
        break;
      default: RAFT_EXPECTS(false, "Incorrect metric passed!");
    }

    // compute the minimum distances to exemplars of each cluster
    value_idx n_elements = samples_per_batch * n_selected_clusters;
    auto min_dist        = raft::make_device_vector<value_t, value_idx>(handle, n_elements);

    // For output element (row, col): scan this cluster's exemplar range given by
    // exemplar_label_offsets and keep the smallest distance.
    auto reduction_op = [dist = dist.data(),
                         batch_offset,
                         divisor = raft::util::FastIntDiv(n_selected_clusters),
                         n_selected_clusters,
                         n_exemplars,
                         exemplar_label_offsets] __device__(auto idx) {
      auto col   = idx % divisor;
      auto row   = idx / divisor;
      auto start = exemplar_label_offsets[col];
      auto end   = exemplar_label_offsets[col + 1];

      value_t min_val = std::numeric_limits<value_t>::max();
      for (value_idx i = start; i < end; i++) {
        if (dist[row * n_exemplars + i] < min_val) { min_val = dist[row * n_exemplars + i]; }
      }
      return min_val;
    };
    raft::linalg::map_offset(handle, min_dist.view(), reduction_op);

    // Softmax-style transform: membership = exp(1/dist); zero distance maps to the
    // largest representable value (exact hit on an exemplar dominates the row)
    if (softmax) {
      thrust::transform(exec_policy,
                        min_dist.data_handle(),
                        min_dist.data_handle() + samples_per_batch * n_selected_clusters,
                        dist_membership_vec + batch_offset * n_selected_clusters,
                        [=] __device__(value_t val) {
                          if (val != 0) { return value_t(exp(1.0 / val)); }
                          return std::numeric_limits<value_t>::max();
                        });
    }
    // Transform the distances to obtain membership based on proximity to exemplars
    else {
      thrust::transform(exec_policy,
                        min_dist.data_handle(),
                        min_dist.data_handle() + samples_per_batch * n_selected_clusters,
                        dist_membership_vec + batch_offset * n_selected_clusters,
                        [=] __device__(value_t val) {
                          if (val > 0) { return value_t(1.0 / val); }
                          return std::numeric_limits<value_t>::max() / n_selected_clusters;
                        });
    }
  }
  // Normalize the obtained result to sum to 1.0
  Utils::normalize(dist_membership_vec, n_selected_clusters, n_queries, stream);
}

/**
 * Compute outlier-based membership for all training points: for every (point, cluster)
 * pair a merge height is computed (merge_height_kernel, defined in
 * kernels/soft_clustering.cuh), then converted to a membership via
 * exp(-(leaf_max_lambda + 1e-8) / merge_height) and row-normalized.
 * Results are written to both merge_heights and outlier_membership_vec
 * (each of size m * n_selected_clusters).
 */
template <typename value_idx, typename value_t, int tpb = 256>
void all_points_outlier_membership_vector(
  const raft::handle_t& handle,
  Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
  value_t* deaths,
  value_idx* selected_clusters,
  value_idx* index_into_children,
  size_t m,
  int n_selected_clusters,
  value_t* merge_heights,
  value_t* outlier_membership_vec,
  bool softmax)
{
  auto stream       = handle.get_stream();
  auto exec_policy  = handle.get_thrust_policy();
  auto parents      = condensed_tree.get_parents();
  auto children     = condensed_tree.get_children();
  value_t* lambdas  = condensed_tree.get_lambdas();
  value_idx n_edges = condensed_tree.get_n_edges();
  auto n_clusters   = condensed_tree.get_n_clusters();
  auto n_leaves     = condensed_tree.get_n_leaves();

  // one thread per (point, cluster) pair
  int n_blocks = raft::ceildiv(int(m * n_selected_clusters), tpb);
  merge_height_kernel<<<n_blocks, tpb, 0, stream>>>(merge_heights,
                                                    lambdas,
                                                    index_into_children,
                                                    parents,
                                                    m,
                                                    n_selected_clusters,
                                                    raft::util::FastIntDiv(n_selected_clusters),
                                                    selected_clusters);

  // max lambda (death) of the cluster each leaf's condensed-tree parent maps to
  auto leaf_max_lambdas = raft::make_device_vector<value_t, value_idx>(handle, n_leaves);

  raft::linalg::map_offset(handle,
                           leaf_max_lambdas.view(),
                           [deaths, parents, index_into_children, n_leaves] __device__(auto idx) {
                             return deaths[parents[index_into_children[idx]] - n_leaves];
                           });

  raft::linalg::matrixVectorOp(
    outlier_membership_vec,
    merge_heights,
    leaf_max_lambdas.data_handle(),
    n_selected_clusters,
    (value_idx)m,
    true,
    false,
    [] __device__(value_t mat_in, value_t vec_in) {
      return exp(-(vec_in + 1e-8) / mat_in);
    },  //+ 1e-8 to avoid zero lambda
    stream);

  if (softmax) { Utils::softmax(handle, outlier_membership_vec, n_selected_clusters, m); }
  Utils::normalize(outlier_membership_vec, n_selected_clusters, m, stream);
}

/**
 * For each training point, compute the probability that it belongs to some cluster:
 * merge height to the nearest (argmax merge height) cluster divided by the larger of the
 * point's own lambda and that cluster's death lambda.
 */
template <typename value_idx, typename value_t, int tpb = 256>
void all_points_prob_in_some_cluster(const raft::handle_t& handle,
                                     Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                                     value_t* deaths,
                                     value_idx* selected_clusters,
                                     value_idx* index_into_children,
                                     size_t m,
                                     value_idx n_selected_clusters,
                                     value_t* merge_heights,
                                     value_t* prob_in_some_cluster)
{
  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();
  value_t* lambdas = condensed_tree.get_lambdas();
  auto n_leaves    = condensed_tree.get_n_leaves();
  auto n_edges     = condensed_tree.get_n_edges();
  auto children    = condensed_tree.get_children();

  // per-row argmax over merge heights picks the "nearest" cluster
  auto height_argmax = raft::make_device_vector<value_idx, value_idx>(handle, m);

  auto merge_heights_view = raft::make_device_matrix_view<const value_t, value_idx, raft::row_major>(
    merge_heights, (int)m, n_selected_clusters);

  raft::matrix::argmax(handle, merge_heights_view, height_argmax.view());

  auto prob_in_some_cluster_op = [deaths,
                                  lambdas,
                                  index_into_children,
                                  selected_clusters,
                                  n_leaves,
                                  merge_heights,
                                  height_argmax = height_argmax.data_handle(),
                                  n_selected_clusters] __device__(auto idx) {
    value_idx nearest_cluster = height_argmax[idx];
    value_t max_lambda        = max(lambdas[index_into_children[idx]],
                             deaths[selected_clusters[nearest_cluster] - n_leaves]);
    return merge_heights[idx * n_selected_clusters + nearest_cluster] / max_lambda;
  };
  raft::linalg::map_offset(
    handle,
    raft::make_device_vector_view<value_t, value_idx>(prob_in_some_cluster, m),
    prob_in_some_cluster_op);
}

/**
 * Outlier-based membership for new (out-of-training) prediction points, driven by each
 * point's nearest mutual-reachability neighbor (min_mr_inds) and its clamped lambda
 * (prediction_lambdas). Writes merge heights and the normalized membership matrix
 * (n_prediction_points x n_selected_clusters).
 */
template <typename value_idx, typename value_t, int tpb = 256>
void outlier_membership_vector(const raft::handle_t& handle,
                               Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                               value_t* deaths,
                               value_idx* min_mr_inds,
                               value_t* prediction_lambdas,
                               value_idx* selected_clusters,
                               value_idx* index_into_children,
                               size_t n_prediction_points,
                               int n_selected_clusters,
                               value_t* merge_heights,
                               value_t* outlier_membership_vec,
                               bool softmax)
{
  auto stream       = handle.get_stream();
  auto exec_policy  = handle.get_thrust_policy();
  auto parents      = condensed_tree.get_parents();
  auto children     = condensed_tree.get_children();
  value_t* lambdas  = condensed_tree.get_lambdas();
  value_idx n_edges = condensed_tree.get_n_edges();
  auto n_clusters   = condensed_tree.get_n_clusters();
  auto n_leaves     = condensed_tree.get_n_leaves();

  // Using the nearest neighbor indices, compute outlier membership
  int n_blocks = raft::ceildiv(int(n_prediction_points * n_selected_clusters), tpb);
  merge_height_kernel<<<n_blocks, tpb, 0, stream>>>(merge_heights,
                                                    lambdas,
                                                    prediction_lambdas,
                                                    min_mr_inds,
                                                    index_into_children,
                                                    parents,
                                                    n_prediction_points,
                                                    n_selected_clusters,
                                                    raft::util::FastIntDiv(n_selected_clusters),
                                                    selected_clusters);

  // fetch the max lambda of the cluster to which the nearest MR neighbor belongs in the condensed
  // hierarchy
  auto nearest_cluster_max_lambda =
    raft::make_device_vector<value_t, value_idx>(handle, n_prediction_points);
  raft::linalg::map_offset(
    handle,
    nearest_cluster_max_lambda.view(),
    [deaths, parents, index_into_children, min_mr_inds, n_leaves] __device__(auto idx) {
      return deaths[parents[index_into_children[min_mr_inds[idx]]] - n_leaves];
    });

  raft::linalg::matrixVectorOp(
    outlier_membership_vec,
    merge_heights,
    nearest_cluster_max_lambda.data_handle(),
    n_selected_clusters,
    (value_idx)n_prediction_points,
    true,
    false,
    [] __device__(value_t mat_in, value_t vec_in) {
      // guard against non-positive denominator (merge height >= cluster max lambda)
      value_t denominator = vec_in - mat_in;
      if (denominator <= 0) { denominator = 1e-8; }
      return vec_in / denominator;
    },
    stream);

  if (softmax) {
    Utils::softmax(handle, outlier_membership_vec, n_selected_clusters, n_prediction_points);
  }
  Utils::normalize(outlier_membership_vec, n_selected_clusters, n_prediction_points, stream);
}

/**
 * Probability that each prediction point belongs to some cluster; analogous to
 * all_points_prob_in_some_cluster but uses the precomputed prediction lambdas
 * (note the + 1e-8 stabilizer on the denominator).
 */
template <typename value_idx, typename value_t, int tpb = 256>
void prob_in_some_cluster(const raft::handle_t& handle,
                          Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                          value_t* deaths,
                          value_idx* selected_clusters,
                          value_idx* index_into_children,
                          size_t n_prediction_points,
                          value_idx n_selected_clusters,
                          value_idx* min_mr_indices,
                          value_t* merge_heights,
                          value_t* prediction_lambdas,
                          value_t* prob_in_some_cluster)
{
  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();
  value_t* lambdas = condensed_tree.get_lambdas();
  auto n_leaves    = condensed_tree.get_n_leaves();
  auto n_edges     = condensed_tree.get_n_edges();
  auto children    = condensed_tree.get_children();

  auto height_argmax = raft::make_device_vector<value_idx, value_idx>(handle, n_prediction_points);

  auto merge_heights_view = raft::make_device_matrix_view<const value_t, value_idx, raft::row_major>(
    merge_heights, (int)n_prediction_points, n_selected_clusters);

  raft::matrix::argmax(handle, merge_heights_view, height_argmax.view());

  auto prob_in_some_cluster_op = [prediction_lambdas,
                                  deaths,
                                  selected_clusters,
                                  n_leaves,
                                  merge_heights,
                                  height_argmax = height_argmax.data_handle(),
                                  n_selected_clusters] __device__(auto idx) {
    value_idx nearest_cluster = height_argmax[idx];
    value_t max_lambda =
      max(prediction_lambdas[idx], deaths[selected_clusters[nearest_cluster] - n_leaves]) + 1e-8;
    return merge_heights[idx * n_selected_clusters + nearest_cluster] / max_lambda;
  };
  raft::linalg::map_offset(
    handle,
    raft::make_device_vector_view<value_t, value_idx>(prob_in_some_cluster, n_prediction_points),
    prob_in_some_cluster_op);
}

/**
 * Predict soft cluster membership vectors for all points in the original dataset the clusterer was
 * trained on
 *
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle for resource reuse
 * @param[in] condensed_tree a condensed hierarchy
 * @param[in] prediction_data PredictionData object
 * @param[in] X all points (size m * n)
 * @param[in] metric distance metric
 * @param[out] membership_vec output membership vectors (size m * n_selected_clusters)
 * @param[in] batch_size batch size to be used while computing distance based memberships
 */
template <typename value_idx, typename value_t>
void all_points_membership_vectors(const raft::handle_t& handle,
                                   Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                                   Common::PredictionData<value_idx, value_t>& prediction_data,
                                   const value_t* X,
                                   raft::distance::DistanceType metric,
                                   value_t* membership_vec,
                                   size_t batch_size)
{
  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();

  size_t m = prediction_data.n_rows;
  size_t n = prediction_data.n_cols;

  // clamp batch size to the number of samples before validating it
  if (batch_size > m) batch_size = m;
  RAFT_EXPECTS(0 < batch_size && batch_size <= m,
               "Invalid batch_size. batch_size should be > 0 and <= the number of samples in the "
               "training data");

  auto parents    = condensed_tree.get_parents();
  auto children   = condensed_tree.get_children();
  auto lambdas    = condensed_tree.get_lambdas();
  auto n_edges    = condensed_tree.get_n_edges();
  auto n_clusters = condensed_tree.get_n_clusters();
  auto n_leaves   = condensed_tree.get_n_leaves();

  value_idx n_selected_clusters   = prediction_data.get_n_selected_clusters();
  value_t* deaths                 = prediction_data.get_deaths();
  value_idx* selected_clusters    = prediction_data.get_selected_clusters();
  value_idx* index_into_children  = prediction_data.get_index_into_children();
  value_idx n_exemplars           = prediction_data.get_n_exemplars();

  // Compute membership vectors only if the number of selected clusters is non-zero. This is done to
  // avoid CUDA run-time errors in raft primitives for pairwise distances and other kernel
  // invocations.
  if (n_selected_clusters > 0) {
    rmm::device_uvector<value_t> dist_membership_vec(m * n_selected_clusters, stream);

    dist_membership_vector(handle,
                           X,
                           X,
                           m,
                           n,
                           n_exemplars,
                           n_selected_clusters,
                           prediction_data.get_exemplar_idx(),
                           prediction_data.get_exemplar_label_offsets(),
                           dist_membership_vec.data(),
                           metric,
                           batch_size);

    rmm::device_uvector<value_t> merge_heights(m * n_selected_clusters, stream);

    all_points_outlier_membership_vector(handle,
                                         condensed_tree,
                                         deaths,
                                         selected_clusters,
                                         index_into_children,
                                         m,
                                         n_selected_clusters,
                                         merge_heights.data(),
                                         membership_vec,
                                         true);

    rmm::device_uvector<value_t> prob_in_some_cluster(m, stream);
    all_points_prob_in_some_cluster(handle,
                                    condensed_tree,
                                    deaths,
                                    selected_clusters,
                                    index_into_children,
                                    m,
                                    n_selected_clusters,
                                    merge_heights.data(),
                                    prob_in_some_cluster.data());

    // combine distance-based and outlier-based memberships elementwise
    thrust::transform(exec_policy,
                      dist_membership_vec.begin(),
                      dist_membership_vec.end(),
                      membership_vec,
                      membership_vec,
                      thrust::multiplies<value_t>());

    // Normalize to obtain probabilities conditioned on points belonging to some cluster
    Utils::normalize(membership_vec, n_selected_clusters, m, stream);

    // Multiply with probabilities of points belonging to some cluster to obtain joint distribution
    raft::linalg::matrixVectorOp(
      membership_vec,
      membership_vec,
      prob_in_some_cluster.data(),
      n_selected_clusters,
      (value_idx)m,
      true,
      false,
      [] __device__(value_t mat_in, value_t vec_in) { return mat_in * vec_in; },
      stream);
  }
}

/**
 * Predict soft cluster membership vectors for new points (not in the training data).
 *
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle for resource reuse
 * @param[in] condensed_tree a condensed hierarchy
 * @param[in] prediction_data PredictionData object
 * @param[in] X all points (size m * n)
 * @param[in] points_to_predict input prediction points (size n_prediction_points * n)
 * @param[in] n_prediction_points number of prediction points
 * @param[in] metric distance metric
 * @param[in] min_samples neighborhood size during training (includes self-loop)
 * @param[out] membership_vec output membership vectors (size n_prediction_points *
 * n_selected_clusters)
 * @param[in] batch_size batch size to be used while computing distance based memberships
 */
template <typename value_idx, typename value_t, int tpb = 256>
void membership_vector(const raft::handle_t& handle,
                       Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                       Common::PredictionData<value_idx, value_t>& prediction_data,
                       const value_t* X,
                       const value_t* points_to_predict,
                       size_t n_prediction_points,
                       raft::distance::DistanceType metric,
                       int min_samples,
                       value_t* membership_vec,
                       size_t batch_size)
{
  RAFT_EXPECTS(metric == raft::distance::DistanceType::L2SqrtExpanded,
               "Currently only L2 expanded distance is supported");

  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();

  size_t m                       = prediction_data.n_rows;
  size_t n                       = prediction_data.n_cols;
  value_idx n_selected_clusters  = prediction_data.get_n_selected_clusters();
  value_t* deaths                = prediction_data.get_deaths();
  value_idx* selected_clusters   = prediction_data.get_selected_clusters();
  value_idx* index_into_children = prediction_data.get_index_into_children();
  value_idx n_exemplars          = prediction_data.get_n_exemplars();
  value_t* lambdas               = condensed_tree.get_lambdas();

  if (batch_size > n_prediction_points) batch_size = n_prediction_points;
  RAFT_EXPECTS(
    0 < batch_size && batch_size <= n_prediction_points,
    "Invalid batch_size. batch_size should be > 0 and <= the number of prediction points");

  rmm::device_uvector<value_t> dist_membership_vec(n_prediction_points * n_selected_clusters,
                                                   stream);

  dist_membership_vector(handle,
                         X,
                         points_to_predict,
                         n_prediction_points,
                         n,
                         n_exemplars,
                         n_selected_clusters,
                         prediction_data.get_exemplar_idx(),
                         prediction_data.get_exemplar_label_offsets(),
                         dist_membership_vec.data(),
                         raft::distance::DistanceType::L2SqrtExpanded,
                         batch_size);

  auto prediction_lambdas =
    raft::make_device_vector<value_t, value_idx>(handle, n_prediction_points);
  rmm::device_uvector<value_idx> min_mr_inds(n_prediction_points, stream);
  // nearest mutual-reachability neighbor per prediction point
  // (helper defined elsewhere in this module)
  _compute_knn_and_nearest_neighbor(handle,
                                    prediction_data,
                                    X,
                                    points_to_predict,
                                    min_samples,
                                    n_prediction_points,
                                    min_mr_inds.data(),
                                    prediction_lambdas.data_handle(),
                                    metric);

  // clamp each prediction lambda by its nearest neighbor's lambda in the condensed tree
  raft::linalg::map_offset(
    handle,
    prediction_lambdas.view(),
    [lambdas,
     index_into_children,
     min_mr_inds        = min_mr_inds.data(),
     prediction_lambdas = prediction_lambdas.data_handle()] __device__(auto idx) {
      value_t neighbor_lambda = lambdas[index_into_children[min_mr_inds[idx]]];
      return min(prediction_lambdas[idx], neighbor_lambda);
    });

  rmm::device_uvector<value_t> merge_heights(n_prediction_points * n_selected_clusters, stream);

  outlier_membership_vector(handle,
                            condensed_tree,
                            deaths,
                            min_mr_inds.data(),
                            prediction_lambdas.data_handle(),
                            selected_clusters,
                            index_into_children,
                            n_prediction_points,
                            n_selected_clusters,
                            merge_heights.data(),
                            membership_vec,
                            true);

  // weighted geometric-style combination: outlier membership squared times
  // sqrt of distance membership
  auto combine_op = [membership_vec,
                     dist_membership_vec = dist_membership_vec.data()] __device__(auto idx) {
    return pow(membership_vec[idx], 2) * pow(dist_membership_vec[idx], 0.5);
  };

  raft::linalg::map_offset(handle,
                           raft::make_device_vector_view<value_t, value_idx>(
                             membership_vec, n_prediction_points * n_selected_clusters),
                           combine_op);

  // Normalize to obtain probabilities conditioned on points belonging to some cluster
  Utils::normalize(membership_vec, n_selected_clusters, n_prediction_points, stream);

  rmm::device_uvector<value_t> prob_in_some_cluster_(n_prediction_points, stream);
  prob_in_some_cluster(handle,
                       condensed_tree,
                       deaths,
                       selected_clusters,
                       index_into_children,
                       n_prediction_points,
                       n_selected_clusters,
                       min_mr_inds.data(),
                       merge_heights.data(),
                       prediction_lambdas.data_handle(),
                       prob_in_some_cluster_.data());

  // Multiply conditional probabilities with probability of point belonging to some cluster. This
  // gives the joint distribution.
  raft::linalg::matrixVectorOp(
    membership_vec,
    membership_vec,
    prob_in_some_cluster_.data(),
    n_selected_clusters,
    (value_idx)n_prediction_points,
    true,
    false,
    [] __device__(value_t mat_in, value_t vec_in) { return mat_in * vec_in; },
    stream);
}

};  // namespace Predict
};  // namespace detail
};  // namespace HDBSCAN
};  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/stabilities.cuh
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "kernels/stabilities.cuh" #include "utils.h" #include <cub/cub.cuh> #include <raft/util/cudart_utils.hpp> #include <raft/sparse/convert/csr.cuh> #include <raft/sparse/op/sort.cuh> #include <cuml/cluster/hdbscan.hpp> #include <raft/label/classlabels.cuh> #include <algorithm> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/tuple.h> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> namespace ML { namespace HDBSCAN { namespace detail { namespace Stability { /** * Computes stability scores which are used for excess of mass cluster * selection. Stabilities are computed over the points in each cluster as the sum * of the lambda (1 / distance) of each point minus the lambda of its parent. 
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle for resource reuse
 * @param[in] condensed_tree condensed hierarchy (size n_points + n_clusters)
 * @param[out] stabilities output stabilities array (size n_clusters)
 */
template <typename value_idx, typename value_t>
void compute_stabilities(const raft::handle_t& handle,
                         Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                         value_t* stabilities)
{
  auto parents    = condensed_tree.get_parents();
  auto children   = condensed_tree.get_children();
  auto lambdas    = condensed_tree.get_lambdas();
  auto sizes      = condensed_tree.get_sizes();
  auto n_edges    = condensed_tree.get_n_edges();
  auto n_clusters = condensed_tree.get_n_clusters();
  auto n_leaves   = condensed_tree.get_n_leaves();

  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();

  // build a CSR index over parents so lambdas can be segment-reduced per parent
  rmm::device_uvector<value_idx> sorted_parents(n_edges, stream);
  raft::copy_async(sorted_parents.data(), parents, n_edges, stream);

  rmm::device_uvector<value_idx> sorted_parents_offsets(n_edges + 1, stream);
  Utils::parent_csr(handle, condensed_tree, sorted_parents.data(), sorted_parents_offsets.data());

  // This is to consider the case where a child may also be a parent
  // in which case, births for that parent are initialized to
  // lambda for that child
  rmm::device_uvector<value_t> births(n_clusters, stream);
  thrust::fill(exec_policy, births.begin(), births.end(), 0.0f);

  auto births_init_op =
    [n_leaves, children, lambdas, births = births.data()] __device__(const auto& idx) {
      auto child = children[idx];
      if (child >= n_leaves) { births[child - n_leaves] = lambdas[idx]; }
    };

  // this is to find minimum lambdas of all children under a parent
  rmm::device_uvector<value_t> births_parent_min(n_clusters, stream);
  thrust::for_each(exec_policy,
                   thrust::make_counting_iterator(value_idx(0)),
                   thrust::make_counting_iterator(n_edges),
                   births_init_op);
  // segmented min over each parent's children lambdas; offset by 1 to skip the root
  Utils::cub_segmented_reduce(
    lambdas,
    births_parent_min.data() + 1,
    n_clusters - 1,
    sorted_parents_offsets.data() + 1,
    stream,
    cub::DeviceSegmentedReduce::Min<const value_t*, value_t*, const value_idx*, const value_idx*>);

  // finally, we find minimum between initialized births where parent=child
  // and births of parents for their children
  auto births_zip =
    thrust::make_zip_iterator(thrust::make_tuple(births.data(), births_parent_min.data()));
  auto min_op = [] __device__(const thrust::tuple<value_t, value_t>& birth_pair) {
    auto birth             = thrust::get<0>(birth_pair);
    auto births_parent_min = thrust::get<1>(birth_pair);
    return birth < births_parent_min ? birth : births_parent_min;
  };
  thrust::transform(
    exec_policy, births_zip + 1, births_zip + n_clusters, births.begin() + 1, min_op);

  thrust::fill(exec_policy, stabilities, stabilities + n_clusters, 0.0f);

  // for each child, calculate summation (lambda[child] - birth[parent]) * sizes[child]
  stabilities_functor<value_idx, value_t> stabilities_op(
    stabilities, births.data(), parents, lambdas, sizes, n_leaves);
  thrust::for_each(exec_policy,
                   thrust::make_counting_iterator(value_idx(0)),
                   thrust::make_counting_iterator(n_edges),
                   stabilities_op);
}

/**
 * Computes stability scores for each cluster by normalizing their
 * stabilities by their sizes and scaling by the lambda of the root.
 *
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle for resource reuse
 * @param[in] labels labels array (size n_leaves)
 * @param[in] stability stabilities array (size n_clusters)
 * @param[in] n_condensed_clusters number of clusters in cluster tree
 * @param[in] max_lambda maximum lambda of cluster hierarchy
 * @param[in] n_leaves number of data points (non-clusters) in hierarchy
 * @param[out] result output stability scores
 * @param[in] label_map map of original labels to new final labels (size n_leaves)
 */
template <typename value_idx, typename value_t>
void get_stability_scores(const raft::handle_t& handle,
                          const value_idx* labels,
                          const value_t* stability,
                          size_t n_condensed_clusters,
                          value_t max_lambda,
                          size_t n_leaves,
                          value_t* result,
                          value_idx* label_map)
{
  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();

  /**
   * 1. Populate cluster sizes
   */
  rmm::device_uvector<value_idx> cluster_sizes(n_condensed_clusters, handle.get_stream());
  thrust::fill(exec_policy, cluster_sizes.data(), cluster_sizes.data() + cluster_sizes.size(), 0);

  value_idx* sizes = cluster_sizes.data();
  // count labeled points per cluster; noise points (label -1) are skipped
  thrust::for_each(exec_policy, labels, labels + n_leaves, [=] __device__(value_idx v) {
    if (v > -1) atomicAdd(sizes + v, 1);
  });

  /**
   * Compute stability scores
   */
  auto enumeration = thrust::make_zip_iterator(
    thrust::make_tuple(thrust::make_counting_iterator(0), cluster_sizes.data()));
  thrust::for_each(exec_policy,
                   enumeration,
                   enumeration + n_condensed_clusters,
                   [=] __device__(thrust::tuple<value_idx, value_idx> tup) {
                     value_idx size        = thrust::get<1>(tup);
                     value_idx c           = thrust::get<0>(tup);
                     value_idx out_cluster = label_map[c];

                     if (out_cluster >= 0) {
                       // degenerate hierarchies (infinite/zero max lambda, empty cluster)
                       // get a score of 1.0
                       bool expr = max_lambda == std::numeric_limits<value_t>::max() ||
                                   max_lambda == 0.0 || size == 0;
                       if (expr)
                         result[out_cluster] = 1.0f;
                       else
                         result[out_cluster] = stability[c] / (size * max_lambda);
                     }
                   });
}

};  // namespace Stability
};  // namespace detail
};  // namespace HDBSCAN
};  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/utils.h
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cub/cub.cuh>

#include <raft/util/cudart_utils.hpp>

#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/op/sort.cuh>

#include <cuml/cluster/hdbscan.hpp>

#include <raft/core/device_mdspan.hpp>
#include <raft/label/classlabels.cuh>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/norm.cuh>

#include <algorithm>

#include "../condensed_hierarchy.cu"

#include <common/fast_int_div.cuh>

#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>

#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>

namespace ML {
namespace HDBSCAN {
namespace detail {
namespace Utils {

/**
 * Invokes a cub segmented reduce function over a CSR data array
 * using the indptr as segment offsets. The reduce is invoked twice:
 * once with a null workspace to query the temporary-storage size
 * (standard CUB two-phase pattern), then again to do the work.
 * @tparam value_idx
 * @tparam value_t
 * @tparam CUBReduceFunc
 * @param[in] in data array (size offsets[n_segments])
 * @param[out] out output data array, one reduced value per segment (size n_segments)
 * @param[in] n_segments number of segments in offsets array
 * @param[in] offsets array of segment offsets (size n_segments+1)
 * @param[in] stream cuda stream for ordering operations
 * @param[in] cub_reduce_func segmented reduction function
 *            (e.g. cub::DeviceSegmentedReduce::Max)
 */
template <typename value_idx, typename value_t, typename CUBReduceFunc>
void cub_segmented_reduce(const value_t* in,
                          value_t* out,
                          int n_segments,
                          const value_idx* offsets,
                          cudaStream_t stream,
                          CUBReduceFunc cub_reduce_func)
{
  rmm::device_uvector<char> d_temp_storage(0, stream);
  size_t temp_storage_bytes = 0;
  // First call with a null workspace only computes temp_storage_bytes.
  cub_reduce_func(
    nullptr, temp_storage_bytes, in, out, n_segments, offsets, offsets + 1, stream, false);
  d_temp_storage.resize(temp_storage_bytes, stream);
  // Second call performs the actual segmented reduction.
  cub_reduce_func(d_temp_storage.data(),
                  temp_storage_bytes,
                  in,
                  out,
                  n_segments,
                  offsets,
                  offsets + 1,
                  stream,
                  false);
}

/**
 * Constructs a cluster tree from a CondensedHierarchy by
 * filtering for only entries with cluster size > 1 (i.e. edges
 * whose children are themselves clusters, not leaf points).
 * Parent/child ids are re-based to 0 by subtracting n_leaves.
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle for resource reuse
 * @param[in] condensed_tree condensed hierarchy (size n_leaves + n_clusters)
 * @return a new condensed hierarchy with only entries of size > 1
 */
template <typename value_idx, typename value_t>
Common::CondensedHierarchy<value_idx, value_t> make_cluster_tree(
  const raft::handle_t& handle, Common::CondensedHierarchy<value_idx, value_t>& condensed_tree)
{
  auto stream        = handle.get_stream();
  auto thrust_policy = handle.get_thrust_policy();
  auto parents       = condensed_tree.get_parents();
  auto children      = condensed_tree.get_children();
  auto lambdas       = condensed_tree.get_lambdas();
  auto sizes         = condensed_tree.get_sizes();

  // Count edges whose size > 1; the predicate returns bool, summed as value_idx.
  value_idx cluster_tree_edges = thrust::transform_reduce(
    thrust_policy,
    sizes,
    sizes + condensed_tree.get_n_edges(),
    [=] __device__(value_idx a) { return a > 1; },
    0,
    thrust::plus<value_idx>());

  // remove leaves from condensed tree
  rmm::device_uvector<value_idx> cluster_parents(cluster_tree_edges, stream);
  rmm::device_uvector<value_idx> cluster_children(cluster_tree_edges, stream);
  rmm::device_uvector<value_t> cluster_lambdas(cluster_tree_edges, stream);
  rmm::device_uvector<value_idx> cluster_sizes(cluster_tree_edges, stream);

  // Zip all four columns so a single copy_if keeps rows aligned,
  // using `sizes` as the stencil for the size > 1 filter.
  auto in  = thrust::make_zip_iterator(thrust::make_tuple(parents, children, lambdas, sizes));
  auto out = thrust::make_zip_iterator(thrust::make_tuple(
    cluster_parents.data(), cluster_children.data(), cluster_lambdas.data(), cluster_sizes.data()));

  thrust::copy_if(thrust_policy,
                  in,
                  in + (condensed_tree.get_n_edges()),
                  sizes,
                  out,
                  [=] __device__(value_idx a) { return a > 1; });

  // Cluster ids in the condensed tree start at n_leaves; shift to 0-based.
  auto n_leaves = condensed_tree.get_n_leaves();
  thrust::transform(thrust_policy,
                    cluster_parents.begin(),
                    cluster_parents.end(),
                    cluster_parents.begin(),
                    [n_leaves] __device__(value_idx a) { return a - n_leaves; });
  thrust::transform(thrust_policy,
                    cluster_children.begin(),
                    cluster_children.end(),
                    cluster_children.begin(),
                    [n_leaves] __device__(value_idx a) { return a - n_leaves; });

  return Common::CondensedHierarchy<value_idx, value_t>(handle,
                                                        condensed_tree.get_n_leaves(),
                                                        cluster_tree_edges,
                                                        condensed_tree.get_n_clusters(),
                                                        std::move(cluster_parents),
                                                        std::move(cluster_children),
                                                        std::move(cluster_lambdas),
                                                        std::move(cluster_sizes));
}

/**
 * Computes a CSR index of sorted parents of condensed tree.
 * NOTE: `sorted_parents` is mutated in place (0-indexed by subtracting
 * n_leaves) before being converted to CSR.
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle for resource reuse
 * @param[inout] condensed_tree cluster tree (condensed hierarchy with all nodes of size > 1)
 * @param[inout] sorted_parents parents array sorted; rewritten 0-indexed on exit
 * @param[out] indptr CSR indptr of parents array after sort (size n_clusters + 1)
 */
template <typename value_idx, typename value_t>
void parent_csr(const raft::handle_t& handle,
                Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                value_idx* sorted_parents,
                value_idx* indptr)
{
  auto stream        = handle.get_stream();
  auto thrust_policy = handle.get_thrust_policy();

  // NOTE(review): children and sizes are fetched but not used below.
  auto children   = condensed_tree.get_children();
  auto sizes      = condensed_tree.get_sizes();
  auto n_edges    = condensed_tree.get_n_edges();
  auto n_leaves   = condensed_tree.get_n_leaves();
  auto n_clusters = condensed_tree.get_n_clusters();

  // 0-index sorted parents by subtracting n_leaves for offsets and birth/stability indexing
  auto index_op = [n_leaves] __device__(const auto& x) { return x - n_leaves; };
  thrust::transform(
    thrust_policy, sorted_parents, sorted_parents + n_edges, sorted_parents, index_op);

  raft::sparse::convert::sorted_coo_to_csr(sorted_parents, n_edges, indptr, n_clusters + 1, stream);
}

/**
 * L1-normalizes each row of a row-major matrix in place
 * (each row is divided by its row sum).
 * NOTE(review): naming is inverted vs. the raft call — here `n` is passed
 * as the column count to rowNorm and `m` as the number of rows; confirm
 * against callers before changing.
 * @param[inout] data row-major matrix, modified in place
 * @param[in] n number of columns
 * @param[in] m number of rows
 * @param[in] stream cuda stream for ordering operations
 */
template <typename value_idx, typename value_t>
void normalize(value_t* data, value_idx n, size_t m, cudaStream_t stream)
{
  rmm::device_uvector<value_t> sums(m, stream);

  // Compute row sums
  raft::linalg::rowNorm<value_t, size_t>(
    sums.data(), data, (size_t)n, m, raft::linalg::L1Norm, true, stream);

  // Divide vector by row sums (modify in place)
  raft::linalg::matrixVectorOp(
    data,
    const_cast<value_t*>(data),
    sums.data(),
    n,
    (value_idx)m,
    true,
    false,
    [] __device__(value_t mat_in, value_t vec_in) { return mat_in / vec_in; },
    stream);
}

/**
 * Computes softmax (unnormalized). The input is modified in-place. For numerical stability, the
 * maximum value of a row is subtracted from the exponent.
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle for resource reuse
 * @param[inout] data input matrix, row-major (size m * n); overwritten with exp(x - row_max)
 * @param[in] n number of columns
 * @param[in] m number of rows
 */
template <typename value_idx, typename value_t>
void softmax(const raft::handle_t& handle, value_t* data, value_idx n, size_t m)
{
  rmm::device_uvector<value_t> linf_norm(m, handle.get_stream());

  auto data_const_view =
    raft::make_device_matrix_view<const value_t, value_idx, raft::row_major>(data, (int)m, n);
  auto data_view =
    raft::make_device_matrix_view<value_t, value_idx, raft::row_major>(data, (int)m, n);
  auto linf_norm_const_view =
    raft::make_device_vector_view<const value_t, value_idx>(linf_norm.data(), (int)m);
  auto linf_norm_view =
    raft::make_device_vector_view<value_t, value_idx>(linf_norm.data(), (int)m);

  // Row-wise L-infinity norm = per-row maximum magnitude, used as the
  // stabilizing shift in the exponent below.
  raft::linalg::norm(handle,
                     data_const_view,
                     linf_norm_view,
                     raft::linalg::LinfNorm,
                     raft::linalg::Apply::ALONG_ROWS);

  raft::linalg::matrix_vector_op(
    handle,
    data_const_view,
    linf_norm_const_view,
    data_view,
    raft::linalg::Apply::ALONG_COLUMNS,
    [] __device__(value_t mat_in, value_t vec_in) { return exp(mat_in - vec_in); });
}

};  // namespace Utils
};  // namespace detail
};  // namespace HDBSCAN
};  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/condense.cuh
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "kernels/condense.cuh"

#include <cub/cub.cuh>

#include <raft/util/cudart_utils.hpp>

#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>

#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/op/sort.cuh>

#include <cuml/cluster/hdbscan.hpp>

#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>

namespace ML {
namespace HDBSCAN {
namespace detail {
namespace Condense {

/**
 * Condenses a binary single-linkage tree dendrogram in the Scipy hierarchy
 * format by collapsing subtrees that fall below a minimum cluster size.
 *
 * For increased parallelism, the output array sizes are held fixed but
 * the result will be sparse (e.g. zeros in place of parents who have been
 * removed / collapsed). This function accepts an empty instance of
 * `CondensedHierarchy` and invokes the `condense()` function on it to
 * convert the sparse output arrays into their dense form.
 *
 * Implementation: a level-synchronous BFS from the root. Each iteration
 * launches `condense_hierarchy_kernel` over the whole frontier, swaps
 * frontiers, and repeats until the frontier is empty.
 *
 * @tparam value_idx
 * @tparam value_t
 * @tparam tpb threads per block for the condense kernel
 * @param handle
 * @param[in] children parents/children from single-linkage dendrogram
 * @param[in] delta distances from single-linkage dendrogram
 * @param[in] sizes sizes from single-linkage dendrogram
 * @param[in] min_cluster_size any subtrees less than this size will be
 *            collapsed.
 * @param[in] n_leaves number of actual data samples in the dendrogram
 * @param[out] condensed_tree output dendrogram. will likely no longer be
 *             a binary tree.
 */
template <typename value_idx, typename value_t, int tpb = 256>
void build_condensed_hierarchy(const raft::handle_t& handle,
                               const value_idx* children,
                               const value_t* delta,
                               const value_idx* sizes,
                               int min_cluster_size,
                               int n_leaves,
                               Common::CondensedHierarchy<value_idx, value_t>& condensed_tree)
{
  cudaStream_t stream = handle.get_stream();
  auto exec_policy    = handle.get_thrust_policy();

  // Root is the last edge in the dendrogram
  int root = 2 * (n_leaves - 1);

  auto d_ptr           = thrust::device_pointer_cast(children);
  value_idx n_vertices = *(thrust::max_element(exec_policy, d_ptr, d_ptr + root)) + 1;

  // Prevent potential infinite loop from labeling disconnected
  // connectivities graph.
  RAFT_EXPECTS(n_vertices == root,
               "Multiple components found in MST or MST is invalid. "
               "Cannot find single-linkage solution. Found %d vertices "
               "total.",
               n_vertices);

  // Current / next BFS frontiers over all dendrogram nodes (leaves + merges).
  rmm::device_uvector<bool> frontier(root + 1, stream);
  rmm::device_uvector<bool> next_frontier(root + 1, stream);

  thrust::fill(exec_policy, frontier.begin(), frontier.end(), false);
  thrust::fill(exec_policy, next_frontier.begin(), next_frontier.end(), false);

  // Array to propagate the lambda of subtrees actively being collapsed
  // through multiple bfs iterations.
  rmm::device_uvector<value_t> ignore(root + 1, stream);

  // Propagate labels from root
  rmm::device_uvector<value_idx> relabel(root + 1, handle.get_stream());
  thrust::fill(exec_policy, relabel.begin(), relabel.end(), -1);
  raft::update_device(relabel.data() + root, &root, 1, handle.get_stream());

  // Flip frontier for root
  constexpr bool start = true;
  raft::update_device(frontier.data() + root, &start, 1, handle.get_stream());

  // Sparse output buffers: up to 2 edges per node, -1 marks unused slots.
  rmm::device_uvector<value_idx> out_parent((root + 1) * 2, stream);
  rmm::device_uvector<value_idx> out_child((root + 1) * 2, stream);
  rmm::device_uvector<value_t> out_lambda((root + 1) * 2, stream);
  rmm::device_uvector<value_idx> out_size((root + 1) * 2, stream);

  thrust::fill(exec_policy, out_parent.begin(), out_parent.end(), -1);
  thrust::fill(exec_policy, out_child.begin(), out_child.end(), -1);
  thrust::fill(exec_policy, out_lambda.begin(), out_lambda.end(), -1);
  thrust::fill(exec_policy, out_size.begin(), out_size.end(), -1);
  thrust::fill(exec_policy, ignore.begin(), ignore.end(), -1);

  // While frontier is not empty, perform single bfs through tree
  size_t grid = raft::ceildiv(root + 1, (int)tpb);

  value_idx n_elements_to_traverse =
    thrust::reduce(exec_policy, frontier.data(), frontier.data() + root + 1, 0);

  while (n_elements_to_traverse > 0) {
    // TODO: Investigate whether it would be worth performing a gather/argmatch in order
    // to schedule only the number of threads needed. (it might not be worth it)
    condense_hierarchy_kernel<<<grid, tpb, 0, handle.get_stream()>>>(frontier.data(),
                                                                     next_frontier.data(),
                                                                     ignore.data(),
                                                                     relabel.data(),
                                                                     children,
                                                                     delta,
                                                                     sizes,
                                                                     n_leaves,
                                                                     min_cluster_size,
                                                                     out_parent.data(),
                                                                     out_child.data(),
                                                                     out_lambda.data(),
                                                                     out_size.data());

    // Advance BFS: next frontier becomes the current one, then reset it.
    thrust::copy(exec_policy, next_frontier.begin(), next_frontier.end(), frontier.begin());
    thrust::fill(exec_policy, next_frontier.begin(), next_frontier.end(), false);

    n_elements_to_traverse =
      thrust::reduce(exec_policy, frontier.data(), frontier.data() + root + 1, 0);
    handle.sync_stream(stream);
  }

  // Compact the sparse (-1 padded) arrays into the dense condensed tree.
  condensed_tree.condense(out_parent.data(), out_child.data(), out_lambda.data(), out_size.data());
}

};  // end namespace Condense
};  // end namespace detail
};  // end namespace HDBSCAN
};  // end namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/reachability.cuh
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

#include <raft/linalg/unary_op.cuh>
#include <raft/neighbors/brute_force.cuh>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/linalg/symmetrize.cuh>

#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>

#include <cuml/neighbors/knn.hpp>

#include <raft/distance/distance.cuh>

#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>

namespace ML {
namespace HDBSCAN {
namespace detail {
namespace Reachability {

/**
 * Extract core distances from KNN graph. This is essentially
 * performing a knn_dists[:,min_pts]
 * @tparam value_idx data type for integrals
 * @tparam value_t data type for distance
 * @tparam tpb block size for kernel
 * @param[in] knn_dists knn distance array (size n * n_neighbors)
 * @param[in] min_samples this neighbor will be selected for core distances
 * @param[in] n_neighbors the number of neighbors of each point in the knn graph
 * @param[in] n number of samples
 * @param[out] out output array (size n)
 * @param[in] stream stream for which to order cuda operations
 */
template <typename value_idx, typename value_t, int tpb = 256>
void core_distances(
  value_t* knn_dists, int min_samples, int n_neighbors, size_t n, value_t* out, cudaStream_t stream)
{
  ASSERT(n_neighbors >= min_samples,
         "the size of the neighborhood should be greater than or equal to min_samples");

  auto exec_policy = rmm::exec_policy(stream);

  auto indices = thrust::make_counting_iterator<value_idx>(0);

  // Core distance of row i = distance to its (min_samples-1)-th neighbor
  // (0-based column index into the row-major knn_dists matrix).
  thrust::transform(exec_policy, indices, indices + n, out, [=] __device__(value_idx row) {
    return knn_dists[row * n_neighbors + (min_samples - 1)];
  });
}

/**
 * Wraps the brute force knn API, to be used for both training and prediction
 * @tparam value_idx data type for integrals
 * @tparam value_t data type for distance
 * @param[in] handle raft handle for resource reuse
 * @param[in] X input data points (size m * n)
 * @param[out] inds nearest neighbor indices (size n_search_items * k)
 * @param[out] dists nearest neighbor distances (size n_search_items * k)
 * @param[in] m number of rows in X
 * @param[in] n number of columns in X
 * @param[in] search_items array of items to search of dimensionality D (size n_search_items * n)
 * @param[in] n_search_items number of rows in search_items
 * @param[in] k number of nearest neighbors
 * @param[in] metric distance metric to use
 */
template <typename value_idx, typename value_t>
void compute_knn(const raft::handle_t& handle,
                 const value_t* X,
                 value_idx* inds,
                 value_t* dists,
                 size_t m,
                 size_t n,
                 const value_t* search_items,
                 size_t n_search_items,
                 int k,
                 raft::distance::DistanceType metric)
{
  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();
  std::vector<value_t*> inputs;
  inputs.push_back(const_cast<value_t*>(X));

  // NOTE(review): size_t m is narrowed into std::vector<int> here; fine for
  // row counts < 2^31 — confirm callers never exceed that.
  std::vector<int> sizes;
  sizes.push_back(m);

  // This is temporary. Once faiss is updated, we should be able to
  // pass value_idx through to knn.
  rmm::device_uvector<int64_t> int64_indices(k * n_search_items, stream);

  // perform knn
  brute_force_knn(handle,
                  inputs,
                  sizes,
                  n,
                  const_cast<value_t*>(search_items),
                  n_search_items,
                  int64_indices.data(),
                  dists,
                  k,
                  true,
                  true,
                  metric);

  // convert from current knn's 64-bit to 32-bit.
  thrust::transform(exec_policy,
                    int64_indices.data(),
                    int64_indices.data() + int64_indices.size(),
                    inds,
                    [] __device__(int64_t in) -> value_idx { return in; });
}

/*
  @brief Internal function for CPU->GPU interop
         to compute core_dists
*/
template <typename value_idx, typename value_t>
void _compute_core_dists(const raft::handle_t& handle,
                         const value_t* X,
                         value_t* core_dists,
                         size_t m,
                         size_t n,
                         raft::distance::DistanceType metric,
                         int min_samples)
{
  RAFT_EXPECTS(metric == raft::distance::DistanceType::L2SqrtExpanded,
               "Currently only L2 expanded distance is supported");

  auto stream = handle.get_stream();

  rmm::device_uvector<value_idx> inds(min_samples * m, stream);
  rmm::device_uvector<value_t> dists(min_samples * m, stream);

  // perform knn
  compute_knn(handle, X, inds.data(), dists.data(), m, n, X, m, min_samples, metric);

  // Slice core distances (distances to kth nearest neighbor)
  core_distances<value_idx>(dists.data(), min_samples, min_samples, m, core_dists, stream);
}

//  Functor to post-process distances into reachability space:
//  mutual reachability dist = max(core_dist(row), core_dist(col), alpha * d(row, col))
template <typename value_idx, typename value_t>
struct ReachabilityPostProcess {
  DI value_t operator()(value_t value, value_idx row, value_idx col) const
  {
    return max(core_dists[col], max(core_dists[row], alpha * value));
  }

  const value_t* core_dists;
  value_t alpha;
};

/**
 * Given core distances, Fuses computations of L2 distances between all
 * points, projection into mutual reachability space, and k-selection.
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle for resource reuse
 * @param[out] out_inds output indices array (size m * k)
 * @param[out] out_dists output distances array (size m * k)
 * @param[in] X input data points (size m * n)
 * @param[in] m number of rows in X
 * @param[in] n number of columns in X
 * @param[in] k neighborhood size (includes self-loop)
 * @param[in] core_dists array of core distances (size m)
 * @param[in] alpha weight applied to the raw distance inside the epilogue
 */
template <typename value_idx, typename value_t>
void mutual_reachability_knn_l2(const raft::handle_t& handle,
                                value_idx* out_inds,
                                value_t* out_dists,
                                const value_t* X,
                                size_t m,
                                size_t n,
                                int k,
                                value_t* core_dists,
                                value_t alpha)
{
  // Create a functor to postprocess distances into mutual reachability space
  // Note that we can't use a lambda for this here, since we get errors like:
  // `A type local to a function cannot be used in the template argument of the
  // enclosing parent function (and any parent classes) of an extended __device__
  // or __host__ __device__ lambda`
  auto epilogue = ReachabilityPostProcess<value_idx, value_t>{core_dists, alpha};

  auto X_view = raft::make_device_matrix_view(X, m, n);
  std::vector<raft::device_matrix_view<const value_t, size_t>> index = {X_view};

  raft::neighbors::brute_force::knn<value_idx, value_t>(
    handle,
    index,
    X_view,
    raft::make_device_matrix_view(out_inds, m, static_cast<size_t>(k)),
    raft::make_device_matrix_view(out_dists, m, static_cast<size_t>(k)),
    // TODO: expand distance metrics to support more than just L2 distance
    // https://github.com/rapidsai/cuml/issues/5301
    raft::distance::DistanceType::L2SqrtExpanded,
    std::make_optional<float>(2.0f),
    std::nullopt,
    epilogue);
}

/**
 * Constructs a mutual reachability graph, which is a k-nearest neighbors
 * graph projected into mutual reachability space using the following
 * function for each data point, where core_distance is the distance
 * to the kth neighbor: max(core_distance(a), core_distance(b), d(a, b))
 *
 * Unfortunately, points in the tails of the pdf (e.g. in sparse regions
 * of the space) can have very large neighborhoods, which will impact
 * nearby neighborhoods. Because of this, it's possible that the
 * radius for points in the main mass, which might have a very small
 * radius initially, to expand very large. As a result, the initial
 * knn which was used to compute the core distances may no longer
 * capture the actual neighborhoods after projection into mutual
 * reachability space.
 *
 * For the experimental version, we execute the knn twice- once
 * to compute the radii (core distances) and again to capture
 * the final neighborhoods. Future iterations of this algorithm
 * will work improve upon this "exact" version, by using
 * more specialized data structures, such as space-partitioning
 * structures. It has also been shown that approximate nearest
 * neighbors can yield reasonable neighborhoods as the
 * data sizes increase.
 *
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle for resource reuse
 * @param[in] X input data points (size m * n)
 * @param[in] m number of rows in X
 * @param[in] n number of columns in X
 * @param[in] metric distance metric to use
 * @param[in] min_samples this neighborhood will be selected for core distances
 * @param[in] alpha weight applied when internal distance is chosen for
 *            mutual reachability (value of 1.0 disables the weighting)
 * @param[out] indptr CSR indptr of output knn graph (size m + 1)
 * @param[out] core_dists output core distances array (size m)
 * @param[out] out COO object, uninitialized on entry, on exit it stores the
 *             (symmetrized) maximum reachability distance for the k nearest
 *             neighbors.
 */
template <typename value_idx, typename value_t>
void mutual_reachability_graph(const raft::handle_t& handle,
                               const value_t* X,
                               size_t m,
                               size_t n,
                               raft::distance::DistanceType metric,
                               int min_samples,
                               value_t alpha,
                               value_idx* indptr,
                               value_t* core_dists,
                               raft::sparse::COO<value_t, value_idx>& out)
{
  RAFT_EXPECTS(metric == raft::distance::DistanceType::L2SqrtExpanded,
               "Currently only L2 expanded distance is supported");

  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();

  rmm::device_uvector<value_idx> coo_rows(min_samples * m, stream);
  rmm::device_uvector<value_idx> inds(min_samples * m, stream);
  rmm::device_uvector<value_t> dists(min_samples * m, stream);

  // perform knn
  compute_knn(handle, X, inds.data(), dists.data(), m, n, X, m, min_samples, metric);

  // Slice core distances (distances to kth nearest neighbor)
  core_distances<value_idx>(dists.data(), min_samples, min_samples, m, core_dists, stream);

  /**
   * Compute L2 norm
   */
  mutual_reachability_knn_l2(
    handle, inds.data(), dists.data(), X, m, n, min_samples, core_dists, (value_t)1.0 / alpha);

  // Expand knn graph into undirected graph: compute the COO row index for
  // every (row, neighbor) pair, i.e. row = flat_index / min_samples.
  auto coo_rows_counting_itr = thrust::make_counting_iterator<value_idx>(0);
  thrust::transform(exec_policy,
                    coo_rows_counting_itr,
                    coo_rows_counting_itr + (m * min_samples),
                    coo_rows.data(),
                    [min_samples] __device__(value_idx c) -> value_idx { return c / min_samples; });

  raft::sparse::linalg::symmetrize(
    handle, coo_rows.data(), inds.data(), dists.data(), m, m, min_samples * m, out);

  raft::sparse::convert::sorted_coo_to_csr(out.rows(), out.nnz, indptr, m + 1, stream);

  // self-loops get max distance
  auto transform_in =
    thrust::make_zip_iterator(thrust::make_tuple(out.rows(), out.cols(), out.vals()));

  thrust::transform(exec_policy,
                    transform_in,
                    transform_in + out.nnz,
                    out.vals(),
                    [=] __device__(const thrust::tuple<value_idx, value_idx, value_t>& tup) {
                      return thrust::get<0>(tup) == thrust::get<1>(tup)
                               ? std::numeric_limits<value_t>::max()
                               : thrust::get<2>(tup);
                    });
}

};  // end namespace Reachability
};  // end namespace detail
};  // end namespace HDBSCAN
};  // end namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/membership.cuh
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "kernels/membership.cuh"
#include "utils.h"

#include <cub/cub.cuh>

#include <raft/util/cudart_utils.hpp>

#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/op/sort.cuh>

#include <cuml/cluster/hdbscan.hpp>

#include <raft/label/classlabels.cuh>

#include <algorithm>

#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/transform.h>

#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>

namespace ML {
namespace HDBSCAN {
namespace detail {
namespace Membership {

// TODO: Compute outlier scores

/**
 * Computes a cluster-membership probability for every point: each point's
 * lambda relative to the maximum lambda ("death") of its assigned cluster.
 * @param[in] handle raft handle for resource reuse
 * @param[in] condensed_tree condensed hierarchy
 * @param[in] labels cluster label per point (size n_leaves)
 * @param[out] probabilities output probabilities (size n_leaves), zero-filled
 *             before being written by the per-edge functor
 */
template <typename value_idx, typename value_t>
void get_probabilities(const raft::handle_t& handle,
                       Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                       const value_idx* labels,
                       value_t* probabilities)
{
  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();

  auto parents    = condensed_tree.get_parents();
  auto children   = condensed_tree.get_children();
  auto lambdas    = condensed_tree.get_lambdas();
  auto n_edges    = condensed_tree.get_n_edges();
  auto n_clusters = condensed_tree.get_n_clusters();
  auto n_leaves   = condensed_tree.get_n_leaves();

  // Work on a copy: parent_csr 0-indexes the parents array in place.
  rmm::device_uvector<value_idx> sorted_parents(n_edges, stream);
  raft::copy_async(sorted_parents.data(), parents, n_edges, stream);

  rmm::device_uvector<value_idx> sorted_parents_offsets(n_clusters + 1, stream);
  Utils::parent_csr(handle, condensed_tree, sorted_parents.data(), sorted_parents_offsets.data());

  // this is to find maximum lambdas of all children under a parent
  rmm::device_uvector<value_t> deaths(n_clusters, stream);
  thrust::fill(exec_policy, deaths.begin(), deaths.end(), 0.0f);

  Utils::cub_segmented_reduce(
    lambdas,
    deaths.data(),
    n_clusters,
    sorted_parents_offsets.data(),
    stream,
    cub::DeviceSegmentedReduce::
      Max<const value_t*, value_t*, const value_idx*, const value_idx*>);

  // Calculate probability per point
  thrust::fill(exec_policy, probabilities, probabilities + n_leaves, 0.0f);

  probabilities_functor<value_idx, value_t> probabilities_op(
    probabilities, deaths.data(), children, lambdas, labels, n_leaves);
  thrust::for_each(exec_policy,
                   thrust::make_counting_iterator(value_idx(0)),
                   thrust::make_counting_iterator(n_edges),
                   probabilities_op);
}

};  // namespace Membership
};  // namespace detail
};  // namespace HDBSCAN
};  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/predict.cuh
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "kernels/predict.cuh"
#include "reachability.cuh"

#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>

#include <thrust/transform.h>

namespace ML {
namespace HDBSCAN {
namespace detail {
namespace Predict {

/**
 * Find the nearest mutual reachability neighbor of a point, and compute
 * the associated lambda value for the point, given the mutual reachability
 * distance to a nearest neighbor (lambda = 1 / mutual reachability distance).
 *
 * @tparam value_idx
 * @tparam value_t
 * @tparam tpb
 * @param[in] handle raft handle for resource reuse
 * @param[in] input_core_dists an array of core distances for all points (size m)
 * @param[in] prediction_core_dists an array of core distances for all prediction points (size
 * n_prediction_points)
 * @param[in] knn_dists knn distance array (size n_prediction_points * neighborhood)
 * @param[in] knn_inds knn indices array (size n_prediction_points * neighborhood)
 * @param[in] n_prediction_points number of prediction points
 * @param[in] neighborhood the neighborhood of prediction points
 * @param[out] min_mr_inds indices of points with the minimum mutual reachability distance (size
 * n_prediction_points)
 * @param[out] prediction_lambdas lambda values for prediction points (size n_prediction_points)
 */
template <typename value_idx, typename value_t, int tpb = 256>
void _find_neighbor_and_lambda(const raft::handle_t& handle,
                               value_t* input_core_dists,
                               value_t* prediction_core_dists,
                               value_t* knn_dists,
                               value_idx* knn_inds,
                               size_t n_prediction_points,
                               int neighborhood,
                               value_idx* min_mr_inds,
                               value_t* prediction_lambdas)
{
  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();

  // Buffer for storing the minimum mutual reachability distances
  rmm::device_uvector<value_t> min_mr_dists(n_prediction_points, stream);

  int n_blocks = raft::ceildiv((int)n_prediction_points, tpb);

  // get nearest neighbors for each prediction point in mutual reachability space
  min_mutual_reachability_kernel<<<n_blocks, tpb, 0, stream>>>(input_core_dists,
                                                               prediction_core_dists,
                                                               knn_dists,
                                                               knn_inds,
                                                               n_prediction_points,
                                                               neighborhood,
                                                               min_mr_dists.data(),
                                                               min_mr_inds);

  // obtain lambda values from minimum mutual reachability distances;
  // a zero distance maps to the largest representable lambda
  thrust::transform(exec_policy,
                    min_mr_dists.data(),
                    min_mr_dists.data() + n_prediction_points,
                    prediction_lambdas,
                    [] __device__(value_t dist) {
                      if (dist > 0) return (1 / dist);
                      return std::numeric_limits<value_t>::max();
                    });
}

/**
 * Return the cluster label (of the original clustering) and membership
 * probability of a new data point.
 *
 * @tparam value_idx
 * @tparam value_t
 * @tparam tpb
 * @param[in] handle raft handle for resource reuse
 * @param[in] condensed_tree condensed hierarchy
 * @param[in] prediction_data PredictionData object
 * @param[in] n_prediction_points number of prediction points
 * @param[in] min_mr_inds indices of points with the minimum mutual reachability distance (size
 * n_prediction_points)
 * @param[in] prediction_lambdas lambda values for prediction points (size n_prediction_points)
 * @param[in] labels monotonic labels of all points
 * @param[out] out_labels output cluster labels
 * @param[out] out_probabilities output probabilities
 */
template <typename value_idx, typename value_t, int tpb = 256>
void _find_cluster_and_probability(const raft::handle_t& handle,
                                   Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                                   Common::PredictionData<value_idx, value_t>& prediction_data,
                                   size_t n_prediction_points,
                                   value_idx* min_mr_inds,
                                   value_t* prediction_lambdas,
                                   value_idx* labels,
                                   value_idx* out_labels,
                                   value_t* out_probabilities)
{
  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();

  auto parents    = condensed_tree.get_parents();
  auto children   = condensed_tree.get_children();
  value_t* lambdas = condensed_tree.get_lambdas();
  auto n_edges    = condensed_tree.get_n_edges();
  auto n_leaves   = condensed_tree.get_n_leaves();

  value_t* deaths               = prediction_data.get_deaths();
  value_idx* selected_clusters  = prediction_data.get_selected_clusters();
  value_idx* index_into_children = prediction_data.get_index_into_children();

  int n_blocks = raft::ceildiv((int)n_prediction_points, tpb);

  cluster_probability_kernel<<<n_blocks, tpb, 0, stream>>>(min_mr_inds,
                                                           prediction_lambdas,
                                                           index_into_children,
                                                           labels,
                                                           deaths,
                                                           selected_clusters,
                                                           parents,
                                                           lambdas,
                                                           n_leaves,
                                                           n_prediction_points,
                                                           out_labels,
                                                           out_probabilities);
}

// Build the mutual reachability graph and obtain the nearest neighbors for the prediction points.
// The KNN and core distances of prediction points are computed here.
template <typename value_idx, typename value_t, int tpb = 256>
void _compute_knn_and_nearest_neighbor(const raft::handle_t& handle,
                                       Common::PredictionData<value_idx, value_t>& prediction_data,
                                       const value_t* X,
                                       const value_t* points_to_predict,
                                       int min_samples,
                                       size_t n_prediction_points,
                                       value_idx* min_mr_inds,
                                       value_t* prediction_lambdas,
                                       raft::distance::DistanceType metric)
{
  auto stream = handle.get_stream();
  size_t m    = prediction_data.n_rows;
  size_t n    = prediction_data.n_cols;
  value_t* input_core_dists = prediction_data.get_core_dists();

  // Neighborhood searched for prediction points; matches scikit-learn-contrib
  // hdbscan's choice of (min_samples - 1) * 2.
  int neighborhood = (min_samples - 1) * 2;

  rmm::device_uvector<value_idx> inds(neighborhood * n_prediction_points, stream);
  rmm::device_uvector<value_t> dists(neighborhood * n_prediction_points, stream);
  rmm::device_uvector<value_t> prediction_core_dists(n_prediction_points, stream);

  // perform knn
  Reachability::compute_knn(handle,
                            X,
                            inds.data(),
                            dists.data(),
                            m,
                            n,
                            points_to_predict,
                            n_prediction_points,
                            neighborhood,
                            metric);

  // Slice core distances (distances to kth nearest neighbor). The index of the neighbor is
  // consistent with Scikit-learn Contrib
  Reachability::core_distances<value_idx>(dists.data(),
                                          min_samples,
                                          neighborhood,
                                          n_prediction_points,
                                          prediction_core_dists.data(),
                                          stream);

  _find_neighbor_and_lambda(handle,
                            input_core_dists,
                            prediction_core_dists.data(),
                            dists.data(),
                            inds.data(),
                            n_prediction_points,
                            neighborhood,
                            min_mr_inds,
                            prediction_lambdas);
}

/**
 * Predict the cluster label and the probability of the label for new points.
 * The returned labels are those of the original clustering,
 * and therefore are not (necessarily) the cluster labels that would
 * be found by clustering the original data combined with
 * the prediction points, hence the 'approximate' label.
 *
 * @tparam value_idx
 * @tparam value_t
 * @param[in] handle raft handle for resource reuse
 * @param[in] condensed_tree a condensed hierarchy
 * @param[in] prediction_data PredictionData object
 * @param[in] X input data points (size m * n)
 * @param[in] labels converted monotonic labels of the input data points
 * @param[in] points_to_predict input prediction points (size n_prediction_points * n)
 * @param[in] n_prediction_points number of prediction points
 * @param[in] metric distance metric
 * @param[in] min_samples neighborhood size during training (includes self-loop)
 * @param[out] out_labels output cluster labels
 * @param[out] out_probabilities output probabilities
 */
template <typename value_idx, typename value_t, int tpb = 256>
void approximate_predict(const raft::handle_t& handle,
                         Common::CondensedHierarchy<value_idx, value_t>& condensed_tree,
                         Common::PredictionData<value_idx, value_t>& prediction_data,
                         const value_t* X,
                         value_idx* labels,
                         const value_t* points_to_predict,
                         size_t n_prediction_points,
                         raft::distance::DistanceType metric,
                         int min_samples,
                         value_idx* out_labels,
                         value_t* out_probabilities)
{
  RAFT_EXPECTS(metric == raft::distance::DistanceType::L2SqrtExpanded,
               "Currently only L2 expanded distance is supported");

  auto stream      = handle.get_stream();
  auto exec_policy = handle.get_thrust_policy();

  // Obtain lambdas for each prediction point using the closest point in mutual reachability space
  rmm::device_uvector<value_t> prediction_lambdas(n_prediction_points, stream);
  rmm::device_uvector<value_idx> min_mr_inds(n_prediction_points, stream);
  _compute_knn_and_nearest_neighbor(handle,
                                    prediction_data,
                                    X,
                                    points_to_predict,
                                    min_samples,
                                    n_prediction_points,
                                    min_mr_inds.data(),
                                    prediction_lambdas.data(),
                                    metric);

  // Using the nearest neighbor indices, find the assigned cluster label and probability
  _find_cluster_and_probability(handle,
                                condensed_tree,
                                prediction_data,
                                n_prediction_points,
                                min_mr_inds.data(),
                                prediction_lambdas.data(),
                                labels,
                                out_labels,
                                out_probabilities);
}

};  // end namespace Predict
};  // end namespace detail
};  // end namespace HDBSCAN
};  // end namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/kernels/select.cuh
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

namespace ML {
namespace HDBSCAN {
namespace detail {
namespace Select {

/**
 * For each non-0 value in frontier, deselects children clusters
 * and adds children to frontier. One thread per cluster; intended to be
 * launched repeatedly until the frontier is empty (BFS down the cluster tree).
 * @tparam value_idx
 * @param[in] indptr CSR indptr of array (size n_clusters+1)
 * @param[in] children array of children indices (size n_clusters)
 * @param[inout] frontier frontier array storing which nodes need
 *               processing in each kernel invocation (size n_clusters)
 * @param[out] next_frontier frontier for the next invocation; children of
 *             processed nodes are flagged here (size n_clusters)
 * @param[inout] is_cluster array of cluster selections / deselections (size n_clusters)
 * @param[in] n_clusters number of clusters
 */
template <typename value_idx>
__global__ void propagate_cluster_negation_kernel(const value_idx* indptr,
                                                  const value_idx* children,
                                                  int* frontier,
                                                  int* next_frontier,
                                                  int* is_cluster,
                                                  int n_clusters)
{
  int cluster = blockDim.x * blockIdx.x + threadIdx.x;

  if (cluster < n_clusters && frontier[cluster]) {
    frontier[cluster] = false;

    value_idx children_start = indptr[cluster];
    value_idx children_stop  = indptr[cluster + 1];
    for (int i = children_start; i < children_stop; i++) {
      value_idx child      = children[i];
      next_frontier[child] = true;
      is_cluster[child]    = false;
    }
  }
}

/**
 * For each currently-selected cluster, walks up the cluster tree until an
 * ancestor satisfies the epsilon threshold (eps = 1 / lambda), marking that
 * ancestor as the selected cluster. One thread per selected cluster.
 */
template <typename value_idx, typename value_t, int tpb = 256>
__global__ void cluster_epsilon_search_kernel(const int* selected_clusters,
                                              const int n_selected_clusters,
                                              const value_idx* parents,
                                              const value_idx* children,
                                              const value_t* lambdas,
                                              const value_idx cluster_tree_edges,
                                              int* is_cluster,
                                              int* frontier,
                                              const int n_clusters,
                                              const value_t cluster_selection_epsilon,
                                              const bool allow_single_cluster)
{
  auto selected_cluster_idx = threadIdx.x + blockDim.x * blockIdx.x;

  if (selected_cluster_idx >= n_selected_clusters) { return; }

  // don't need to process root as a cluster
  // offsetting for root by subtracting 1 from the cluster
  // this is because root isn't involved in epsilon search directly
  // further, it allows the remaining clusters to be 0-index
  // and directly access from the children/lambda arrays
  // since parents/lambdas are sorted by children
  // the relation is: child = child_idx + 1
  auto child_idx = selected_clusters[selected_cluster_idx] - 1;

  // -1 means the selected cluster was the root itself; nothing to search
  if (child_idx == -1) { return; }

  // epsilon is the reciprocal of the lambda at which the cluster forms
  auto eps = 1 / lambdas[child_idx];

  if (eps < cluster_selection_epsilon) {
    constexpr auto root = 0;
    value_idx parent;
    value_t parent_eps;

    // climb the tree until an ancestor's epsilon passes the threshold
    do {
      parent = parents[child_idx];

      if (parent == root) {
        if (!allow_single_cluster) {
          // setting parent to actual value of child
          // by offsetting
          parent = child_idx + 1;
        }
        break;
      }

      // again, offsetting for root
      child_idx = parent - 1;

      // lambda is picked for where the parent
      // resides according to where it is a child
      parent_eps = 1 / lambdas[child_idx];
    } while (parent_eps <= cluster_selection_epsilon);

    frontier[parent]   = true;
    is_cluster[parent] = true;
  } else {
    // offset 1 ahead for root
    frontier[child_idx + 1] = true;
  }
}

};  // namespace Select
};  // namespace detail
};  // namespace HDBSCAN
};  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/kernels/soft_clustering.cuh
/*
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <raft/util/fast_int_div.cuh>

namespace ML {
namespace HDBSCAN {
namespace detail {
namespace Predict {

/**
 * Computes, for every (point, selected cluster) pair, the merge height: the
 * lambda at which the point's cluster and the selected cluster meet in the
 * condensed tree (found by walking both up to their lowest common ancestor).
 * One thread per (row, col) entry of the m x n_selected_clusters output.
 *
 * NOTE(review): `idx` is `value_idx`; if m * n_selected_clusters exceeds the
 * range of value_idx this comparison overflows — assumes callers keep sizes
 * within range, TODO confirm.
 */
template <typename value_idx, typename value_t, int tpb = 256>
__global__ void merge_height_kernel(value_t* heights,
                                    value_t* lambdas,
                                    value_idx* index_into_children,
                                    value_idx* parents,
                                    size_t m,
                                    value_idx n_selected_clusters,
                                    raft::util::FastIntDiv n,
                                    value_idx* selected_clusters)
{
  value_idx idx = blockDim.x * blockIdx.x + threadIdx.x;

  if (idx < value_idx(m * n_selected_clusters)) {
    value_idx row           = idx / n_selected_clusters;
    value_idx col           = idx % n_selected_clusters;
    value_idx right_cluster = selected_clusters[col];
    value_idx left_cluster  = parents[index_into_children[row]];
    bool took_right_parent  = false;
    bool took_left_parent   = false;
    value_idx last_cluster;

    // climb both sides toward the lowest common ancestor; larger ids are
    // deeper in the condensed tree, so always advance the larger side
    while (left_cluster != right_cluster) {
      if (left_cluster > right_cluster) {
        took_left_parent = true;
        last_cluster     = left_cluster;
        left_cluster     = parents[index_into_children[left_cluster]];
      } else {
        took_right_parent = true;
        last_cluster      = right_cluster;
        right_cluster     = parents[index_into_children[right_cluster]];
      }
    }

    if (took_left_parent && took_right_parent) {
      // both sides moved: merge happens at the last cluster stepped through
      heights[idx] = lambdas[index_into_children[last_cluster]];
    } else {
      // one side contained the other: merge height is the point's own lambda
      heights[idx] = lambdas[index_into_children[row]];
    }
  }
}

/**
 * Overload for out-of-sample prediction points: the point's position in the
 * tree is taken from its nearest mutual-reachability neighbor
 * (min_mr_indices), and its own lambda comes from prediction_lambdas.
 * `n` is a FastIntDiv used for the row/col decomposition.
 */
template <typename value_idx, typename value_t, int tpb = 256>
__global__ void merge_height_kernel(value_t* heights,
                                    value_t* lambdas,
                                    value_t* prediction_lambdas,
                                    value_idx* min_mr_indices,
                                    value_idx* index_into_children,
                                    value_idx* parents,
                                    size_t n_prediction_points,
                                    value_idx n_selected_clusters,
                                    raft::util::FastIntDiv n,
                                    value_idx* selected_clusters)
{
  value_idx idx = blockDim.x * blockIdx.x + threadIdx.x;

  if (idx < value_idx(n_prediction_points * n_selected_clusters)) {
    value_idx row           = idx / n;
    value_idx col           = idx % n;
    value_idx right_cluster = selected_clusters[col];
    value_idx left_cluster  = parents[index_into_children[min_mr_indices[row]]];
    bool took_right_parent  = false;
    bool took_left_parent   = false;
    value_idx last_cluster;

    // same LCA climb as the in-sample overload above
    while (left_cluster != right_cluster) {
      if (left_cluster > right_cluster) {
        took_left_parent = true;
        last_cluster     = left_cluster;
        left_cluster     = parents[index_into_children[left_cluster]];
      } else {
        took_right_parent = true;
        last_cluster      = right_cluster;
        right_cluster     = parents[index_into_children[right_cluster]];
      }
    }

    if (took_left_parent && took_right_parent) {
      heights[idx] = lambdas[index_into_children[last_cluster]];
    } else {
      // prediction point sits inside the cluster: use its own lambda
      heights[idx] = prediction_lambdas[row];
    }
  }
}

};  // namespace Predict
};  // namespace detail
};  // namespace HDBSCAN
};  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/kernels/stabilities.cuh
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace ML { namespace HDBSCAN { namespace detail { namespace Stability { /** * Uses cluster distances, births, and sizes to compute stabilities * which are used for cluster selection. * @tparam value_idx * @tparam value_t */ template <typename value_idx, typename value_t> struct stabilities_functor { public: stabilities_functor(value_t* stabilities_, const value_t* births_, const value_idx* parents_, const value_t* lambdas_, const value_idx* sizes_, const value_idx n_leaves_) : stabilities(stabilities_), births(births_), parents(parents_), lambdas(lambdas_), sizes(sizes_), n_leaves(n_leaves_) { } __device__ void operator()(const int& idx) { auto parent = parents[idx] - n_leaves; atomicAdd(&stabilities[parent], (lambdas[idx] - births[parent]) * sizes[idx]); } private: value_t* stabilities; const value_t *births, *lambdas; const value_idx *parents, *sizes, n_leaves; }; }; // namespace Stability }; // namespace detail }; // namespace HDBSCAN }; // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/kernels/condense.cuh
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

namespace ML {
namespace HDBSCAN {
namespace detail {
namespace Condense {

// Converts a merge distance into a lambda value (1/delta); non-positive
// deltas map to the largest representable lambda.
template <typename value_idx, typename value_t>
__device__ inline value_t get_lambda(value_idx node, value_idx num_points, const value_t* deltas)
{
  value_t delta = deltas[node - num_points];
  return delta > 0.0 ? 1.0 / delta : std::numeric_limits<value_t>::max();
}

/**
 * Performs a breadth-first search for a single level of the dendrogram, which
 * is a binary tree, and collapses subtrees based on the min_cluster_size. The
 * arrays `relabel` and `ignore` are used to track state throughout subsequent
 * launches of this kernel. Nodes whose ancestors are "reassigned" inherit
 * the lambda value of their new parent.
 *
 * Note: This implementation differs from the reference implementation and
 * exposes more parallelism by having any collapsed branches directly
 * inherit the id of the persisted ancestor, rather than having to maintain
 * a synchronized monotonically increasing counter. In this version, a
 * renumbering is done afterwards, in parallel. The assumption here is that
 * a topological sort order should result from sorting the resulting
 * condensed dendrogram by (cluster size, id).
 *
 * @tparam value_idx
 * @tparam value_t
 * @param frontier determines which nodes should be processed in
 *                 each iteration.
 * @param next_frontier frontier flags for the next kernel invocation;
 *                 children of processed nodes are set here.
 * @param ignore Should be initialized to -1. Maintains the lambda
 *               value of the new parent for each child of a subtree
 *               in the process of being collapsed. For example,
 *               ignore[5] = 0.9 means that all children of node w/
 *               id 5 should be placed in the condensed tree with
 *               parent relabel[5] and lambda=0.9.
 * @param relabel relabel[0] should be initialized to root and
 *                propagated to subtrees as they are collapsed. This
 *                array stores the new parent that should be assigned
 *                for all nodes in a subtree that is in the process
 *                of being collapsed. For example, relabel[5] = 9
 *                means that node with id 5 should be assigned parent
 *                9 when ignore[5] > -1.
 * @param children binary tree dendrogram as renumbered by single-linkage
 *                 agglomerative labeling process.
 * @param deltas array of distances as constructed by the single-linkage
 *               agglomerative labeling process.
 * @param sizes array of cluster sizes as constructed by the single-linkage
 *              agglomerative labeling process.
 * @param n_leaves number of non-cluster data points
 * @param min_cluster_size while performing a bfs from the root of the
 *                         dendrogram, any subtrees below this size will
 *                         be collapsed into their parent cluster.
 * @param out_parent parents array of output dendrogram. this will no longer
 *                   be a binary tree.
 * @param out_child children array of output dendrogram. this will no longer
 *                  be a binary tree.
 * @param out_lambda lambda array of output dendrogram.
 * @param out_count children cluster sizes of output dendrogram.
 */
template <typename value_idx, typename value_t>
__global__ void condense_hierarchy_kernel(bool* frontier,
                                          bool* next_frontier,
                                          value_t* ignore,
                                          value_idx* relabel,
                                          const value_idx* children,
                                          const value_t* deltas,
                                          const value_idx* sizes,
                                          int n_leaves,
                                          int min_cluster_size,
                                          value_idx* out_parent,
                                          value_idx* out_child,
                                          value_t* out_lambda,
                                          value_idx* out_count)
{
  int node = blockDim.x * blockIdx.x + threadIdx.x;

  // only process nodes flagged in the current frontier
  if (node >= n_leaves * 2 - 1 || !frontier[node]) return;

  frontier[node] = false;

  value_t subtree_lambda = ignore[node];

  // ignore[node] > -1 means this node belongs to a subtree being collapsed
  bool should_ignore = subtree_lambda > -1;

  // TODO: Investigate whether this would be better done w/ an additional
  // kernel launch

  // If node is a leaf, add it to the condensed hierarchy
  if (node < n_leaves) {
    out_parent[node * 2] = relabel[node];
    out_child[node * 2]  = node;
    out_lambda[node * 2] = subtree_lambda;
    out_count[node * 2]  = 1;
  }

  // If node is not a leaf, condense its children if necessary
  else {
    value_idx left_child  = children[(node - n_leaves) * 2];
    value_idx right_child = children[((node - n_leaves) * 2) + 1];

    // flip frontier for children
    next_frontier[left_child]  = true;
    next_frontier[right_child] = true;

    // propagate ignore down to children
    ignore[left_child]  = should_ignore ? subtree_lambda : -1;
    ignore[right_child] = should_ignore ? subtree_lambda : -1;

    relabel[left_child]  = should_ignore ? relabel[node] : relabel[left_child];
    relabel[right_child] = should_ignore ? relabel[node] : relabel[right_child];

    value_idx node_relabel = relabel[node];

    // TODO: Should be able to remove this nested conditional
    if (!should_ignore) {
      value_t lambda_value = get_lambda(node, n_leaves, deltas);

      int left_count  = left_child >= n_leaves ? sizes[left_child - n_leaves] : 1;
      int right_count = right_child >= n_leaves ? sizes[right_child - n_leaves] : 1;

      // Consume left or right child as necessary
      bool left_child_too_small  = left_count < min_cluster_size;
      bool right_child_too_small = right_count < min_cluster_size;

      // Node can "persist" to the cluster tree only if
      // both children >= min_cluster_size
      bool can_persist = !left_child_too_small && !right_child_too_small;

      relabel[left_child]  = !can_persist ? node_relabel : left_child;
      relabel[right_child] = !can_persist ? node_relabel : right_child;

      // set ignore for children. This is the node at which the "points underneath fall out"
      ignore[left_child]  = left_child_too_small ? lambda_value : -1;
      ignore[right_child] = right_child_too_small ? lambda_value : -1;

      // If both children are large enough, they should be relabeled and
      // included directly in the output hierarchy.
      if (can_persist) {
        // TODO: Could probably pull this out if this conditional becomes
        // a bottleneck
        out_parent[node * 2] = node_relabel;
        out_child[node * 2]  = left_child;
        out_lambda[node * 2] = lambda_value;
        out_count[node * 2]  = left_count;

        out_parent[node * 2 + 1] = node_relabel;
        out_child[node * 2 + 1]  = right_child;
        out_lambda[node * 2 + 1] = lambda_value;
        out_count[node * 2 + 1]  = right_count;
      }
    }
  }
}

};  // end namespace Condense
};  // end namespace detail
};  // end namespace HDBSCAN
};  // end namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/kernels/membership.cuh
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace ML { namespace HDBSCAN { namespace detail { namespace Membership { template <typename value_idx, typename value_t> struct probabilities_functor { public: probabilities_functor(value_t* probabilities_, const value_t* deaths_, const value_idx* children_, const value_t* lambdas_, const value_idx* labels_, const value_idx root_cluster_) : probabilities(probabilities_), deaths(deaths_), children(children_), lambdas(lambdas_), labels(labels_), root_cluster(root_cluster_) { } __device__ void operator()(const value_idx& idx) { auto child = children[idx]; // intermediate nodes if (child >= root_cluster) { return; } auto cluster = labels[child]; // noise if (cluster == -1) { return; } auto cluster_death = deaths[cluster]; auto child_lambda = lambdas[idx]; if (cluster_death == 0.0 || isnan(child_lambda)) { probabilities[child] = 1.0; } else { auto min_lambda = min(child_lambda, cluster_death); probabilities[child] = min_lambda / cluster_death; } } private: value_t* probabilities; const value_t *deaths, *lambdas; const value_idx *children, *labels, root_cluster; }; }; // namespace Membership }; // namespace detail }; // namespace HDBSCAN }; // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail
rapidsai_public_repos/cuml/cpp/src/hdbscan/detail/kernels/predict.cuh
/*
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

namespace ML {
namespace HDBSCAN {
namespace detail {
namespace Predict {

/**
 * For each prediction point, scans its `neighborhood` nearest training
 * neighbors and records the neighbor minimizing the mutual reachability
 * distance: max(prediction core dist, neighbor core dist, pairwise dist).
 * One thread per prediction point.
 */
template <typename value_idx, typename value_t>
__global__ void min_mutual_reachability_kernel(value_t* input_core_dists,
                                               value_t* prediction_core_dists,
                                               value_t* pairwise_dists,
                                               value_idx* neighbor_indices,
                                               size_t n_prediction_points,
                                               value_idx neighborhood,
                                               value_t* min_mr_dists,
                                               value_idx* min_mr_indices)
{
  value_idx idx = blockDim.x * blockIdx.x + threadIdx.x;

  if (idx < value_idx(n_prediction_points)) {
    value_t min_mr_dist  = std::numeric_limits<value_t>::max();
    value_idx min_mr_ind = -1;

    for (int i = 0; i < neighborhood; i++) {
      // mutual reachability = max of the two core distances and the raw distance
      value_t mr_dist = prediction_core_dists[idx];
      if (input_core_dists[neighbor_indices[idx * neighborhood + i]] > mr_dist) {
        mr_dist = input_core_dists[neighbor_indices[idx * neighborhood + i]];
      }
      if (pairwise_dists[idx * neighborhood + i] > mr_dist) {
        mr_dist = pairwise_dists[idx * neighborhood + i];
      }

      if (min_mr_dist > mr_dist) {
        min_mr_dist = mr_dist;
        min_mr_ind  = neighbor_indices[idx * neighborhood + i];
      }
    }

    min_mr_dists[idx]   = min_mr_dist;
    min_mr_indices[idx] = min_mr_ind;
  }
  return;
}

/**
 * Assigns each prediction point the cluster label of its closest
 * mutual-reachability neighbor (or -1 for noise) and the corresponding
 * membership probability min(max_lambda, lambda) / max_lambda.
 * One thread per prediction point.
 */
template <typename value_idx, typename value_t>
__global__ void cluster_probability_kernel(value_idx* min_mr_indices,
                                           value_t* prediction_lambdas,
                                           value_idx* index_into_children,
                                           value_idx* labels,
                                           value_t* deaths,
                                           value_idx* selected_clusters,
                                           value_idx* parents,
                                           value_t* lambdas,
                                           value_idx n_leaves,
                                           size_t n_prediction_points,
                                           value_idx* predicted_labels,
                                           value_t* cluster_probabilities)
{
  value_idx idx = blockDim.x * blockIdx.x + threadIdx.x;

  if (idx < value_idx(n_prediction_points)) {
    value_idx cluster_label = labels[min_mr_indices[idx]];

    // accept the neighbor's label only if the point attaches below the
    // cluster's formation lambda (or the cluster is the root, == n_leaves)
    if (cluster_label >= 0 && selected_clusters[cluster_label] > n_leaves &&
        lambdas[index_into_children[selected_clusters[cluster_label]]] < prediction_lambdas[idx]) {
      predicted_labels[idx] = cluster_label;
    } else if (cluster_label >= 0 && selected_clusters[cluster_label] == n_leaves) {
      predicted_labels[idx] = cluster_label;
    } else {
      predicted_labels[idx] = -1;
    }

    if (predicted_labels[idx] >= 0) {
      value_t max_lambda = deaths[selected_clusters[cluster_label] - n_leaves];
      if (max_lambda > 0) {
        cluster_probabilities[idx] =
          (max_lambda < prediction_lambdas[idx] ? max_lambda : prediction_lambdas[idx]) /
          max_lambda;
      } else {
        // degenerate cluster lifetime: full membership
        cluster_probabilities[idx] = 1.0;
      }
    } else {
      // noise
      cluster_probabilities[idx] = 0.0;
    }
  }
  return;
}

};  // namespace Predict
};  // namespace detail
};  // namespace HDBSCAN
};  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/umap/umap.cu
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runner.cuh"
#include <cuml/manifold/common.hpp>
#include <cuml/manifold/umap.hpp>
#include <cuml/manifold/umapparams.h>

#include <raft/core/handle.hpp>
#include <raft/util/cuda_utils.cuh>

#include <iostream>

namespace ML {
namespace UMAP {

// threads per block used by all UMAP kernel dispatches in this file
static const int TPB_X = 256;

// Solves for the a/b curve-fit parameters of the UMAP loss on the handle's stream.
void find_ab(const raft::handle_t& handle, UMAPParams* params)
{
  cudaStream_t stream = handle.get_stream();
  UMAPAlgo::find_ab(params, stream);
}

// Builds and returns the fuzzy simplicial set (graph) for the input,
// dispatching on (precomputed KNN?) x (supervised?).
std::unique_ptr<raft::sparse::COO<float, int>> get_graph(
  const raft::handle_t& handle,
  float* X,  // input matrix
  float* y,  // labels
  int n,
  int d,
  knn_indices_dense_t* knn_indices,  // precomputed indices
  float* knn_dists,                  // precomputed distances
  UMAPParams* params)
{
  auto graph = std::make_unique<raft::sparse::COO<float>>(handle.get_stream());

  if (knn_indices != nullptr && knn_dists != nullptr) {
    CUML_LOG_DEBUG("Calling UMAP::get_graph() with precomputed KNN");

    manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float> inputs(
      knn_indices, knn_dists, y, n, d, params->n_neighbors);
    if (y != nullptr) {
      UMAPAlgo::_get_graph_supervised<knn_indices_dense_t,
                                      float,
                                      manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float>,
                                      TPB_X>(handle, inputs, params, graph.get());
    } else {
      UMAPAlgo::_get_graph<knn_indices_dense_t,
                           float,
                           manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float>,
                           TPB_X>(handle, inputs, params, graph.get());
    }
    return graph;
  } else {
    manifold_dense_inputs_t<float> inputs(X, y, n, d);
    if (y != nullptr) {
      UMAPAlgo::
        _get_graph_supervised<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>(
          handle, inputs, params, graph.get());
    } else {
      UMAPAlgo::_get_graph<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>(
        handle, inputs, params, graph.get());
    }
    return graph;
  }
}

// Optimizes an existing embedding against a precomputed graph.
void refine(const raft::handle_t& handle,
            float* X,
            int n,
            int d,
            raft::sparse::COO<float>* graph,
            UMAPParams* params,
            float* embeddings)
{
  CUML_LOG_DEBUG("Calling UMAP::refine() with precomputed KNN");
  manifold_dense_inputs_t<float> inputs(X, nullptr, n, d);
  UMAPAlgo::_refine<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>(
    handle, inputs, params, graph, embeddings);
}

// Full dense fit, dispatching on (precomputed KNN?) x (supervised?).
void fit(const raft::handle_t& handle,
         float* X,
         float* y,
         int n,
         int d,
         knn_indices_dense_t* knn_indices,
         float* knn_dists,
         UMAPParams* params,
         float* embeddings,
         raft::sparse::COO<float, int>* graph)
{
  if (knn_indices != nullptr && knn_dists != nullptr) {
    CUML_LOG_DEBUG("Calling UMAP::fit() with precomputed KNN");

    manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float> inputs(
      knn_indices, knn_dists, y, n, d, params->n_neighbors);
    if (y != nullptr) {
      UMAPAlgo::_fit_supervised<knn_indices_dense_t,
                                float,
                                manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float>,
                                TPB_X>(handle, inputs, params, embeddings, graph);
    } else {
      UMAPAlgo::_fit<knn_indices_dense_t,
                     float,
                     manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float>,
                     TPB_X>(handle, inputs, params, embeddings, graph);
    }
  } else {
    manifold_dense_inputs_t<float> inputs(X, y, n, d);
    if (y != nullptr) {
      UMAPAlgo::_fit_supervised<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>(
        handle, inputs, params, embeddings, graph);
    } else {
      UMAPAlgo::_fit<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>(
        handle, inputs, params, embeddings, graph);
    }
  }
}

// Sparse (CSR) fit, dispatching on (precomputed KNN?) x (supervised?).
void fit_sparse(const raft::handle_t& handle,
                int* indptr,
                int* indices,
                float* data,
                size_t nnz,
                float* y,
                int n,
                int d,
                int* knn_indices,
                float* knn_dists,
                UMAPParams* params,
                float* embeddings,
                raft::sparse::COO<float, int>* graph)
{
  if (knn_indices != nullptr && knn_dists != nullptr) {
    manifold_precomputed_knn_inputs_t<knn_indices_sparse_t, float> inputs(
      knn_indices, knn_dists, y, n, d, params->n_neighbors);
    if (y != nullptr) {
      UMAPAlgo::_fit_supervised<knn_indices_sparse_t,
                                float,
                                manifold_precomputed_knn_inputs_t<knn_indices_sparse_t, float>,
                                TPB_X>(handle, inputs, params, embeddings, graph);
    } else {
      UMAPAlgo::_fit<knn_indices_sparse_t,
                     float,
                     manifold_precomputed_knn_inputs_t<knn_indices_sparse_t, float>,
                     TPB_X>(handle, inputs, params, embeddings, graph);
    }
  } else {
    manifold_sparse_inputs_t<int, float> inputs(indptr, indices, data, y, nnz, n, d);
    if (y != nullptr) {
      UMAPAlgo::_fit_supervised<knn_indices_sparse_t,
                                float,
                                manifold_sparse_inputs_t<knn_indices_sparse_t, float>,
                                TPB_X>(handle, inputs, params, embeddings, graph);
    } else {
      UMAPAlgo::_fit<knn_indices_sparse_t,
                     float,
                     manifold_sparse_inputs_t<knn_indices_sparse_t, float>,
                     TPB_X>(handle, inputs, params, embeddings, graph);
    }
  }
}

// Projects new dense points into an existing embedding.
void transform(const raft::handle_t& handle,
               float* X,
               int n,
               int d,
               float* orig_X,
               int orig_n,
               float* embedding,
               int embedding_n,
               UMAPParams* params,
               float* transformed)
{
  manifold_dense_inputs_t<float> inputs(X, nullptr, n, d);
  manifold_dense_inputs_t<float> orig_inputs(orig_X, nullptr, orig_n, d);
  UMAPAlgo::_transform<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>(
    handle, inputs, orig_inputs, embedding, embedding_n, params, transformed);
}

// Projects new sparse (CSR) points into an existing embedding.
void transform_sparse(const raft::handle_t& handle,
                      int* indptr,
                      int* indices,
                      float* data,
                      size_t nnz,
                      int n,
                      int d,
                      int* orig_x_indptr,
                      int* orig_x_indices,
                      float* orig_x_data,
                      size_t orig_nnz,
                      int orig_n,
                      float* embedding,
                      int embedding_n,
                      UMAPParams* params,
                      float* transformed)
{
  manifold_sparse_inputs_t<knn_indices_sparse_t, float> inputs(
    indptr, indices, data, nullptr, nnz, n, d);
  manifold_sparse_inputs_t<knn_indices_sparse_t, float> orig_x_inputs(
    orig_x_indptr, orig_x_indices, orig_x_data, nullptr, orig_nnz, orig_n, d);

  UMAPAlgo::_transform<knn_indices_sparse_t, float, manifold_sparse_inputs_t<int, float>, TPB_X>(
    handle, inputs, orig_x_inputs, embedding, embedding_n, params, transformed);
}

}  // namespace UMAP
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/umap/supervised.cuh
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "optimize.cuh"

#include <cuml/common/logger.hpp>
#include <cuml/manifold/umapparams.h>
#include <cuml/neighbors/knn.hpp>

#include <raft/core/handle.hpp>

#include "fuzzy_simpl_set/runner.cuh"
#include "init_embed/runner.cuh"
#include "knn_graph/runner.cuh"
#include "simpl_set_embed/runner.cuh"

#include <raft/util/cudart_utils.hpp>

#include <thrust/count.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/system/cuda/execution_policy.h>

#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/linalg/add.cuh>
#include <raft/sparse/linalg/norm.cuh>
#include <raft/sparse/linalg/symmetrize.cuh>
#include <raft/sparse/op/filter.cuh>
#include <raft/util/cuda_utils.cuh>

#include <cuda_runtime.h>

namespace UMAPAlgo {
namespace Supervised {

using namespace ML;

// Down-weights graph edges whose endpoints have unknown (-1) or differing
// categorical targets. One thread per COO nonzero.
template <int TPB_X, typename T>
__global__ void fast_intersection_kernel(
  int* rows, int* cols, T* vals, int nnz, T* target, float unknown_dist = 1.0, float far_dist = 5.0)
{
  int row = (blockIdx.x * TPB_X) + threadIdx.x;
  if (row < nnz) {
    int i = rows[row];
    int j = cols[row];
    // -1 marks an unlabeled point
    if (target[i] == T(-1.0) || target[j] == T(-1.0))
      vals[row] *= exp(-unknown_dist);
    else if (target[i] != target[j])
      vals[row] *= exp(-far_dist);
  }
}

// L-inf row-normalizes the graph in place and re-symmetrizes it via
// fuzzy-union (a + b - a*b) into out_coo.
template <typename T, int TPB_X>
void reset_local_connectivity(raft::sparse::COO<T>* in_coo,
                              raft::sparse::COO<T>* out_coo,
                              cudaStream_t stream  // size = nnz*2
)
{
  rmm::device_uvector<int> row_ind(in_coo->n_rows, stream);

  raft::sparse::convert::sorted_coo_to_csr(in_coo, row_ind.data(), stream);

  // Perform l_inf normalization
  raft::sparse::linalg::csr_row_normalize_max<T>(
    row_ind.data(), in_coo->vals(), in_coo->nnz, in_coo->n_rows, in_coo->vals(), stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  // fuzzy-union symmetrization: a + b - a*b
  raft::sparse::linalg::coo_symmetrize<T>(
    in_coo,
    out_coo,
    [] __device__(int row, int col, T result, T transpose) {
      T prod_matrix = result * transpose;
      return result + transpose - prod_matrix;
    },
    stream);

  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/**
 * Combine a fuzzy simplicial set with another fuzzy simplicial set
 * generated from categorical data using categorical distances. The target
 * data is assumed to be categorical label data (a vector of labels),
 * and this will update the fuzzy simplicial set to respect that label
 * data.
 */
template <typename value_t, int TPB_X>
void categorical_simplicial_set_intersection(raft::sparse::COO<value_t>* graph_coo,
                                             value_t* target,
                                             cudaStream_t stream,
                                             float far_dist     = 5.0,
                                             float unknown_dist = 1.0)
{
  dim3 grid(raft::ceildiv(graph_coo->nnz, TPB_X), 1, 1);
  dim3 blk(TPB_X, 1, 1);
  fast_intersection_kernel<TPB_X, value_t><<<grid, blk, 0, stream>>>(graph_coo->rows(),
                                                                     graph_coo->cols(),
                                                                     graph_coo->vals(),
                                                                     graph_coo->nnz,
                                                                     target,
                                                                     unknown_dist,
                                                                     far_dist);
}

// Blends two CSR simplicial sets per row via a weighted geometric mix
// controlled by mix_weight; entries missing from one side fall back to that
// side's minimum value. One thread per row of the result.
template <typename value_t, int TPB_X>
__global__ void sset_intersection_kernel(int* row_ind1,
                                         int* cols1,
                                         value_t* vals1,
                                         int nnz1,
                                         int* row_ind2,
                                         int* cols2,
                                         value_t* vals2,
                                         int nnz2,
                                         int* result_ind,
                                         int* result_cols,
                                         value_t* result_vals,
                                         int nnz,
                                         value_t left_min,
                                         value_t right_min,
                                         int m,
                                         float mix_weight = 0.5)
{
  int row = (blockIdx.x * TPB_X) + threadIdx.x;

  if (row < m) {
    int start_idx_res = result_ind[row];
    int stop_idx_res  = raft::sparse::get_stop_idx(row, m, nnz, result_ind);

    int start_idx1 = row_ind1[row];
    int stop_idx1  = raft::sparse::get_stop_idx(row, m, nnz1, row_ind1);

    int start_idx2 = row_ind2[row];
    int stop_idx2  = raft::sparse::get_stop_idx(row, m, nnz2, row_ind2);

    for (int j = start_idx_res; j < stop_idx_res; j++) {
      int col = result_cols[j];

      // linear scan of each input row for the matching column
      value_t left_val = left_min;
      for (int k = start_idx1; k < stop_idx1; k++) {
        if (cols1[k] == col) { left_val = vals1[k]; }
      }

      value_t right_val = right_min;
      for (int k = start_idx2; k < stop_idx2; k++) {
        if (cols2[k] == col) { right_val = vals2[k]; }
      }

      if (left_val > left_min || right_val > right_min) {
        if (mix_weight < 0.5) {
          result_vals[j] = left_val * powf(right_val, mix_weight / (1.0 - mix_weight));
        } else {
          result_vals[j] = powf(left_val, (1.0 - mix_weight) / mix_weight) * right_val;
        }
      }
    }
  }
}

/**
 * Computes the CSR column index pointer and values
 * for the general simplicial set intersection.
 */
template <typename T, int TPB_X>
void general_simplicial_set_intersection(int* row1_ind,
                                         raft::sparse::COO<T>* in1,
                                         int* row2_ind,
                                         raft::sparse::COO<T>* in2,
                                         raft::sparse::COO<T>* result,
                                         float weight,
                                         cudaStream_t stream)
{
  rmm::device_uvector<int> result_ind(in1->n_rows, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(result_ind.data(), 0, in1->n_rows * sizeof(int), stream));

  // first pass: compute the sparsity pattern of the sum
  int result_nnz = raft::sparse::linalg::csr_add_calc_inds<float>(row1_ind,
                                                                  in1->cols(),
                                                                  in1->vals(),
                                                                  in1->nnz,
                                                                  row2_ind,
                                                                  in2->cols(),
                                                                  in2->vals(),
                                                                  in2->nnz,
                                                                  in1->n_rows,
                                                                  result_ind.data(),
                                                                  stream);

  result->allocate(result_nnz, in1->n_rows, in1->n_cols, true, stream);

  /**
   * Element-wise sum of two simplicial sets
   */
  raft::sparse::linalg::csr_add_finalize<float>(row1_ind,
                                                in1->cols(),
                                                in1->vals(),
                                                in1->nnz,
                                                row2_ind,
                                                in2->cols(),
                                                in2->vals(),
                                                in2->nnz,
                                                in1->n_rows,
                                                result_ind.data(),
                                                result->cols(),
                                                result->vals(),
                                                stream);

  //@todo: Write a wrapper function for this
  raft::sparse::convert::csr_to_coo<int>(
    result_ind.data(), result->n_rows, result->rows(), result->nnz, stream);

  // per-input minimums act as the "absent entry" fallback in the mix kernel
  thrust::device_ptr<const T> d_ptr1 = thrust::device_pointer_cast(in1->vals());
  T min1 = *(thrust::min_element(thrust::cuda::par.on(stream), d_ptr1, d_ptr1 + in1->nnz));

  thrust::device_ptr<const T> d_ptr2 = thrust::device_pointer_cast(in2->vals());
  T min2 = *(thrust::min_element(thrust::cuda::par.on(stream), d_ptr2, d_ptr2 + in2->nnz));

  T left_min  = max(min1 / 2.0, 1e-8);
  T right_min = max(min2 / 2.0, 1e-8);

  dim3 grid(raft::ceildiv(in1->nnz, TPB_X), 1, 1);
  dim3 blk(TPB_X, 1, 1);

  sset_intersection_kernel<T, TPB_X><<<grid, blk, 0, stream>>>(row1_ind,
                                                               in1->cols(),
                                                               in1->vals(),
                                                               in1->nnz,
                                                               row2_ind,
                                                               in2->cols(),
                                                               in2->vals(),
                                                               in2->nnz,
                                                               result_ind.data(),
                                                               result->cols(),
                                                               result->vals(),
                                                               result->nnz,
                                                               left_min,
                                                               right_min,
                                                               in1->n_rows,
                                                               weight);
  RAFT_CUDA_TRY(cudaGetLastError());

  dim3 grid_n(raft::ceildiv(result->nnz, TPB_X), 1, 1);
}

// Applies the categorical intersection to the graph, prunes zeroed edges,
// and restores local connectivity into final_coo.
template <int TPB_X, typename T>
void perform_categorical_intersection(T* y,
                                      raft::sparse::COO<T>* rgraph_coo,
                                      raft::sparse::COO<T>* final_coo,
                                      UMAPParams* params,
                                      cudaStream_t stream)
{
  float far_dist = 1.0e12;  // target weight
  if (params->target_weight < 1.0) far_dist = 2.5 * (1.0 / (1.0 - params->target_weight));

  categorical_simplicial_set_intersection<T, TPB_X>(rgraph_coo, y, stream, far_dist);

  raft::sparse::COO<T> comp_coo(stream);
  raft::sparse::op::coo_remove_zeros<T>(rgraph_coo, &comp_coo, stream);

  reset_local_connectivity<T, TPB_X>(&comp_coo, final_coo, stream);

  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

// Intersects the data graph with a fuzzy simplicial set built from a
// continuous target variable y (general, non-categorical case).
template <int TPB_X, typename value_idx, typename value_t>
void perform_general_intersection(const raft::handle_t& handle,
                                  value_t* y,
                                  raft::sparse::COO<value_t>* rgraph_coo,
                                  raft::sparse::COO<value_t>* final_coo,
                                  UMAPParams* params,
                                  cudaStream_t stream)
{
  /**
   * Calculate kNN for Y
   */
  int knn_dims = rgraph_coo->n_rows * params->target_n_neighbors;
  rmm::device_uvector<value_idx> y_knn_indices(knn_dims, stream);
  rmm::device_uvector<value_t> y_knn_dists(knn_dims, stream);

  knn_graph<value_idx, value_t> knn_graph(rgraph_coo->n_rows, params->target_n_neighbors);
  knn_graph.knn_indices = y_knn_indices.data();
  knn_graph.knn_dists =
y_knn_dists.data(); manifold_dense_inputs_t<value_t> y_inputs(y, nullptr, rgraph_coo->n_rows, 1); kNNGraph::run<value_idx, value_t, manifold_dense_inputs_t<value_t>>( handle, y_inputs, y_inputs, knn_graph, params->target_n_neighbors, params, stream); RAFT_CUDA_TRY(cudaPeekAtLastError()); if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { CUML_LOG_DEBUG("Target kNN Graph"); std::stringstream ss1, ss2; ss1 << raft::arr2Str( y_knn_indices.data(), rgraph_coo->n_rows * params->target_n_neighbors, "knn_indices", stream); CUML_LOG_DEBUG("%s", ss1.str().c_str()); ss2 << raft::arr2Str( y_knn_dists.data(), rgraph_coo->n_rows * params->target_n_neighbors, "knn_dists", stream); CUML_LOG_DEBUG("%s", ss2.str().c_str()); } /** * Compute fuzzy simplicial set */ raft::sparse::COO<value_t> ygraph_coo(stream); FuzzySimplSet::run<TPB_X, value_idx, value_t>(rgraph_coo->n_rows, y_knn_indices.data(), y_knn_dists.data(), params->target_n_neighbors, &ygraph_coo, params, stream); RAFT_CUDA_TRY(cudaPeekAtLastError()); if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { CUML_LOG_DEBUG("Target Fuzzy Simplicial Set"); std::stringstream ss; ss << ygraph_coo; CUML_LOG_DEBUG(ss.str().c_str()); } /** * Compute general simplicial set intersection. 
*/ rmm::device_uvector<int> xrow_ind(rgraph_coo->n_rows, stream); rmm::device_uvector<int> yrow_ind(ygraph_coo.n_rows, stream); RAFT_CUDA_TRY(cudaMemsetAsync(xrow_ind.data(), 0, rgraph_coo->n_rows * sizeof(int), stream)); RAFT_CUDA_TRY(cudaMemsetAsync(yrow_ind.data(), 0, ygraph_coo.n_rows * sizeof(int), stream)); raft::sparse::COO<value_t> cygraph_coo(stream); raft::sparse::op::coo_remove_zeros<value_t>(&ygraph_coo, &cygraph_coo, stream); raft::sparse::convert::sorted_coo_to_csr(&cygraph_coo, yrow_ind.data(), stream); raft::sparse::convert::sorted_coo_to_csr(rgraph_coo, xrow_ind.data(), stream); raft::sparse::COO<value_t> result_coo(stream); general_simplicial_set_intersection<value_t, TPB_X>(xrow_ind.data(), rgraph_coo, yrow_ind.data(), &cygraph_coo, &result_coo, params->target_weight, stream); /** * Remove zeros */ raft::sparse::COO<value_t> out(stream); raft::sparse::op::coo_remove_zeros<value_t>(&result_coo, &out, stream); reset_local_connectivity<value_t, TPB_X>(&out, final_coo, stream); RAFT_CUDA_TRY(cudaPeekAtLastError()); } } // namespace Supervised } // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/umap/optimize.cuh
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuml/common/logger.hpp>
#include <cuml/manifold/umapparams.h>

#include <raft/linalg/add.cuh>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/multiply.cuh>
#include <raft/linalg/power.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/stats/mean.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>

#include <cuda_runtime.h>

#include <memory>  // std::make_unique (was relied on transitively)

namespace UMAPAlgo {

namespace Optimize {

using namespace ML;

/**
 * Apply grad(x, a, b) element-wise to X, writing the result to output.
 * NaN results are clamped to 0 so a single bad point cannot poison the
 * gradient means downstream.
 */
template <typename T, int TPB_X, typename Lambda>
__global__ void map_kernel(T* output, T* X, int n_rows, T* coef, Lambda grad)
{
  int row = (blockIdx.x * TPB_X) + threadIdx.x;
  if (row < n_rows) {
    T x         = X[row];
    T a         = coef[0];
    T b         = coef[1];
    output[row] = grad(x, a, b);
    if (isnan(output[row])) output[row] = 0.0;
  }
}

/**
 * Evaluate the UMAP membership curve f(x) = 1 / (1 + a * x^(2b)) for a
 * one-dimensional set of x-values.
 *
 * @param input  device array of x-values (length n_rows)
 * @param n_rows number of values
 * @param coef   device array {a, b}
 * @param preds  device output array (length n_rows)
 * @param stream CUDA stream for the kernel launch. Defaults to the legacy
 *               default stream for backward compatibility; previously the
 *               launch always went to the default stream even when callers
 *               worked on an explicit stream.
 */
template <typename T, int TPB_X>
void f(T* input, int n_rows, T* coef, T* preds, cudaStream_t stream = 0)
{
  dim3 grid(raft::ceildiv(n_rows, TPB_X), 1, 1);
  dim3 blk(TPB_X, 1, 1);

  // Function: 1 / (1 + a * x^(2b))
  map_kernel<T, TPB_X><<<grid, blk, 0, stream>>>(
    preds, input, n_rows, coef, [] __device__(T x, T a, T b) {
      return 1.0 / (1 + a * pow(x, 2.0 * b));
    });
}

/**
 * Calculate the gradients for fitting parameters a and b
 * to a smooth function based on exponential decay.
 *
 * Computes mean over rows of d(residual)/da into grads[0] and
 * d(residual)/db into grads[1], where residual = f(x) - label.
 *
 * @param input  device x-values (length n_rows)
 * @param labels device target values (length n_rows)
 * @param coef   device {a, b}
 * @param grads  device output of length 2
 * @param params currently unused; kept for interface stability
 */
template <typename T, int TPB_X>
void abLossGrads(
  T* input, int n_rows, const T* labels, T* coef, T* grads, UMAPParams* params, cudaStream_t stream)
{
  dim3 grid(raft::ceildiv(n_rows, TPB_X), 1, 1);
  dim3 blk(TPB_X, 1, 1);

  /**
   * Calculate residuals
   */
  rmm::device_uvector<T> residuals(n_rows, stream);

  // Launch f on the same stream as the rest of this function (previously
  // it implicitly ran on the default stream).
  f<T, TPB_X>(input, n_rows, coef, residuals.data(), stream);
  raft::linalg::eltwiseSub(residuals.data(), residuals.data(), labels, n_rows, stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  /**
   * Gradient w/ respect to a
   */
  rmm::device_uvector<T> a_deriv(n_rows, stream);
  raft::copy(a_deriv.data(), input, n_rows, stream);
  map_kernel<T, TPB_X><<<grid, blk, 0, stream>>>(
    a_deriv.data(), a_deriv.data(), n_rows, coef, [] __device__ __host__(T x, T a, T b) {
      return -(pow(x, 2.0 * b)) / pow((1.0 + a * pow(x, 2.0 * b)), 2.0);
    });

  raft::linalg::eltwiseMultiply(a_deriv.data(), a_deriv.data(), residuals.data(), n_rows, stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  /**
   * Gradient w/ respect to b
   */
  rmm::device_uvector<T> b_deriv(n_rows, stream);
  raft::copy(b_deriv.data(), input, n_rows, stream);
  map_kernel<T, TPB_X><<<grid, blk, 0, stream>>>(
    b_deriv.data(), b_deriv.data(), n_rows, coef, [] __device__ __host__(T x, T a, T b) {
      return -(2.0 * a * pow(x, 2.0 * b) * log(x)) / pow(1 + a * pow(x, 2.0 * b), 2.0);
    });

  /**
   * Multiply partial derivs by residuals
   */
  raft::linalg::eltwiseMultiply(b_deriv.data(), b_deriv.data(), residuals.data(), n_rows, stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  /**
   * Finally, take the mean
   */
  raft::stats::mean(grads, a_deriv.data(), 1, n_rows, false, false, stream);
  raft::stats::mean(grads + 1, b_deriv.data(), 1, n_rows, false, false, stream);

  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/**
 * Perform non-linear gradient descent on coef = {a, b} until both gradient
 * magnitudes drop below `tolerance` or `max_epochs` iterations elapse.
 * coef is updated in place on the device.
 */
template <typename T, int TPB_X>
void optimize_params(T* input,
                     int n_rows,
                     const T* labels,
                     T* coef,
                     UMAPParams* params,
                     cudaStream_t stream,
                     float tolerance = 1e-6,
                     int max_epochs  = 25000)
{
  // Don't really need a learning rate since
  // we aren't using stochastic GD
  float learning_rate = 1.0;

  int num_iters = 0;
  int tol_grads = 0;

  // Allocate the gradient buffers once, outside the loop (previously a
  // device allocation and a host malloc/free happened every iteration).
  rmm::device_uvector<T> grads(2, stream);
  T grads_h[2];

  do {
    tol_grads = 0;
    RAFT_CUDA_TRY(cudaMemsetAsync(grads.data(), 0, 2 * sizeof(T), stream));

    abLossGrads<T, TPB_X>(input, n_rows, labels, coef, grads.data(), params, stream);

    raft::linalg::multiplyScalar(grads.data(), grads.data(), learning_rate, 2, stream);
    raft::linalg::eltwiseSub(coef, coef, grads.data(), 2, stream);

    raft::update_host(grads_h, grads.data(), 2, stream);
    raft::interruptible::synchronize(stream);

    for (int i = 0; i < 2; i++) {
      if (abs(grads_h[i]) - tolerance <= 0) tol_grads += 1;
    }
    num_iters += 1;
  } while (tol_grads < 2 && num_iters < max_epochs);
}

/**
 * Fit the curve y = 1 / (1 + a * x^(2b)) to an exponential-decay target
 * built from params->spread and params->min_dist, storing the fitted
 * a and b back into params.
 */
inline void find_params_ab(UMAPParams* params, cudaStream_t stream)
{
  float spread   = params->spread;
  float min_dist = params->min_dist;

  // Sample the target curve at n_samples evenly spaced points in
  // [0, 3 * spread].
  constexpr int n_samples = 300;
  float step              = (spread * 3.0) / n_samples;

  auto const X_uptr = std::make_unique<float[]>(n_samples);
  auto const y_uptr = std::make_unique<float[]>(n_samples);
  auto* const X     = X_uptr.get();
  auto* const y     = y_uptr.get();

  for (int i = 0; i < n_samples; i++) {
    X[i] = i * step;
    y[i] = 0.0;
    if (X[i] >= min_dist)
      y[i] = exp(-(X[i] - min_dist) / spread);
    else if (X[i] < min_dist)
      y[i] = 1.0;
  }

  rmm::device_uvector<float> X_d(n_samples, stream);
  raft::update_device(X_d.data(), X, n_samples, stream);

  rmm::device_uvector<float> y_d(n_samples, stream);
  raft::update_device(y_d.data(), y, n_samples, stream);

  // Initial guess a = b = 1.0 (the update_device below fully overwrites
  // the buffer, so no memset is needed first).
  auto const coeffs_h_uptr = std::make_unique<float[]>(2);
  auto* const coeffs_h     = coeffs_h_uptr.get();
  coeffs_h[0]              = 1.0;
  coeffs_h[1]              = 1.0;

  rmm::device_uvector<float> coeffs(2, stream);
  raft::update_device(coeffs.data(), coeffs_h, 2, stream);

  optimize_params<float, 256>(X_d.data(), n_samples, y_d.data(), coeffs.data(), params, stream);

  raft::update_host(&(params->a), coeffs.data(), 1, stream);
  raft::update_host(&(params->b), coeffs.data() + 1, 1, stream);
  raft::interruptible::synchronize(stream);

  CUML_LOG_DEBUG("a=%f, b=%f", params->a, params->b);
}

}  // namespace Optimize
}  // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/umap/runner.cuh
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "optimize.cuh"
#include "supervised.cuh"
#include <cuml/common/logger.hpp>
#include <cuml/manifold/common.hpp>
#include <cuml/manifold/umapparams.h>

#include "fuzzy_simpl_set/runner.cuh"
#include "init_embed/runner.cuh"
#include "knn_graph/runner.cuh"
#include "simpl_set_embed/runner.cuh"

#include <memory>

#include <thrust/count.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/system/cuda/execution_policy.h>

#include <raft/core/handle.hpp>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/linalg/norm.cuh>
#include <raft/sparse/op/filter.cuh>
#include <raft/sparse/op/sort.cuh>
#include <raft/util/cuda_utils.cuh>

#include <common/nvtx.hpp>

#include <cuda_runtime.h>

#include <raft/core/nvtx.hpp>

namespace UMAPAlgo {

// Swap this as impls change for now.
namespace FuzzySimplSetImpl = FuzzySimplSet::Naive;
namespace SimplSetEmbedImpl = SimplSetEmbed::Algo;

using namespace ML;

/**
 * Initialize the embedding of each transformed point as the weighted sum of
 * its neighbors' training embeddings: result[row] += weights[row, j] *
 * embeddings[indices[row, j]] over the row's n_neighbors entries.
 * One thread per row; `result` is accumulated into, so it must be
 * zero-initialized by the caller.
 * NOTE(review): `embeddings_n` is accepted but never read in this kernel.
 */
template <int TPB_X, typename T>
__global__ void init_transform(int* indices,
                               T* weights,
                               int n,
                               const T* embeddings,
                               int embeddings_n,
                               int n_components,
                               T* result,
                               int n_neighbors)
{
  // row-based matrix 1 thread per row
  int row = (blockIdx.x * TPB_X) + threadIdx.x;
  int i   = row * n_neighbors;  // each thread processes one row of the dist matrix

  if (row < n) {
    for (int j = 0; j < n_neighbors; j++) {
      for (int d = 0; d < n_components; d++) {
        result[row * n_components + d] +=
          weights[i + j] * embeddings[indices[i + j] * n_components + d];
      }
    }
  }
}

/**
 * Fit exponential decay curve to find the parameters
 * a and b, which are based on min_dist and spread
 * parameters.
 */
inline void find_ab(UMAPParams* params, cudaStream_t stream)
{
  Optimize::find_params_ab(params, stream);
}

/**
 * Build the unsupervised fuzzy simplicial set for `inputs`: compute (or
 * accept precomputed) kNN, run FuzzySimplSet, and strip explicit zeros
 * into `graph`.
 */
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _get_graph(const raft::handle_t& handle,
                const umap_inputs& inputs,
                UMAPParams* params,
                raft::sparse::COO<value_t, int>* graph)
{
  raft::common::nvtx::range fun_scope("umap::supervised::_get_graph");
  cudaStream_t stream = handle.get_stream();

  int k = params->n_neighbors;

  ML::Logger::get().setLevel(params->verbosity);

  CUML_LOG_DEBUG("n_neighbors=%d", params->n_neighbors);

  raft::common::nvtx::push_range("umap::knnGraph");
  std::unique_ptr<rmm::device_uvector<value_idx>> knn_indices_b = nullptr;
  std::unique_ptr<rmm::device_uvector<value_t>> knn_dists_b     = nullptr;

  knn_graph<value_idx, value_t> knn_graph(inputs.n, k);

  /**
   * If not given precomputed knn graph, compute it
   */
  if (inputs.alloc_knn_graph()) {
    /**
     * Allocate workspace for kNN graph
     */
    knn_indices_b = std::make_unique<rmm::device_uvector<value_idx>>(inputs.n * k, stream);
    knn_dists_b   = std::make_unique<rmm::device_uvector<value_t>>(inputs.n * k, stream);

    knn_graph.knn_indices = knn_indices_b->data();
    knn_graph.knn_dists   = knn_dists_b->data();
  }

  CUML_LOG_DEBUG("Calling knn graph run");

  kNNGraph::run<value_idx, value_t, umap_inputs>(
    handle, inputs, inputs, knn_graph, k, params, stream);
  raft::common::nvtx::pop_range();

  CUML_LOG_DEBUG("Done. Calling fuzzy simplicial set");

  raft::common::nvtx::push_range("umap::simplicial_set");
  raft::sparse::COO<value_t> fss_graph(stream);
  FuzzySimplSet::run<TPB_X, value_idx, value_t>(
    inputs.n, knn_graph.knn_indices, knn_graph.knn_dists, k, &fss_graph, params, stream);

  CUML_LOG_DEBUG("Done. Calling remove zeros");

  /**
   * Remove zeros from simplicial set
   */
  raft::sparse::op::coo_remove_zeros<value_t>(&fss_graph, graph, stream);
  raft::common::nvtx::pop_range();
}

/**
 * Build the supervised fuzzy simplicial set: same as _get_graph, then
 * intersect with the label/target information (categorical or general)
 * before sorting and stripping zeros into `graph`.
 */
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _get_graph_supervised(const raft::handle_t& handle,
                           const umap_inputs& inputs,
                           UMAPParams* params,
                           raft::sparse::COO<value_t, int>* graph)
{
  raft::common::nvtx::range fun_scope("umap::supervised::_get_graph_supervised");
  cudaStream_t stream = handle.get_stream();

  int k = params->n_neighbors;

  ML::Logger::get().setLevel(params->verbosity);

  // Default the target neighborhood size to the data neighborhood size.
  if (params->target_n_neighbors == -1) params->target_n_neighbors = params->n_neighbors;

  raft::common::nvtx::push_range("umap::knnGraph");
  std::unique_ptr<rmm::device_uvector<value_idx>> knn_indices_b = nullptr;
  std::unique_ptr<rmm::device_uvector<value_t>> knn_dists_b     = nullptr;

  knn_graph<value_idx, value_t> knn_graph(inputs.n, k);

  /**
   * If not given precomputed knn graph, compute it
   */
  if (inputs.alloc_knn_graph()) {
    /**
     * Allocate workspace for kNN graph
     */
    knn_indices_b = std::make_unique<rmm::device_uvector<value_idx>>(inputs.n * k, stream);
    knn_dists_b   = std::make_unique<rmm::device_uvector<value_t>>(inputs.n * k, stream);

    knn_graph.knn_indices = knn_indices_b->data();
    knn_graph.knn_dists   = knn_dists_b->data();
  }

  kNNGraph::run<value_idx, value_t, umap_inputs>(
    handle, inputs, inputs, knn_graph, k, params, stream);
  raft::common::nvtx::pop_range();

  /**
   * Allocate workspace for fuzzy simplicial set.
   */
  raft::common::nvtx::push_range("umap::simplicial_set");
  raft::sparse::COO<value_t> fss_graph(stream);
  {
    // Scope the temporary so its memory is released before the
    // intersection work below.
    raft::sparse::COO<value_t> fss_graph_tmp(stream);
    /**
     * Run Fuzzy simplicial set
     */
    // int nnz = n*k*2;
    FuzzySimplSet::run<TPB_X, value_idx, value_t>(inputs.n,
                                                  knn_graph.knn_indices,
                                                  knn_graph.knn_dists,
                                                  params->n_neighbors,
                                                  &fss_graph_tmp,
                                                  params,
                                                  stream);
    RAFT_CUDA_TRY(cudaPeekAtLastError());

    raft::sparse::op::coo_remove_zeros<value_t>(&fss_graph_tmp, &fss_graph, stream);
  }

  raft::sparse::COO<value_t> ci_graph(stream);

  /**
   * If target metric is 'categorical', perform
   * categorical simplicial set intersection.
   */
  if (params->target_metric == ML::UMAPParams::MetricType::CATEGORICAL) {
    CUML_LOG_DEBUG("Performing categorical intersection");
    Supervised::perform_categorical_intersection<TPB_X, value_t>(
      inputs.y, &fss_graph, &ci_graph, params, stream);

    /**
     * Otherwise, perform general simplicial set intersection
     */
  } else {
    CUML_LOG_DEBUG("Performing general intersection");
    Supervised::perform_general_intersection<TPB_X, value_idx, value_t>(
      handle, inputs.y, &fss_graph, &ci_graph, params, stream);
  }

  /**
   * Remove zeros
   */
  raft::sparse::op::coo_sort<value_t>(&ci_graph, stream);
  raft::sparse::op::coo_remove_zeros<value_t>(&ci_graph, graph, stream);
  raft::common::nvtx::pop_range();
}

/**
 * Re-run the simplicial set embedding on an existing graph, refining
 * `embeddings` in place.
 */
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _refine(const raft::handle_t& handle,
             const umap_inputs& inputs,
             UMAPParams* params,
             raft::sparse::COO<value_t>* graph,
             value_t* embeddings)
{
  cudaStream_t stream = handle.get_stream();
  /**
   * Run simplicial set embedding to approximate low-dimensional representation
   */
  SimplSetEmbed::run<TPB_X, value_t>(inputs.n, inputs.d, graph, params, embeddings, stream);
}

/**
 * Unsupervised UMAP fit: build the fuzzy simplicial set, initialize the
 * embedding, then optimize it. Fires the optional user callback around the
 * optimization phase.
 */
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _fit(const raft::handle_t& handle,
          const umap_inputs& inputs,
          UMAPParams* params,
          value_t* embeddings,
          raft::sparse::COO<float, int>* graph)
{
  raft::common::nvtx::range fun_scope("umap::unsupervised::fit");

  cudaStream_t stream = handle.get_stream();

  ML::Logger::get().setLevel(params->verbosity);

  UMAPAlgo::_get_graph<value_idx, value_t, umap_inputs, TPB_X>(handle, inputs, params, graph);

  /**
   * Run initialization method
   */
  raft::common::nvtx::push_range("umap::embedding");
  InitEmbed::run(handle, inputs.n, inputs.d, graph, params, embeddings, stream, params->init);

  if (params->callback) {
    params->callback->setup<value_t>(inputs.n, params->n_components);
    params->callback->on_preprocess_end(embeddings);
  }

  /**
   * Run simplicial set embedding to approximate low-dimensional representation
   */
  SimplSetEmbed::run<TPB_X, value_t>(inputs.n, inputs.d, graph, params, embeddings, stream);
  raft::common::nvtx::pop_range();

  if (params->callback) params->callback->on_train_end(embeddings);

  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/**
 * Supervised UMAP fit: identical to _fit except the graph construction
 * intersects the fuzzy simplicial set with the label/target data.
 */
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _fit_supervised(const raft::handle_t& handle,
                     const umap_inputs& inputs,
                     UMAPParams* params,
                     value_t* embeddings,
                     raft::sparse::COO<float, int>* graph)
{
  raft::common::nvtx::range fun_scope("umap::supervised::fit");
  cudaStream_t stream = handle.get_stream();

  ML::Logger::get().setLevel(params->verbosity);

  UMAPAlgo::_get_graph_supervised<value_idx, value_t, umap_inputs, TPB_X>(
    handle, inputs, params, graph);

  /**
   * Initialize embeddings
   */
  raft::common::nvtx::push_range("umap::supervised::fit");
  InitEmbed::run(handle, inputs.n, inputs.d, graph, params, embeddings, stream, params->init);

  if (params->callback) {
    params->callback->setup<value_t>(inputs.n, params->n_components);
    params->callback->on_preprocess_end(embeddings);
  }

  /**
   * Run simplicial set embedding to approximate low-dimensional representation
   */
  SimplSetEmbed::run<TPB_X, value_t>(inputs.n, inputs.d, graph, params, embeddings, stream);
  raft::common::nvtx::pop_range();

  if (params->callback) params->callback->on_train_end(embeddings);

  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/**
 * Project new points into an existing embedding: kNN of the new points
 * against the training data, smoothed membership strengths, a weighted-sum
 * initialization, then a shortened optimization pass against the fixed
 * training embedding.
 *
 * @param inputs        new points to transform
 * @param orig_x_inputs training data the model was fitted on
 * @param embedding     fitted training embedding (n x n_components)
 * @param embedding_n   number of rows in `embedding`
 * @param transformed   output embedding for `inputs`
 */
template <typename value_idx, typename value_t, typename umap_inputs, int TPB_X>
void _transform(const raft::handle_t& handle,
                const umap_inputs& inputs,
                umap_inputs& orig_x_inputs,
                value_t* embedding,
                int embedding_n,
                UMAPParams* params,
                value_t* transformed)
{
  raft::common::nvtx::range fun_scope("umap::transform");
  cudaStream_t stream = handle.get_stream();

  ML::Logger::get().setLevel(params->verbosity);

  CUML_LOG_DEBUG("Running transform");

  CUML_LOG_DEBUG("Building KNN Graph");

  raft::common::nvtx::push_range("umap::knnGraph");
  std::unique_ptr<rmm::device_uvector<value_idx>> knn_indices_b = nullptr;
  std::unique_ptr<rmm::device_uvector<value_t>> knn_dists_b     = nullptr;

  int k = params->n_neighbors;

  knn_graph<value_idx, value_t> knn_graph(inputs.n, k);

  /**
   * If not given precomputed knn graph, compute it
   */
  if (inputs.alloc_knn_graph()) {
    /**
     * Allocate workspace for kNN graph
     */
    knn_indices_b = std::make_unique<rmm::device_uvector<value_idx>>(inputs.n * k, stream);
    knn_dists_b   = std::make_unique<rmm::device_uvector<value_t>>(inputs.n * k, stream);

    knn_graph.knn_indices = knn_indices_b->data();
    knn_graph.knn_dists   = knn_dists_b->data();
  }

  // Neighbors of the new points are searched in the original training set.
  kNNGraph::run<value_idx, value_t, umap_inputs>(
    handle, orig_x_inputs, inputs, knn_graph, k, params, stream);

  raft::common::nvtx::pop_range();

  raft::common::nvtx::push_range("umap::smooth_knn");
  float adjusted_local_connectivity = max(0.0, params->local_connectivity - 1.0);

  CUML_LOG_DEBUG("Smoothing KNN distances");

  /**
   * Perform smooth_knn_dist
   */
  rmm::device_uvector<value_t> sigmas(inputs.n, stream);
  rmm::device_uvector<value_t> rhos(inputs.n, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(sigmas.data(), 0, inputs.n * sizeof(value_t), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(rhos.data(), 0, inputs.n * sizeof(value_t), stream));

  dim3 grid_n(raft::ceildiv(inputs.n, TPB_X), 1, 1);
  dim3 blk(TPB_X, 1, 1);

  FuzzySimplSetImpl::smooth_knn_dist<TPB_X, value_idx, value_t>(inputs.n,
                                                                knn_graph.knn_indices,
                                                                knn_graph.knn_dists,
                                                                rhos.data(),
                                                                sigmas.data(),
                                                                params,
                                                                params->n_neighbors,
                                                                adjusted_local_connectivity,
                                                                stream);
  raft::common::nvtx::pop_range();

  /**
   * Compute graph of membership strengths
   */
  int nnz = inputs.n * params->n_neighbors;

  dim3 grid_nnz(raft::ceildiv(nnz, TPB_X), 1, 1);

  CUML_LOG_DEBUG("Executing fuzzy simplicial set");

  /**
   * Allocate workspace for fuzzy simplicial set.
   */
  raft::sparse::COO<value_t> graph_coo(stream, nnz, inputs.n, inputs.n);

  FuzzySimplSetImpl::compute_membership_strength_kernel<TPB_X>
    <<<grid_nnz, blk, 0, stream>>>(knn_graph.knn_indices,
                                   knn_graph.knn_dists,
                                   sigmas.data(),
                                   rhos.data(),
                                   graph_coo.vals(),
                                   graph_coo.rows(),
                                   graph_coo.cols(),
                                   graph_coo.n_rows,
                                   params->n_neighbors);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  rmm::device_uvector<int> row_ind(inputs.n, stream);
  // NOTE(review): `ia` receives the COO degrees below but is then zeroed
  // after init_transform and never read afterwards in this function —
  // looks like dead workspace; confirm before removing.
  rmm::device_uvector<int> ia(inputs.n, stream);

  raft::sparse::convert::sorted_coo_to_csr(&graph_coo, row_ind.data(), stream);
  raft::sparse::linalg::coo_degree(&graph_coo, ia.data(), stream);

  rmm::device_uvector<value_t> vals_normed(graph_coo.nnz, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(vals_normed.data(), 0, graph_coo.nnz * sizeof(value_t), stream));

  CUML_LOG_DEBUG("Performing L1 normalization");

  raft::sparse::linalg::csr_row_normalize_l1<value_t>(
    row_ind.data(), graph_coo.vals(), graph_coo.nnz, graph_coo.n_rows, vals_normed.data(), stream);

  // Initialize each transformed point as the weighted average of its
  // neighbors' training embeddings.
  init_transform<TPB_X, value_t><<<grid_n, blk, 0, stream>>>(graph_coo.cols(),
                                                             vals_normed.data(),
                                                             graph_coo.n_rows,
                                                             embedding,
                                                             embedding_n,
                                                             params->n_components,
                                                             transformed,
                                                             params->n_neighbors);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  // NOTE(review): the value argument here is a double literal 0.0 being
  // passed where cudaMemsetAsync takes an int; it converts to 0 so the
  // behavior is correct, but it should read `0`.
  RAFT_CUDA_TRY(cudaMemsetAsync(ia.data(), 0.0, ia.size() * sizeof(int), stream));

  RAFT_CUDA_TRY(cudaPeekAtLastError());

  /**
   * Go through raft::sparse::COO values and set everything that's less than
   * vals.max() / params->n_epochs to 0.0
   */
  thrust::device_ptr<value_t> d_ptr = thrust::device_pointer_cast(graph_coo.vals());
  value_t max = *(thrust::max_element(thrust::cuda::par.on(stream), d_ptr, d_ptr + nnz));

  // Transform uses fewer epochs than fit: either a size-based default or
  // one third of the user-requested count.
  int n_epochs = params->n_epochs;
  if (n_epochs <= 0) {
    if (inputs.n <= 10000)
      n_epochs = 100;
    else
      n_epochs = 30;
  } else {
    n_epochs /= 3;
  }

  CUML_LOG_DEBUG("n_epochs=%d", n_epochs);

  raft::linalg::unaryOp<value_t>(
    graph_coo.vals(),
    graph_coo.vals(),
    graph_coo.nnz,
    [=] __device__(value_t input) {
      if (input < (max / float(n_epochs)))
        return 0.0f;
      else
        return input;
    },
    stream);

  RAFT_CUDA_TRY(cudaPeekAtLastError());

  /**
   * Remove zeros
   */
  raft::sparse::COO<value_t> comp_coo(stream);
  raft::sparse::op::coo_remove_zeros<value_t>(&graph_coo, &comp_coo, stream);

  raft::common::nvtx::push_range("umap::optimization");

  CUML_LOG_DEBUG("Computing # of epochs for training each sample");

  rmm::device_uvector<value_t> epochs_per_sample(nnz, stream);

  SimplSetEmbedImpl::make_epochs_per_sample(
    comp_coo.vals(), comp_coo.nnz, n_epochs, epochs_per_sample.data(), stream);

  CUML_LOG_DEBUG("Performing optimization");

  if (params->callback) {
    params->callback->setup<value_t>(inputs.n, params->n_components);
    params->callback->on_preprocess_end(transformed);
  }

  // NOTE(review): `initial_alpha` is computed but not passed to
  // optimize_layout below — apparently unused; confirm intent.
  auto initial_alpha = params->initial_alpha / 4.0;

  SimplSetEmbedImpl::optimize_layout<TPB_X, value_t>(transformed,
                                                     inputs.n,
                                                     embedding,
                                                     embedding_n,
                                                     comp_coo.rows(),
                                                     comp_coo.cols(),
                                                     comp_coo.nnz,
                                                     epochs_per_sample.data(),
                                                     params->repulsion_strength,
                                                     params,
                                                     n_epochs,
                                                     stream);
  raft::common::nvtx::pop_range();

  if (params->callback) params->callback->on_train_end(transformed);
}

}  // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src/umap
rapidsai_public_repos/cuml/cpp/src/umap/simpl_set_embed/optimize_batch_kernel.cuh
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <common/fast_int_div.cuh> #include <cuml/manifold/umapparams.h> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <cstddef> namespace UMAPAlgo { namespace SimplSetEmbed { namespace Algo { using namespace ML; /** * Calculate the squared distance between two vectors of size n * @{ */ template <typename T> DI T rdist(const T* X, const T* Y, int n) { auto result = T(0.0); for (int i = 0; i < n; i++) { auto diff = T(X[i] - Y[i]); result += diff * diff; } return result; } template <typename T, int LEN> DI T rdist(const T (&X)[LEN], const T (&Y)[LEN]) { auto result = T(0.0); #pragma unroll for (int i = 0; i < LEN; ++i) { auto diff = T(X[i] - Y[i]); result += diff * diff; } return result; } /** @} */ /** * Clip a value to within a lower and upper bound */ template <typename T> DI T clip(T val, T lb, T ub) { return min(max(val, lb), ub); } /** * Calculate the repulsive gradient */ template <typename T> DI T repulsive_grad(T dist_squared, T gamma, UMAPParams params) { auto grad_coeff = T(2.0) * gamma * params.b; grad_coeff /= (T(0.001) + dist_squared) * (params.a * pow(dist_squared, params.b) + T(1.0)); return grad_coeff; } /** * Calculate the attractive gradient */ template <typename T> DI T attractive_grad(T dist_squared, UMAPParams params) { auto grad_coeff = T(-2.0) * params.a * params.b * 
pow(dist_squared, params.b - T(1.0)); grad_coeff /= params.a * pow(dist_squared, params.b) + T(1.0); return grad_coeff; } template <typename T> DI T truncate_gradient(T const rounding_factor, T const x) { return (rounding_factor + x) - rounding_factor; } template <typename T, int TPB_X, int n_components> __global__ void optimize_batch_kernel_reg(T const* head_embedding, T* head_buffer, int head_n, T const* tail_embedding, T* tail_buffer, const MLCommon::FastIntDiv tail_n, const int* head, const int* tail, int nnz, T const* epochs_per_sample, T* epoch_of_next_negative_sample, T* epoch_of_next_sample, T alpha, int epoch, T gamma, uint64_t seed, bool move_other, UMAPParams params, T nsr_inv, T rounding) { int row = (blockIdx.x * TPB_X) + threadIdx.x; if (row >= nnz) return; auto _epoch_of_next_sample = epoch_of_next_sample[row]; if (_epoch_of_next_sample > epoch) return; auto _epochs_per_sample = epochs_per_sample[row]; auto epochs_per_negative_sample = _epochs_per_sample * nsr_inv; /** * Positive sample stage (attractive forces) */ int j = head[row]; int k = tail[row]; T const* current = head_embedding + (j * n_components); T const* other = tail_embedding + (k * n_components); T* cur_write = head_buffer + (j * n_components); T* oth_write = tail_buffer + (k * n_components); T current_reg[n_components], other_reg[n_components], grads[n_components]; for (int i = 0; i < n_components; ++i) { current_reg[i] = current[i]; other_reg[i] = other[i]; } auto dist_squared = rdist<T, n_components>(current_reg, other_reg); // Attractive force between the two vertices, since they // are connected by an edge in the 1-skeleton. auto attractive_grad_coeff = T(0.0); if (dist_squared > T(0.0)) { attractive_grad_coeff = attractive_grad<T>(dist_squared, params); } /** * Apply attractive force between `current` and `other` * by updating their 'weights' to place them relative * to their weight in the 1-skeleton. * (update `other` embedding only if we are * performing unsupervised training). 
*/ for (int d = 0; d < n_components; d++) { auto diff = current_reg[d] - other_reg[d]; auto grad_d = clip<T>(attractive_grad_coeff * diff, T(-4.0), T(4.0)); grads[d] = grad_d * alpha; } // storing gradients for negative samples back to global memory if (move_other) { for (int d = 0; d < n_components; d++) { raft::myAtomicAdd(oth_write + d, truncate_gradient(rounding, -grads[d])); } } epoch_of_next_sample[row] = _epoch_of_next_sample + _epochs_per_sample; // number of negative samples to choose auto _epoch_of_next_negative_sample = epoch_of_next_negative_sample[row]; int n_neg_samples = int(T(epoch - _epoch_of_next_negative_sample) / epochs_per_negative_sample); /** * Negative sampling stage */ raft::random::detail::PhiloxGenerator gen((uint64_t)seed, (uint64_t)row, 0); for (int p = 0; p < n_neg_samples; p++) { int r; gen.next(r); int t = r % tail_n; T const* negative_sample = tail_embedding + (t * n_components); T negative_sample_reg[n_components]; for (int i = 0; i < n_components; ++i) { negative_sample_reg[i] = negative_sample[i]; } dist_squared = rdist<T, n_components>(current_reg, negative_sample_reg); // repulsive force between two vertices auto repulsive_grad_coeff = T(0.0); if (dist_squared > T(0.0)) { repulsive_grad_coeff = repulsive_grad<T>(dist_squared, gamma, params); } else if (j == t) continue; /** * Apply repulsive force between `current` and `other` * (which has been negatively sampled) by updating * their 'weights' to push them farther in Euclidean space. 
*/
    for (int d = 0; d < n_components; d++) {
      auto diff   = current_reg[d] - negative_sample_reg[d];
      auto grad_d = T(0.0);
      if (repulsive_grad_coeff > T(0.0))
        grad_d = clip<T>(repulsive_grad_coeff * diff, T(-4.0), T(4.0));
      else
        grad_d = T(4.0);  // coincident non-identical points: push apart with the max clipped step
      grads[d] += grad_d * alpha;
    }
  }

  // storing gradients for positive samples back to global memory
  for (int d = 0; d < n_components; d++) {
    raft::myAtomicAdd(cur_write + d, truncate_gradient(rounding, grads[d]));
  }

  epoch_of_next_negative_sample[row] =
    _epoch_of_next_negative_sample + n_neg_samples * epochs_per_negative_sample;
}

/**
 * Generic UMAP SGD kernel: one thread per positive edge (row < nnz) of the COO
 * 1-skeleton. Applies the attractive update between vertices head[row] and
 * tail[row], then performs `n_neg_samples` repulsive updates against randomly
 * drawn tail vertices.
 *
 * When `use_shared_mem` is true, each thread accumulates the gradient for its
 * `current` vertex in dynamic shared memory (component d of thread t lives at
 * offset d * TPB_X + t, so a warp's accesses for a fixed d are contiguous) and
 * flushes it with one atomicAdd per component at the end; otherwise every
 * component update is an atomicAdd straight to global memory.
 *
 * `rounding` truncates each atomic contribution so that the final sum is the
 * same regardless of the order in which atomics land (see
 * create_rounding_factor); this is what makes `deterministic` mode reproducible.
 *
 * NOTE(review): threads with row >= nnz (and rows whose epoch_of_next_sample is
 * in the future) return before the __syncthreads() calls below; this relies on
 * exited threads being excluded from the barrier — confirm this is intended.
 */
template <typename T, int TPB_X, bool use_shared_mem>
__global__ void optimize_batch_kernel(T const* head_embedding,
                                      T* head_buffer,
                                      int head_n,
                                      T const* tail_embedding,
                                      T* tail_buffer,
                                      const MLCommon::FastIntDiv tail_n,
                                      const int* head,
                                      const int* tail,
                                      int nnz,
                                      T const* epochs_per_sample,
                                      T* epoch_of_next_negative_sample,
                                      T* epoch_of_next_sample,
                                      T alpha,
                                      int epoch,
                                      T gamma,
                                      uint64_t seed,
                                      bool move_other,
                                      UMAPParams params,
                                      T nsr_inv,
                                      T rounding)
{
  // Dynamic shared memory: TPB_X * n_components elements of T (see launcher).
  extern __shared__ T embedding_shared_mem_updates[];
  int row = (blockIdx.x * TPB_X) + threadIdx.x;
  if (row >= nnz) return;
  auto _epoch_of_next_sample = epoch_of_next_sample[row];
  if (_epoch_of_next_sample > epoch) return;
  auto _epochs_per_sample         = epochs_per_sample[row];
  auto epochs_per_negative_sample = _epochs_per_sample * nsr_inv;

  /**
   * Positive sample stage (attractive forces)
   */
  int j            = head[row];
  int k            = tail[row];
  T const* current = head_embedding + (j * params.n_components);
  T const* other   = tail_embedding + (k * params.n_components);
  // Gradient sinks: distinct buffers in deterministic mode, the embeddings
  // themselves otherwise.
  T* cur_write = head_buffer + (j * params.n_components);
  T* oth_write = tail_buffer + (k * params.n_components);
  T* current_buffer{nullptr};
  if (use_shared_mem) { current_buffer = (T*)embedding_shared_mem_updates + threadIdx.x; }

  auto dist_squared = rdist<T>(current, other, params.n_components);

  // Attractive force between the two vertices, since they
  // are connected by an edge in the 1-skeleton.
  auto attractive_grad_coeff = T(0.0);
  if (dist_squared > T(0.0)) { attractive_grad_coeff = attractive_grad<T>(dist_squared, params); }

  /**
   * Apply attractive force between `current` and `other`
   * by updating their 'weights' to place them relative
   * to their weight in the 1-skeleton.
   * (update `other` embedding only if we are
   * performing unsupervised training).
   */
  for (int d = 0; d < params.n_components; d++) {
    auto grad_d = clip<T>(attractive_grad_coeff * (current[d] - other[d]), T(-4.0), T(4.0));
    grad_d *= alpha;
    if (use_shared_mem) {
      current_buffer[d * TPB_X] = grad_d;
    } else {
      raft::myAtomicAdd<T>((T*)cur_write + d, truncate_gradient(rounding, grad_d));
      if (move_other) {  // happens only during unsupervised training
        raft::myAtomicAdd<T>((T*)oth_write + d, truncate_gradient(rounding, -grad_d));
      }
    }
  }

  // storing gradients for negative samples back to global memory
  if (use_shared_mem && move_other) {
    __syncthreads();
    for (int d = 0; d < params.n_components; d++) {
      auto grad = current_buffer[d * TPB_X];
      raft::myAtomicAdd<T>((T*)oth_write + d, truncate_gradient(rounding, -grad));
    }
  }

  epoch_of_next_sample[row] = _epoch_of_next_sample + _epochs_per_sample;

  // number of negative samples to choose
  auto _epoch_of_next_negative_sample = epoch_of_next_negative_sample[row];
  int n_neg_samples = int(T(epoch - _epoch_of_next_negative_sample) / epochs_per_negative_sample);

  /**
   * Negative sampling stage
   */
  // Counter-based RNG keyed on (seed, row): reproducible per edge per epoch.
  raft::random::detail::PhiloxGenerator gen((uint64_t)seed, (uint64_t)row, 0);
  for (int p = 0; p < n_neg_samples; p++) {
    int r;
    gen.next(r);
    int t                    = r % tail_n;  // FastIntDiv makes this modulo cheap
    T const* negative_sample = tail_embedding + (t * params.n_components);

    dist_squared = rdist<T>(current, negative_sample, params.n_components);

    // repulsive force between two vertices
    auto repulsive_grad_coeff = T(0.0);
    if (dist_squared > T(0.0)) {
      repulsive_grad_coeff = repulsive_grad<T>(dist_squared, gamma, params);
    } else if (j == t)
      continue;  // sampled the vertex itself; skip

    /**
     * Apply repulsive force between `current` and `other`
     * (which has been negatively sampled) by updating
     * their 'weights' to push them farther in Euclidean space.
     */
    for (int d = 0; d < params.n_components; d++) {
      auto grad_d = T(0.0);
      if (repulsive_grad_coeff > T(0.0))
        grad_d = clip<T>(repulsive_grad_coeff * (current[d] - negative_sample[d]), T(-4.0), T(4.0));
      else
        grad_d = T(4.0);
      grad_d *= alpha;
      if (use_shared_mem) {
        current_buffer[d * TPB_X] += grad_d;
      } else {
        raft::myAtomicAdd<T>((T*)cur_write + d, truncate_gradient(rounding, grad_d));
      }
    }
  }

  // storing gradients for positive samples back to global memory
  if (use_shared_mem) {
    __syncthreads();
    for (int d = 0; d < params.n_components; d++) {
      raft::myAtomicAdd<T>((T*)cur_write + d,
                           truncate_gradient(rounding, current_buffer[d * TPB_X]));
    }
  }

  epoch_of_next_negative_sample[row] =
    _epoch_of_next_negative_sample + n_neg_samples * epochs_per_negative_sample;
}

/**
 * Host-side dispatcher for the SGD kernels above (doc continues on the
 * definition below).
 *
 * @param head_buffer: Buffer the gradient update to head_embedding when deterministic
 *                     result is required. They are the same pointer if random seed is not
 *                     provided.
 * @param tail_buffer: Similar to head_buffer, but for tail_embedding.
 * @param head: Row index in COO connectivity graph.
 * @param tail: Column index in COO connectivity graph.
 * @param alpha: Learning rate
 * @param n: Current epoch
 * @param rounding: Floating rounding factor used to truncate the gradient update for
 *                  deterministic result.
*/ template <typename T, int TPB_X> void call_optimize_batch_kernel(T const* head_embedding, T* head_buffer, int head_n, T const* tail_embedding, T* tail_buffer, const MLCommon::FastIntDiv& tail_n, const int* head, const int* tail, int nnz, T const* epochs_per_sample, T* epoch_of_next_negative_sample, T* epoch_of_next_sample, T alpha, T gamma, uint64_t seed, bool move_other, UMAPParams const* params, int n, dim3& grid, dim3& blk, cudaStream_t& stream, T rounding) { std::size_t requiredSize = TPB_X * params->n_components; requiredSize *= sizeof(T); bool use_shared_mem = requiredSize < static_cast<std::size_t>(raft::getSharedMemPerBlock()); T nsr_inv = T(1.0) / params->negative_sample_rate; if (params->n_components == 2) { // multicore implementation with registers optimize_batch_kernel_reg<T, TPB_X, 2><<<grid, blk, 0, stream>>>(head_embedding, head_buffer, head_n, tail_embedding, tail_buffer, tail_n, head, tail, nnz, epochs_per_sample, epoch_of_next_negative_sample, epoch_of_next_sample, alpha, n, gamma, seed, move_other, *params, nsr_inv, rounding); } else if (use_shared_mem) { // multicore implementation with shared memory optimize_batch_kernel<T, TPB_X, true> <<<grid, blk, requiredSize, stream>>>(head_embedding, head_buffer, head_n, tail_embedding, tail_buffer, tail_n, head, tail, nnz, epochs_per_sample, epoch_of_next_negative_sample, epoch_of_next_sample, alpha, n, gamma, seed, move_other, *params, nsr_inv, rounding); } else { // multicore implementation without shared memory optimize_batch_kernel<T, TPB_X, false><<<grid, blk, 0, stream>>>(head_embedding, head_buffer, head_n, tail_embedding, tail_buffer, tail_n, head, tail, nnz, epochs_per_sample, epoch_of_next_negative_sample, epoch_of_next_sample, alpha, n, gamma, seed, move_other, *params, nsr_inv, rounding); } } } // namespace Algo } // namespace SimplSetEmbed } // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src/umap
rapidsai_public_repos/cuml/cpp/src/umap/simpl_set_embed/algo.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/common/logger.hpp> #include <cuml/manifold/umapparams.h> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <curand.h> #include <math.h> #include <common/fast_int_div.cuh> #include <cstdlib> #include <raft/linalg/unary_op.cuh> #include <raft/sparse/coo.hpp> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include "optimize_batch_kernel.cuh" #include <string> #include <raft/sparse/op/filter.cuh> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/reduce.h> #include <thrust/system/cuda/execution_policy.h> namespace UMAPAlgo { namespace SimplSetEmbed { namespace Algo { using namespace ML; /** * Given a set of weights and number of epochs, generate * the number of epochs per sample for each weight. 
* * @param weights: The weights of how much we wish to sample each 1-simplex * @param weights_n: the size of the weights array * @param n_epochs: the total number of epochs we want to train for * @param result: an array of number of epochs per sample, one for each 1-simplex * @param stream cuda stream */ template <typename T> void make_epochs_per_sample(T* weights, int weights_n, int n_epochs, T* result, cudaStream_t stream) { thrust::device_ptr<T> d_weights = thrust::device_pointer_cast(weights); T weights_max = *(thrust::max_element(thrust::cuda::par.on(stream), d_weights, d_weights + weights_n)); // result = -1.0 * np.ones( // weights.shape[0], dtype=np.float64 // ) // n_samples = n_epochs * (weights / weights.max()) // result[n_samples > 0] = ( // float(n_epochs) / n_samples[n_samples > 0] // ) raft::linalg::unaryOp<T>( result, weights, weights_n, [=] __device__(T input) { T v = n_epochs * (input / weights_max); if (v > 0) return T(n_epochs) / v; else return T(-1.0); }, stream); } template <typename T> void optimization_iteration_finalization( UMAPParams* params, T* head_embedding, T& alpha, int n, int n_epochs, uint64_t& seed) { if (params->callback) params->callback->on_epoch_end(head_embedding); alpha = params->initial_alpha * (1.0 - (T(n) / T(n_epochs))); seed += 1; } /** * Update the embeddings and clear the buffers when using deterministic algorithm. 
*/
template <typename T>
void apply_embedding_updates(T* head_embedding,
                             T* head_buffer,
                             int head_n,
                             T* tail_embedding,
                             T* tail_buffer,
                             int tail_n,
                             UMAPParams* params,
                             bool move_other,
                             rmm::cuda_stream_view stream)
{
  ASSERT(params->deterministic, "Only used when deterministic is set to true.");
  if (move_other) {
    auto n_components = params->n_components;
    // One pass over max(head_n, tail_n) * n_components indices updates both
    // embeddings; each buffer slot is zeroed after being applied so the
    // buffers can be reused by the next epoch.
    thrust::for_each(
      rmm::exec_policy(stream),
      thrust::make_counting_iterator(0u),
      thrust::make_counting_iterator(0u) + std::max(head_n, tail_n) * params->n_components,
      [=] __device__(uint32_t i) {
        if (i < head_n * n_components) {
          head_embedding[i] += head_buffer[i];
          head_buffer[i] = 0.0f;
        }
        if (i < tail_n * n_components) {
          tail_embedding[i] += tail_buffer[i];
          tail_buffer[i] = 0.0f;
        }
      });
  } else {
    // No need to update reference embedding
    thrust::for_each(rmm::exec_policy(stream),
                     thrust::make_counting_iterator(0u),
                     thrust::make_counting_iterator(0u) + head_n * params->n_components,
                     [=] __device__(uint32_t i) {
                       head_embedding[i] += head_buffer[i];
                       head_buffer[i] = 0.0f;
                     });
  }
}

/**
 * \brief Constructs a rounding factor used to truncate elements in a sum such that the
 * sum of the truncated elements is the same no matter what the order of the sum is.
 *
 * Algorithm 5: Reproducible Sequential Sum in 'Fast Reproducible Floating-Point
 * Summation' by Demmel and Nguyen
 *
 * In algorithm 5 the bound is calculated as $max(|v_i|) * n$. We use maximum number of
 * edges connected to each vertex as n.
 *
 * The calculation trick is borrowed from fbcuda, which is BSD-licensed.
 */
template <typename T>
T create_rounding_factor(T max_abs, int n)
{
  T delta =
    max_abs / (static_cast<T>(1.0) - static_cast<T>(2.0) * n * std::numeric_limits<T>::epsilon());

  // Calculate ceil(log_2(delta)).
  // frexpf() calculates exp and returns `x` such that
  // delta = x * 2^exp, where `x` in (-1.0, -0.5] U [0.5, 1).
  // Because |x| < 1, exp is exactly ceil(log_2(delta)).
  int exp;
  std::frexp(delta, &exp);

  // return M = 2 ^ ceil(log_2(delta))
  return std::ldexp(static_cast<T>(1.0), exp);
}

/**
 * Computes a rounding factor covering the worst-case sum of gradient
 * contributions to a single vertex: each of its edges contributes at most
 * 4 * |alpha| per update (gradients are clipped to [-4, 4] in the kernels).
 */
template <typename T>
T create_gradient_rounding_factor(
  const int* head, int nnz, int n_samples, T alpha, rmm::cuda_stream_view stream)
{
  rmm::device_uvector<T> buffer(n_samples, stream);
  // calculate the maximum number of edges connected to 1 vertex.
  // NOTE(review): reduce_by_key requires equal keys to be consecutive, so
  // `head` is assumed sorted by row here — confirm upstream guarantees this.
  thrust::reduce_by_key(rmm::exec_policy(stream),
                        head,
                        head + nnz,
                        thrust::make_constant_iterator(1u),
                        thrust::make_discard_iterator(),
                        buffer.data());
  auto ptr         = thrust::device_pointer_cast(buffer.data());
  uint32_t n_edges = *(thrust::max_element(rmm::exec_policy(stream), ptr, ptr + buffer.size()));
  T max_abs        = T(n_edges) * T(4.0) * std::abs(alpha);
  return create_rounding_factor(max_abs, n_edges);
}

/**
 * Runs gradient descent using sampling weights defined on
 * both the attraction and repulsion vectors.
 *
 * In this GD implementation, the weights being tuned are the
 * embeddings themselves, as the objective function is attracting
 * positive weights (neighbors in the 1-skeleton) and repelling
 * negative weights (non-neighbors in the 1-skeleton).
 */
template <int TPB_X, typename T>
void optimize_layout(T* head_embedding,
                     int head_n,
                     T* tail_embedding,
                     int tail_n,
                     const int* head,
                     const int* tail,
                     int nnz,
                     T* epochs_per_sample,
                     float gamma,
                     UMAPParams* params,
                     int n_epochs,
                     cudaStream_t stream)
{
  // Are we doing a fit or a transform?
  // (fit: head and tail are the same embedding, so both endpoints move)
  bool move_other = head_embedding == tail_embedding;
  T alpha         = params->initial_alpha;

  auto stream_view = rmm::cuda_stream_view(stream);

  // Schedule for negative sampling: each edge draws negatives at a rate of
  // negative_sample_rate per positive sample.
  rmm::device_uvector<T> epoch_of_next_negative_sample(nnz, stream);
  T nsr_inv = T(1.0) / params->negative_sample_rate;
  raft::linalg::unaryOp<T>(
    epoch_of_next_negative_sample.data(),
    epochs_per_sample,
    nnz,
    [=] __device__(T input) { return input * nsr_inv; },
    stream);

  rmm::device_uvector<T> epoch_of_next_sample(nnz, stream);
  raft::copy(epoch_of_next_sample.data(), epochs_per_sample, nnz, stream);

  // Buffers used to store the gradient updates to avoid conflicts
  rmm::device_uvector<T> head_buffer(0, stream_view);
  rmm::device_uvector<T> tail_buffer(0, stream_view);
  // Write to embedding directly if deterministic is not needed.
  T* d_head_buffer = head_embedding;
  T* d_tail_buffer = tail_embedding;
  if (params->deterministic) {
    head_buffer.resize(head_n * params->n_components, stream_view);
    RAFT_CUDA_TRY(
      cudaMemsetAsync(head_buffer.data(), '\0', sizeof(T) * head_buffer.size(), stream));
    // No need for tail if it's not being written.
    if (move_other) {
      tail_buffer.resize(tail_n * params->n_components, stream_view);
      RAFT_CUDA_TRY(
        cudaMemsetAsync(tail_buffer.data(), '\0', sizeof(T) * tail_buffer.size(), stream));
    }
    d_head_buffer = head_buffer.data();
    d_tail_buffer = tail_buffer.data();
  }

  dim3 grid(raft::ceildiv(nnz, TPB_X), 1, 1);
  dim3 blk(TPB_X, 1, 1);
  uint64_t seed = params->random_state;

  T rounding = create_gradient_rounding_factor<T>(head, nnz, head_n, alpha, stream_view);

  MLCommon::FastIntDiv tail_n_fast(tail_n);
  for (int n = 0; n < n_epochs; n++) {
    call_optimize_batch_kernel<T, TPB_X>(head_embedding,
                                         d_head_buffer,
                                         head_n,
                                         tail_embedding,
                                         d_tail_buffer,
                                         tail_n_fast,
                                         head,
                                         tail,
                                         nnz,
                                         epochs_per_sample,
                                         epoch_of_next_negative_sample.data(),
                                         epoch_of_next_sample.data(),
                                         alpha,
                                         gamma,
                                         seed,
                                         move_other,
                                         params,
                                         n,
                                         grid,
                                         blk,
                                         stream,
                                         rounding);
    if (params->deterministic) {
      // Fold the buffered gradients into the embeddings and clear the buffers.
      apply_embedding_updates(head_embedding,
                              d_head_buffer,
                              head_n,
                              tail_embedding,
                              d_tail_buffer,
                              tail_n,
                              params,
                              move_other,
                              stream_view);
    }
    RAFT_CUDA_TRY(cudaGetLastError());
    optimization_iteration_finalization(params, head_embedding, alpha, n, n_epochs, seed);
  }
}

/**
 * Perform a fuzzy simplicial set embedding by minimizing
 * the fuzzy set cross entropy between the embeddings
 * and their 1-skeletons.
*/ template <int TPB_X, typename T> void launcher( int m, int n, raft::sparse::COO<T>* in, UMAPParams* params, T* embedding, cudaStream_t stream) { int nnz = in->nnz; /** * Find vals.max() */ thrust::device_ptr<const T> d_ptr = thrust::device_pointer_cast(in->vals()); T max = *(thrust::max_element(thrust::cuda::par.on(stream), d_ptr, d_ptr + nnz)); int n_epochs = params->n_epochs; if (n_epochs <= 0) { if (m <= 10000) n_epochs = 500; else n_epochs = 200; } /** * Go through COO values and set everything that's less than * vals.max() / params->n_epochs to 0.0 */ raft::linalg::unaryOp<T>( in->vals(), in->vals(), nnz, [=] __device__(T input) { if (input < (max / float(n_epochs))) return 0.0f; else return input; }, stream); raft::sparse::COO<T> out(stream); raft::sparse::op::coo_remove_zeros<T>(in, &out, stream); rmm::device_uvector<T> epochs_per_sample(out.nnz, stream); RAFT_CUDA_TRY(cudaMemsetAsync(epochs_per_sample.data(), 0, out.nnz * sizeof(T), stream)); make_epochs_per_sample(out.vals(), out.nnz, n_epochs, epochs_per_sample.data(), stream); if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { std::stringstream ss; ss << raft::arr2Str(epochs_per_sample.data(), out.nnz, "epochs_per_sample", stream); CUML_LOG_DEBUG(ss.str().c_str()); } optimize_layout<TPB_X, T>(embedding, m, embedding, m, out.rows(), out.cols(), out.nnz, epochs_per_sample.data(), params->repulsion_strength, params, n_epochs, stream); RAFT_CUDA_TRY(cudaPeekAtLastError()); } } // namespace Algo } // namespace SimplSetEmbed } // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src/umap
rapidsai_public_repos/cuml/cpp/src/umap/simpl_set_embed/runner.cuh
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "algo.cuh" #include <cuml/manifold/umapparams.h> #include <raft/sparse/coo.hpp> namespace UMAPAlgo { namespace SimplSetEmbed { using namespace ML; template <int TPB_X, typename T> void run(int m, int n, raft::sparse::COO<T>* coo, UMAPParams* params, T* embedding, cudaStream_t stream, int algorithm = 0) { switch (algorithm) { case 0: SimplSetEmbed::Algo::launcher<TPB_X, T>(m, n, coo, params, embedding, stream); } } } // namespace SimplSetEmbed } // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src/umap
rapidsai_public_repos/cuml/cpp/src/umap/fuzzy_simpl_set/naive.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/common/logger.hpp> #include <cuml/manifold/umapparams.h> #include <cuml/neighbors/knn.hpp> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <raft/sparse/coo.hpp> #include <raft/sparse/linalg/symmetrize.cuh> #include <raft/sparse/op/sort.cuh> #include <raft/stats/mean.cuh> #include <cuda_runtime.h> #include <stdio.h> #include <string> namespace UMAPAlgo { namespace FuzzySimplSet { namespace Naive { using namespace ML; static const float MAX_FLOAT = std::numeric_limits<float>::max(); static const float MIN_FLOAT = std::numeric_limits<float>::min(); static const float SMOOTH_K_TOLERANCE = 1e-5; static const float MIN_K_DIST_SCALE = 1e-3; /** * Computes a continuous version of the distance to the kth nearest neighbor. * That is, this is similar to knn-distance but allows continuous k values * rather than requiring an integral k. In essence, we are simply computing * the distance such that the cardinality of fuzzy set we generate is k. * * TODO: The data needs to be in column-major format (and the indices * of knn_dists and knn_inds transposed) so that we can take advantage * of read-coalescing within each block where possible. * * @param knn_dists: Distances to nearest neighbors for each sample. Each row should * be a sorted list of distances to a given sample's nearest neighbors. 
*
 * @param n: The number of samples
 * @param mean_dist: The mean distance
 * @param sigmas: An array of size n representing the distance to the kth nearest neighbor,
 * as suitably approximated.
 * @param rhos: An array of size n representing the distance to the 1st nearest neighbor
 * for each point.
 * @param n_neighbors: The number of neighbors
 *
 * @param local_connectivity: The local connectivity required -- i.e. the number of nearest
 *                            neighbors that should be assumed to be connected at a local
 *                            level. The higher this value the more connected the manifold
 *                            becomes locally. In practice, this should not be more than the
 *                            local intrinsic dimension of the manifold.
 *
 * @param n_iter The number of smoothing iterations to run
 * @param bandwidth Scale factor for log of neighbors
 *
 * Descriptions adapted from: https://github.com/lmcinnes/umap/blob/master/umap/umap_.py
 *
 */
template <int TPB_X, typename value_t>
__global__ void smooth_knn_dist_kernel(const value_t* knn_dists,
                                       int n,
                                       float mean_dist,
                                       value_t* sigmas,
                                       value_t* rhos,  // Size of n, iniitalized to zeros
                                       int n_neighbors,
                                       float local_connectivity = 1.0,
                                       int n_iter               = 64,
                                       float bandwidth          = 1.0)
{
  // row-based matrix 1 thread per row
  int row = (blockIdx.x * TPB_X) + threadIdx.x;
  int i   = row * n_neighbors;  // each thread processes one row of the dist matrix

  if (row < n) {
    // target cardinality of the fuzzy set: log2(k) * bandwidth
    float target = __log2f(n_neighbors) * bandwidth;
    // binary-search state for sigma
    float lo  = 0.0;
    float hi  = MAX_FLOAT;
    float mid = 1.0;

    // Scan the row once: count nonzero distances, find the first nonzero
    // index and the running max, and accumulate the row sum.
    int total_nonzero = 0;
    // NOTE(review): max_nonzero is an int but stores float distances, so the
    // comparison and assignment below truncate — confirm this is intended.
    int max_nonzero   = -1;
    int start_nonzero = -1;
    float sum         = 0.0;

    for (int idx = 0; idx < n_neighbors; idx++) {
      float cur_dist = knn_dists[i + idx];
      sum += cur_dist;
      if (cur_dist > 0.0) {
        if (start_nonzero == -1) start_nonzero = idx;
        total_nonzero++;
      }
      if (cur_dist > max_nonzero) max_nonzero = cur_dist;
    }

    float ith_distances_mean = sum / float(n_neighbors);

    // rho: distance to the local_connectivity-th nonzero neighbor, linearly
    // interpolated for fractional local_connectivity.
    if (total_nonzero >= local_connectivity) {
      int index           = int(floor(local_connectivity));
      float interpolation = local_connectivity - index;

      if (index > 0) {
        rhos[row] = knn_dists[i + start_nonzero + (index - 1)];

        if (interpolation > SMOOTH_K_TOLERANCE) {
          rhos[row] += interpolation * (knn_dists[i + start_nonzero + index] -
                                        knn_dists[i + start_nonzero + (index - 1)]);
        }
      } else
        rhos[row] = interpolation * knn_dists[i + start_nonzero];
    } else if (total_nonzero > 0)
      rhos[row] = max_nonzero;

    // Binary search for sigma such that sum_j exp(-(d_j - rho)/sigma) == target.
    for (int iter = 0; iter < n_iter; iter++) {
      float psum = 0.0;
      for (int j = 1; j < n_neighbors; j++) {
        float d = knn_dists[i + j] - rhos[row];
        if (d > 0)
          psum += exp(-(d / mid));
        else
          psum += 1.0;
      }

      if (fabsf(psum - target) < SMOOTH_K_TOLERANCE) { break; }

      if (psum > target) {
        hi  = mid;
        mid = (lo + hi) / 2.0;
      } else {
        lo = mid;
        if (hi == MAX_FLOAT)
          mid *= 2;  // no upper bound found yet: expand the bracket
        else
          mid = (lo + hi) / 2.0;
      }
    }

    sigmas[row] = mid;

    // Clamp sigma away from zero relative to the row mean (or the global
    // mean when rho is zero) for numerical stability.
    if (rhos[row] > 0.0) {
      if (sigmas[row] < MIN_K_DIST_SCALE * ith_distances_mean)
        sigmas[row] = MIN_K_DIST_SCALE * ith_distances_mean;
    } else {
      if (sigmas[row] < MIN_K_DIST_SCALE * mean_dist) sigmas[row] = MIN_K_DIST_SCALE * mean_dist;
    }
  }
}

/**
 * Construct the membership strength data for the 1-skeleton of each local
 * fuzzy simplicial set -- this is formed as a sparse matrix (COO) where each
 * row is a local fuzzy simplicial set, with a membership strength for the
 * 1-simplex to each other data point.
*
 * @param knn_indices: the knn index matrix of size (n, k)
 * @param knn_dists: the knn distance matrix of size (n, k)
 * @param sigmas: array of size n representing distance to kth nearest neighbor
 * @param rhos: array of size n representing distance to the first nearest neighbor
 * @param vals: value_t array of size n*k
 * @param rows: value_idx array of size n
 * @param cols: value_idx array of size k
 * @param n Number of samples (rows in knn indices/distances)
 * @param n_neighbors number of columns in knn indices/distances
 *
 * Descriptions adapted from: https://github.com/lmcinnes/umap/blob/master/umap/umap_.py
 */
template <int TPB_X, typename value_idx, typename value_t>
__global__ void compute_membership_strength_kernel(
  const value_idx* knn_indices,
  const float* knn_dists,  // nn outputs
  const value_t* sigmas,
  const value_t* rhos,  // continuous dists to nearest neighbors
  value_t* vals,
  int* rows,
  int* cols,  // result coo
  int n,
  int n_neighbors)  // model params
{
  // row-based matrix is best: one thread per (sample, neighbor) entry
  int idx = (blockIdx.x * TPB_X) + threadIdx.x;
  if (idx < n * n_neighbors) {
    int row = idx / n_neighbors;  // one neighbor per thread

    double cur_rho   = rhos[row];
    double cur_sigma = sigmas[row];

    value_idx cur_knn_ind = knn_indices[idx];
    double cur_knn_dist   = knn_dists[idx];

    // -1 marks a missing/invalid neighbor; the COO entry stays untouched.
    if (cur_knn_ind != -1) {
      double val = 0.0;
      if (cur_knn_ind == row)
        val = 0.0;  // no self-membership
      else if (cur_knn_dist - cur_rho <= 0.0 || cur_sigma == 0.0)
        val = 1.0;  // within local connectivity: full membership
      else {
        val = exp(-((cur_knn_dist - cur_rho) / (cur_sigma)));

        if (val < MIN_FLOAT) val = MIN_FLOAT;  // keep strictly positive
      }

      rows[idx] = row;
      cols[idx] = cur_knn_ind;
      vals[idx] = val;
    }
  }
}

/*
 * Sets up and runs the knn dist smoothing
 */
template <int TPB_X, typename value_idx, typename value_t>
void smooth_knn_dist(int n,
                     const value_idx* knn_indices,
                     const float* knn_dists,
                     value_t* rhos,
                     value_t* sigmas,
                     UMAPParams* params,
                     int n_neighbors,
                     float local_connectivity,
                     cudaStream_t stream)
{
  dim3 grid(raft::ceildiv(n, TPB_X), 1, 1);
  dim3 blk(TPB_X, 1, 1);

  rmm::device_uvector<value_t> dist_means_dev(n_neighbors, stream);

  // Mean of all n * n_neighbors distances, copied back to the host so it can
  // be passed to the kernel by value.
  raft::stats::mean(dist_means_dev.data(), knn_dists, 1, n_neighbors * n, false, false, stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  value_t mean_dist = 0.0;
  raft::update_host(&mean_dist, dist_means_dev.data(), 1, stream);
  raft::interruptible::synchronize(stream);

  /**
   * Smooth kNN distances to be continuous
   */
  smooth_knn_dist_kernel<TPB_X><<<grid, blk, 0, stream>>>(
    knn_dists, n, mean_dist, sigmas, rhos, n_neighbors, local_connectivity);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/**
 * Given a set of X, a neighborhood size, and a measure of distance, compute
 * the fuzzy simplicial set (here represented as a fuzzy graph in the form of
 * a sparse coo matrix) associated to the data. This is done by locally
 * approximating geodesic (manifold surface) distance at each point, creating
 * a fuzzy simplicial set for each such point, and then combining all the local
 * fuzzy simplicial sets into a global one via a fuzzy union.
 *
 * @param n the number of rows/elements in X
 * @param knn_indices indexes of knn search
 * @param knn_dists distances of knn search
 * @param n_neighbors number of neighbors in knn search arrays
 * @param out The output COO sparse matrix
 * @param params UMAPParams config object
 * @param stream cuda stream to use for device operations
 */
template <int TPB_X, typename value_idx, typename value_t>
void launcher(int n,
              const value_idx* knn_indices,
              const value_t* knn_dists,
              int n_neighbors,
              raft::sparse::COO<value_t>* out,
              UMAPParams* params,
              cudaStream_t stream)
{
  /**
   * Calculate mean distance through a parallel reduction
   */
  rmm::device_uvector<value_t> sigmas(n, stream);
  rmm::device_uvector<value_t> rhos(n, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(sigmas.data(), 0, n * sizeof(value_t), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(rhos.data(), 0, n * sizeof(value_t), stream));

  smooth_knn_dist<TPB_X, value_idx, value_t>(
    n, knn_indices, knn_dists, rhos.data(), sigmas.data(), params, n_neighbors,
    params->local_connectivity, stream);

  raft::sparse::COO<value_t> in(stream, n * n_neighbors, n, n);

  // check for logging in order to avoid the potentially costly `arr2Str` call!
  if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) {
    CUML_LOG_DEBUG("Smooth kNN Distances");
    auto str = raft::arr2Str(sigmas.data(), 25, "sigmas", stream);
    CUML_LOG_DEBUG("%s", str.c_str());
    str = raft::arr2Str(rhos.data(), 25, "rhos", stream);
    CUML_LOG_DEBUG("%s", str.c_str());
  }

  RAFT_CUDA_TRY(cudaPeekAtLastError());

  /**
   * Compute graph of membership strengths
   */
  dim3 grid_elm(raft::ceildiv(n * n_neighbors, TPB_X), 1, 1);
  dim3 blk_elm(TPB_X, 1, 1);

  compute_membership_strength_kernel<TPB_X><<<grid_elm, blk_elm, 0, stream>>>(knn_indices,
                                                                              knn_dists,
                                                                              sigmas.data(),
                                                                              rhos.data(),
                                                                              in.vals(),
                                                                              in.rows(),
                                                                              in.cols(),
                                                                              in.n_rows,
                                                                              n_neighbors);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) {
    CUML_LOG_DEBUG("Compute Membership Strength");
    std::stringstream ss;
    ss << in;
    CUML_LOG_DEBUG(ss.str().c_str());
  }

  /**
   * Combines all the fuzzy simplicial sets into a global
   * one via a fuzzy union. (Symmetrize knn graph).
   */
  float set_op_mix_ratio = params->set_op_mix_ratio;
  // set_op_mix_ratio = 1 -> fuzzy union; 0 -> fuzzy intersection.
  raft::sparse::linalg::coo_symmetrize<value_t>(
    &in,
    out,
    [set_op_mix_ratio] __device__(int row, int col, value_t result, value_t transpose) {
      value_t prod_matrix = result * transpose;
      value_t res         = set_op_mix_ratio * (result + transpose - prod_matrix) +
                    (1.0 - set_op_mix_ratio) * prod_matrix;
      return res;
    },
    stream);

  raft::sparse::op::coo_sort<value_t>(out, stream);
}

}  // namespace Naive
}  // namespace FuzzySimplSet
};  // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src/umap
rapidsai_public_repos/cuml/cpp/src/umap/fuzzy_simpl_set/runner.cuh
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "naive.cuh" #include <cuml/manifold/umapparams.h> #include <raft/sparse/coo.hpp> namespace UMAPAlgo { namespace FuzzySimplSet { using namespace ML; /** * Calculates a fuzzy simplicial set of the input X and kNN results * @param n: number of rows in X * @param knn_indices: matrix of kNN indices size (nxn) * @param knn_dists: matrix of kNN dists size (nxn) * @param n_neighbors number of neighbors * @param coo input knn-graph * @param params umap parameters * @param stream cuda stream * @param algorithm algo type to choose */ template <int TPB_X, typename value_idx, typename T> void run(int n, const value_idx* knn_indices, const T* knn_dists, int n_neighbors, raft::sparse::COO<T>* coo, UMAPParams* params, cudaStream_t stream, int algorithm = 0) { switch (algorithm) { case 0: Naive::launcher<TPB_X, value_idx, T>( n, knn_indices, knn_dists, n_neighbors, coo, params, stream); break; } } } // namespace FuzzySimplSet }; // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src/umap
rapidsai_public_repos/cuml/cpp/src/umap/knn_graph/algo.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuml/manifold/common.hpp> #include <cuml/manifold/umapparams.h> #include <cuml/neighbors/knn_sparse.hpp> #include <iostream> #include <raft/distance/distance_types.hpp> #include <raft/linalg/unary_op.cuh> #include <raft/sparse/selection/knn.cuh> #include <raft/spatial/knn/knn.cuh> #include <raft/util/cudart_utils.hpp> #include <raft/core/error.hpp> namespace UMAPAlgo { namespace kNNGraph { namespace Algo { /** * Initial implementation calls out to FAISS to do its work. 
*/
// Primary template: resolved only through the explicit specializations below,
// one per (input representation, index type) combination.
template <typename value_idx = int64_t, typename value_t = float, typename umap_inputs>
void launcher(const raft::handle_t& handle,
              const umap_inputs& inputsA,
              const umap_inputs& inputsB,
              ML::knn_graph<value_idx, value_t>& out,
              int n_neighbors,
              const ML::UMAPParams* params,
              cudaStream_t stream);

// Instantiation for dense inputs, int64_t indices
template <>
inline void launcher(const raft::handle_t& handle,
                     const ML::manifold_dense_inputs_t<float>& inputsA,
                     const ML::manifold_dense_inputs_t<float>& inputsB,
                     ML::knn_graph<int64_t, float>& out,
                     int n_neighbors,
                     const ML::UMAPParams* params,
                     cudaStream_t stream)
{
  // Wrap the single dense index matrix in the multi-part API that the
  // brute-force primitive expects.
  std::vector<float*> ptrs(1);
  std::vector<int> sizes(1);
  ptrs[0]  = inputsA.X;
  sizes[0] = inputsA.n;

  raft::spatial::knn::brute_force_knn(handle,
                                      ptrs,
                                      sizes,
                                      inputsA.d,
                                      inputsB.X,
                                      inputsB.n,
                                      out.knn_indices,
                                      out.knn_dists,
                                      n_neighbors,
                                      // NOTE(review): presumably rowMajorIndex /
                                      // rowMajorQuery — confirm against the
                                      // brute_force_knn signature in use.
                                      true,
                                      true,
                                      static_cast<std::vector<int64_t>*>(nullptr),
                                      params->metric,
                                      params->p);
}

// Instantiation for dense inputs, int indices
template <>
inline void launcher(const raft::handle_t& handle,
                     const ML::manifold_dense_inputs_t<float>& inputsA,
                     const ML::manifold_dense_inputs_t<float>& inputsB,
                     ML::knn_graph<int, float>& out,
                     int n_neighbors,
                     const ML::UMAPParams* params,
                     cudaStream_t stream)
{
  throw raft::exception("Dense KNN doesn't yet support 32-bit integer indices");
}

// Instantiation for sparse (CSR) inputs, int indices
template <>
inline void launcher(const raft::handle_t& handle,
                     const ML::manifold_sparse_inputs_t<int, float>& inputsA,
                     const ML::manifold_sparse_inputs_t<int, float>& inputsB,
                     ML::knn_graph<int, float>& out,
                     int n_neighbors,
                     const ML::UMAPParams* params,
                     cudaStream_t stream)
{
  raft::sparse::selection::brute_force_knn(inputsA.indptr,
                                           inputsA.indices,
                                           inputsA.data,
                                           inputsA.nnz,
                                           inputsA.n,
                                           inputsA.d,
                                           inputsB.indptr,
                                           inputsB.indices,
                                           inputsB.data,
                                           inputsB.nnz,
                                           inputsB.n,
                                           inputsB.d,
                                           out.knn_indices,
                                           out.knn_dists,
                                           n_neighbors,
                                           handle,
                                           ML::Sparse::DEFAULT_BATCH_SIZE,
                                           ML::Sparse::DEFAULT_BATCH_SIZE,
                                           params->metric,
                                           params->p);
}

// Instantiation for sparse inputs, int64_t indices (unsupported)
template <>
inline void launcher(const raft::handle_t& handle,
                     const ML::manifold_sparse_inputs_t<int64_t, float>& inputsA,
                     const ML::manifold_sparse_inputs_t<int64_t, float>& inputsB,
                     ML::knn_graph<int64_t, float>& out,
                     int n_neighbors,
                     const ML::UMAPParams* params,
                     cudaStream_t stream)
{
  throw raft::exception("Sparse KNN doesn't support 64-bit integer indices");
}

// Instantiation for precomputed inputs, int64_t indices:
// no search needed, just forward the caller-supplied graph pointers.
template <>
inline void launcher(const raft::handle_t& handle,
                     const ML::manifold_precomputed_knn_inputs_t<int64_t, float>& inputsA,
                     const ML::manifold_precomputed_knn_inputs_t<int64_t, float>& inputsB,
                     ML::knn_graph<int64_t, float>& out,
                     int n_neighbors,
                     const ML::UMAPParams* params,
                     cudaStream_t stream)
{
  out.knn_indices = inputsA.knn_graph.knn_indices;
  out.knn_dists   = inputsA.knn_graph.knn_dists;
}

// Instantiation for precomputed inputs, int indices
template <>
inline void launcher(const raft::handle_t& handle,
                     const ML::manifold_precomputed_knn_inputs_t<int, float>& inputsA,
                     const ML::manifold_precomputed_knn_inputs_t<int, float>& inputsB,
                     ML::knn_graph<int, float>& out,
                     int n_neighbors,
                     const ML::UMAPParams* params,
                     cudaStream_t stream)
{
  out.knn_indices = inputsA.knn_graph.knn_indices;
  out.knn_dists   = inputsA.knn_graph.knn_dists;
}

}  // namespace Algo
}  // namespace kNNGraph
};  // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src/umap
rapidsai_public_repos/cuml/cpp/src/umap/knn_graph/runner.cuh
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "algo.cuh"

#include <cuml/manifold/common.hpp>

namespace UMAPAlgo {

namespace kNNGraph {

using namespace ML;

/**
 * @brief Performs a k-nearest-neighbors query against the input data using
 *        the specified knn algorithm. The only algorithm supported at the
 *        moment is the brute-force knn primitive (dispatched through
 *        Algo::launcher).
 *
 * @tparam value_idx: Type of knn indices matrix. Usually an integral type.
 * @tparam value_t: Type of input, query, and dist matrices. Usually float.
 * @tparam umap_inputs: Input wrapper type (dense / sparse / precomputed).
 * @param[in]  handle: raft handle providing resources (stream, allocators)
 * @param[in]  inputsA: data to index (the "reference" set)
 * @param[in]  inputsB: data to query against inputsA
 * @param[out] out: output knn_indices and knn_dists (size n*k)
 * @param[in]  n_neighbors: number of closest neighbors, k, to query
 * @param[in]  params: instance of UMAPParams settings
 * @param[in]  stream: cuda stream to use
 * @param[in]  algo: algorithm to use; currently only 0 (brute force) is
 *             supported — any other value is silently a no-op.
 */
template <typename value_idx = int64_t, typename value_t = float, typename umap_inputs>
void run(const raft::handle_t& handle,
         const umap_inputs& inputsA,
         const umap_inputs& inputsB,
         knn_graph<value_idx, value_t>& out,
         int n_neighbors,
         const UMAPParams* params,
         cudaStream_t stream,
         int algo = 0)
{
  switch (algo) {
    /**
     * Initial algo uses FAISS indices (via the brute-force launcher)
     */
    case 0:
      Algo::launcher<value_idx, value_t, umap_inputs>(
        handle, inputsA, inputsB, out, n_neighbors, params, stream);
      break;
  }
}

}  // namespace kNNGraph
};  // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src/umap
rapidsai_public_repos/cuml/cpp/src/umap/init_embed/random_algo.cuh
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuml/manifold/umapparams.h>
#include <raft/random/rng.cuh>

namespace UMAPAlgo {

namespace InitEmbed {

namespace RandomInit {

using namespace ML;

/**
 * Random embedding initialization: fill the (n x n_components) embedding
 * with values drawn uniformly from [-10, 10], seeded from
 * params->random_state so runs are reproducible.
 *
 * @param[in]  n         number of samples
 * @param[in]  d         input dimensionality (unused here)
 * @param[in]  params    UMAP settings (n_components, random_state)
 * @param[out] embedding device buffer of size n * params->n_components
 * @param[in]  stream    cuda stream for the RNG launch
 */
template <typename T>
void launcher(int n, int d, UMAPParams* params, T* embedding, cudaStream_t stream)
{
  raft::random::Rng rng(static_cast<uint64_t>(params->random_state));
  rng.uniform<T>(embedding, n * params->n_components, -10, 10, stream);
}
}  // namespace RandomInit
}  // namespace InitEmbed
};  // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src/umap
rapidsai_public_repos/cuml/cpp/src/umap/init_embed/runner.cuh
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuml/manifold/umapparams.h>
#include <raft/sparse/coo.hpp>

#include "random_algo.cuh"
#include "spectral_algo.cuh"

namespace UMAPAlgo {

namespace InitEmbed {

using namespace ML;

/**
 * Dispatch the initial-embedding strategy for UMAP.
 *
 * @param[in]  handle    raft handle
 * @param[in]  n         number of samples
 * @param[in]  d         input dimensionality
 * @param[in]  coo       fuzzy simplicial set (used by spectral init only)
 * @param[in]  params    UMAP settings
 * @param[out] embedding output buffer of size n * params->n_components
 * @param[in]  stream    cuda stream
 *             NOTE(review): `stream` is currently unused — case 0 passes
 *             handle.get_stream() instead; confirm whether they can differ
 *             at the call sites.
 * @param[in]  algo      0 = random uniform init, 1 = spectral init
 *                       (any other value is silently a no-op)
 */
template <typename T>
void run(const raft::handle_t& handle,
         int n,
         int d,
         raft::sparse::COO<float>* coo,
         UMAPParams* params,
         T* embedding,
         cudaStream_t stream,
         int algo = 0)
{
  switch (algo) {
    // Random initialization: uniform values in [-10, 10]
    case 0: RandomInit::launcher(n, d, params, embedding, handle.get_stream()); break;

    // Spectral initialization from the fuzzy simplicial set
    case 1: SpectralInit::launcher(handle, n, d, coo, params, embedding); break;
  }
}
}  // namespace InitEmbed
};  // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src/umap
rapidsai_public_repos/cuml/cpp/src/umap/init_embed/spectral_algo.cuh
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuml/cluster/spectral.hpp>
#include <cuml/manifold/umapparams.h>

#include <raft/linalg/add.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/random/rng.cuh>
#include <raft/sparse/coo.hpp>

#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>

#include <iostream>

namespace UMAPAlgo {

namespace InitEmbed {

namespace SpectralInit {

using namespace ML;

/**
 * Performs a spectral layout initialization:
 *   1. compute a spectral embedding of the fuzzy simplicial set (COO graph)
 *      into tmp_storage (laid out component-major),
 *   2. transpose it into `embedding` (sample-major),
 *   3. rescale the embedding so its maximum absolute value is 10,
 *   4. add small Gaussian jitter (sigma = 1e-4) to break symmetry.
 *
 * @param[in]  handle    raft handle
 * @param[in]  n         number of samples (must exceed n_components)
 * @param[in]  d         input dimensionality (unused here)
 * @param[in]  coo       fuzzy simplicial set graph
 * @param[in]  params    UMAP settings (n_components, random_state)
 * @param[out] embedding output buffer of size n * params->n_components
 */
template <typename T>
void launcher(const raft::handle_t& handle,
              int n,
              int d,
              raft::sparse::COO<float>* coo,
              UMAPParams* params,
              T* embedding)
{
  cudaStream_t stream = handle.get_stream();

  ASSERT(n > params->n_components, "Spectral layout requires n_samples > n_components");

  rmm::device_uvector<T> tmp_storage(n * params->n_components, stream);

  uint64_t seed = params->random_state;

  Spectral::fit_embedding(handle,
                          coo->rows(),
                          coo->cols(),
                          coo->vals(),
                          coo->nnz,
                          n,
                          params->n_components,
                          tmp_storage.data(),
                          seed);

  raft::linalg::transpose(handle, tmp_storage.data(), embedding, n, params->n_components, stream);

  // Take |x| in place so the max below is the max absolute value.
  // NOTE(review): fabsf is the float overload — presumably T is always float
  // here; confirm before instantiating with double.
  raft::linalg::unaryOp<T>(
    tmp_storage.data(),
    tmp_storage.data(),
    n * params->n_components,
    [=] __device__(T input) { return fabsf(input); },
    stream);

  thrust::device_ptr<T> d_ptr = thrust::device_pointer_cast(tmp_storage.data());
  T max = *(thrust::max_element(
    thrust::cuda::par.on(stream), d_ptr, d_ptr + (n * params->n_components)));

  // Reuse tmp_storage to add random noise
  raft::random::Rng r(seed);
  r.normal(tmp_storage.data(), n * params->n_components, 0.0f, 0.0001f, stream);

  // Rescale so the largest-magnitude coordinate becomes 10.
  raft::linalg::unaryOp<T>(
    embedding,
    embedding,
    n * params->n_components,
    [=] __device__(T input) { return (10.0f / max) * input; },
    stream);

  raft::linalg::add(embedding, embedding, tmp_storage.data(), n * params->n_components, stream);

  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}  // namespace SpectralInit
}  // namespace InitEmbed
};  // namespace UMAPAlgo
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsa/auto_arima.cuh
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuda_runtime.h>

#include <cub/cub.cuh>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
#include <thrust/transform.h>

#include <common/fast_int_div.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>

namespace ML {
namespace TimeSeries {

// Converts a bool to 0/1 so CUB can scan a boolean mask as integers.
struct BoolToIntFunctor {
  HDI int operator()(const bool& a) const { return static_cast<int>(a); }
};

/**
 * Helper to compute the cumulative (inclusive) sum of a boolean mask
 *
 * @param[in]  mask      Input boolean array
 * @param[out] cumul     Output cumulative sum
 * @param[in]  mask_size Size of the arrays
 * @param[in]  stream    CUDA stream
 */
// FIX: marked `inline` — this is a non-template free function defined in a
// header; without `inline` every translation unit including this header gets
// its own definition, which violates the ODR (the sibling helpers in this
// file are already `inline`).
inline void cumulative_sum_helper(const bool* mask,
                                  int* cumul,
                                  int mask_size,
                                  cudaStream_t stream)
{
  BoolToIntFunctor conversion_op;
  cub::TransformInputIterator<int, BoolToIntFunctor, const bool*> itr(mask, conversion_op);

  // Determine temporary storage size (first call with NULL only sizes)
  size_t temp_storage_bytes = 0;
  cub::DeviceScan::InclusiveSum(NULL, temp_storage_bytes, itr, cumul, mask_size, stream);

  // Allocate temporary storage
  rmm::device_uvector<uint8_t> temp_storage(temp_storage_bytes, stream);
  void* d_temp_storage = (void*)temp_storage.data();

  // Execute the scan
  cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, itr, cumul, mask_size, stream);
}

/**
 * Batch division by mask step 1: build an index of the position of each series
 * in its new batch and measure the size of each sub-batch
 *
 * @param[in]  d_mask     Boolean mask
 * @param[out] d_index    Index of each series in its new batch
 * @param[in]  batch_size Batch size
 * @param[in]  stream     CUDA stream
 * @return The number of 'true' series in the mask
 */
inline int divide_by_mask_build_index(const bool* d_mask,
                                      int* d_index,
                                      int batch_size,
                                      cudaStream_t stream)
{
  // Inverse mask
  rmm::device_uvector<bool> inv_mask(batch_size, stream);
  thrust::transform(thrust::cuda::par.on(stream),
                    d_mask,
                    d_mask + batch_size,
                    inv_mask.data(),
                    thrust::logical_not<bool>());

  // Cumulative sum of the inverse mask
  rmm::device_uvector<int> index0(batch_size, stream);
  cumulative_sum_helper(inv_mask.data(), index0.data(), batch_size, stream);

  // Cumulative sum of the mask
  rmm::device_uvector<int> index1(batch_size, stream);
  cumulative_sum_helper(d_mask, index1.data(), batch_size, stream);

  // Combine both cumulative sums according to the mask and subtract 1
  // (inclusive scans are 1-based counts; positions are 0-based)
  const int* d_index0 = index0.data();
  const int* d_index1 = index1.data();
  auto counting       = thrust::make_counting_iterator(0);
  thrust::for_each(
    thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int i) {
      d_index[i] = (d_mask[i] ? d_index1[i] : d_index0[i]) - 1;
    });

  // Compute and return the number of true elements in the mask
  // (last element of the mask's inclusive scan)
  int true_elements;
  raft::update_host(&true_elements, index1.data() + batch_size - 1, 1, stream);
  raft::interruptible::synchronize(stream);
  return true_elements;
}

/**
 * Kernel for the batch division by mask. One block per series; threads
 * copy the series cooperatively.
 *
 * @param[in]  d_in    Input batch
 * @param[in]  d_mask  Boolean mask
 * @param[in]  d_index Index of each series in its new batch
 * @param[out] d_out0  The sub-batch for the 'false' members
 * @param[out] d_out1  The sub-batch for the 'true' members
 * @param[in]  n_obs   Number of data points per series
 */
template <typename DataT>
__global__ void divide_by_mask_kernel(const DataT* d_in,
                                      const bool* d_mask,
                                      const int* d_index,
                                      DataT* d_out0,
                                      DataT* d_out1,
                                      int n_obs)
{
  const DataT* b_in = d_in + n_obs * blockIdx.x;
  DataT* b_out      = (d_mask[blockIdx.x] ? d_out1 : d_out0) + n_obs * d_index[blockIdx.x];
  for (int i = threadIdx.x; i < n_obs; i += blockDim.x) {
    b_out[i] = b_in[i];
  }
}

/**
 * Batch division by mask step 2: create both sub-batches from the mask and
 * index
 *
 * @param[in]  d_in       Input batch. Each series is a contiguous chunk
 * @param[in]  d_mask     Boolean mask
 * @param[in]  d_index    Index of each series in its new batch
 * @param[out] d_out0     The sub-batch for the 'false' members
 * @param[out] d_out1     The sub-batch for the 'true' members
 * @param[in]  batch_size Batch size
 * @param[in]  n_obs      Number of data points per series
 * @param[in]  stream     CUDA stream
 */
template <typename DataT>
inline void divide_by_mask_execute(const DataT* d_in,
                                   const bool* d_mask,
                                   const int* d_index,
                                   DataT* d_out0,
                                   DataT* d_out1,
                                   int batch_size,
                                   int n_obs,
                                   cudaStream_t stream)
{
  if (n_obs == 1) {
    // One element per series: a thrust scatter is enough
    auto counting = thrust::make_counting_iterator(0);
    thrust::for_each(
      thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int i) {
        (d_mask[i] ? d_out1 : d_out0)[d_index[i]] = d_in[i];
      });
  } else {
    int TPB = std::min(64, n_obs);
    divide_by_mask_kernel<<<batch_size, TPB, 0, stream>>>(
      d_in, d_mask, d_index, d_out0, d_out1, n_obs);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
}

/* A structure that defines a function to get the column of an element of
 * a matrix from its index. This makes possible a 2d scan with thrust.
 * Found in thrust/examples/scan_matrix_by_rows.cu
 */
struct which_col : thrust::unary_function<int, int> {
  MLCommon::FastIntDiv divisor;

  __host__ which_col(int col_length) : divisor(col_length) {}

  __host__ __device__ int operator()(int idx) const { return idx / divisor; }
};

/**
 * Batch division by minimum value step 1: build an index of which sub-batch
 * each series belongs to, an index of the position of each series in its new
 * batch, and measure the size of each sub-batch
 *
 * @param[in]  d_matrix   Matrix of the values to minimize
 *                        Shape: (batch_size, n_sub)
 * @param[out] d_batch    Which sub-batch each series belongs to
 * @param[out] d_index    Index of each series in its new batch
 * @param[out] h_size     Size of each sub-batch (host)
 * @param[in]  batch_size Batch size
 * @param[in]  n_sub      Number of sub-batches
 * @param[in]  stream     CUDA stream
 */
template <typename DataT>
inline void divide_by_min_build_index(const DataT* d_matrix,
                                      int* d_batch,
                                      int* d_index,
                                      int* h_size,
                                      int batch_size,
                                      int n_sub,
                                      cudaStream_t stream)
{
  auto counting = thrust::make_counting_iterator(0);

  // In the first pass, compute d_batch and initialize the matrix that will
  // be used to compute d_size and d_index (1 for the first occurrence of the
  // minimum of each row, else 0)
  rmm::device_uvector<int> cumul(batch_size * n_sub, stream);
  int* d_cumul = cumul.data();
  RAFT_CUDA_TRY(cudaMemsetAsync(d_cumul, 0, batch_size * n_sub * sizeof(int), stream));
  thrust::for_each(
    thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int i) {
      int min_id      = 0;
      DataT min_value = d_matrix[i];
      for (int j = 1; j < n_sub; j++) {
        DataT Mij = d_matrix[j * batch_size + i];
        min_id    = (Mij < min_value) ? j : min_id;
        min_value = min(Mij, min_value);
      }
      d_batch[i]                       = min_id;
      d_cumul[min_id * batch_size + i] = 1;
    });

  // In the second pass, we compute the cumulative sum of each column of this
  // mask matrix
  thrust::transform_iterator<which_col, thrust::counting_iterator<int>> t_first(
    counting, which_col(batch_size));
  thrust::inclusive_scan_by_key(
    thrust::cuda::par.on(stream), t_first, t_first + batch_size * n_sub, d_cumul, d_cumul);

  // In the third pass, we compute d_index from d_cumul and d_batch
  thrust::for_each(
    thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int i) {
      d_index[i] = d_cumul[d_batch[i] * batch_size + i] - 1;
    });

  // Finally we also compute h_size from d_cumul (last element of each column
  // of the scanned matrix is that sub-batch's size)
  rmm::device_uvector<int> size_buffer(n_sub, stream);
  int* d_size = size_buffer.data();
  thrust::for_each(thrust::cuda::par.on(stream),
                   counting,
                   counting + n_sub,
                   [=] __device__(int j) { d_size[j] = d_cumul[(j + 1) * batch_size - 1]; });
  raft::update_host(h_size, d_size, n_sub, stream);
  raft::interruptible::synchronize(stream);
}

/**
 * Batch division by minimum value step 2: create all the sub-batches.
 * One block per series; threads copy the series cooperatively.
 *
 * @param[in]  d_in    Input batch
 * @param[in]  d_batch Which sub-batch each series belongs to
 * @param[in]  d_index Index of each series in its new sub-batch
 * @param[out] d_out   Array of pointers to the arrays of each sub-batch
 * @param[in]  n_obs   Number of data points per series
 */
template <typename DataT>
__global__ void divide_by_min_kernel(
  const DataT* d_in, const int* d_batch, const int* d_index, DataT** d_out, int n_obs)
{
  const DataT* b_in = d_in + n_obs * blockIdx.x;
  DataT* b_out      = d_out[d_batch[blockIdx.x]] + n_obs * d_index[blockIdx.x];
  for (int i = threadIdx.x; i < n_obs; i += blockDim.x) {
    b_out[i] = b_in[i];
  }
}

/**
 * Batch division by minimum value step 2: create all the sub-batches
 *
 * @param[in]  d_in       Input batch. Each series is a contiguous chunk
 * @param[in]  d_batch    Which sub-batch each series belongs to
 * @param[in]  d_index    Index of each series in its new sub-batch
 * @param[out] hd_out     Host array of pointers to device arrays of each
 *                        sub-batch
 * @param[in]  batch_size Batch size
 * @param[in]  n_sub      Number of sub-batches
 * @param[in]  n_obs      Number of data points per series
 * @param[in]  stream     CUDA stream
 */
template <typename DataT>
inline void divide_by_min_execute(const DataT* d_in,
                                  const int* d_batch,
                                  const int* d_index,
                                  DataT** hd_out,
                                  int batch_size,
                                  int n_sub,
                                  int n_obs,
                                  cudaStream_t stream)
{
  // Create a device array of pointers to each sub-batch
  rmm::device_uvector<DataT*> out_buffer(n_sub, stream);
  DataT** d_out = out_buffer.data();
  raft::update_device(d_out, hd_out, n_sub, stream);

  if (n_obs == 1) {
    auto counting = thrust::make_counting_iterator(0);
    thrust::for_each(
      thrust::cuda::par.on(stream), counting, counting + batch_size, [=] __device__(int i) {
        d_out[d_batch[i]][d_index[i]] = d_in[i];
      });
  } else {
    int TPB = std::min(64, n_obs);
    divide_by_min_kernel<<<batch_size, TPB, 0, stream>>>(d_in, d_batch, d_index, d_out, n_obs);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
}

/**
 * Kernel to build the division map. One block per sub-batch; each thread
 * writes the map entries of some members of that sub-batch.
 *
 * @param[in]  d_id          Array of pointers to arrays containing the indices
 *                           of the members of each sub-batch
 * @param[in]  d_size        Array containing the size of each sub-batch
 * @param[out] d_id_to_pos   Array containing the position of each member in
 *                           its new sub-batch
 * @param[out] d_id_to_model Array associating each member with its sub-batch
 */
__global__ void build_division_map_kernel(const int* const* d_id,
                                          const int* d_size,
                                          int* d_id_to_pos,
                                          int* d_id_to_model)
{
  const int* b_id = d_id[blockIdx.x];
  int b_size      = d_size[blockIdx.x];
  for (int i = threadIdx.x; i < b_size; i += blockDim.x) {
    int original_id              = b_id[i];
    d_id_to_pos[original_id]     = i;
    d_id_to_model[original_id]   = blockIdx.x;
  }
}

/**
 * Build a map to associate each batch member with a model and index in the
 * associated sub-batch
 *
 * @param[in]  hd_id         Host array of pointers to device arrays containing
 *                           the indices of the members of each sub-batch
 * @param[in]  h_size        Host array containing the size of each sub-batch
 * @param[out] d_id_to_pos   Device array containing the position of each
 *                           member in its new sub-batch
 * @param[out] d_id_to_model Device array associating each member with its
 *                           sub-batch
 * @param[in]  batch_size    Batch size
 * @param[in]  n_sub         Number of sub-batches
 * @param[in]  stream        CUDA stream
 */
inline void build_division_map(const int* const* hd_id,
                               const int* h_size,
                               int* d_id_to_pos,
                               int* d_id_to_model,
                               int batch_size,
                               int n_sub,
                               cudaStream_t stream)
{
  // Copy the pointers to the id trackers of each sub-batch to the device
  rmm::device_uvector<int*> id_ptr_buffer(n_sub, stream);
  const int** d_id = const_cast<const int**>(id_ptr_buffer.data());
  raft::update_device(d_id, hd_id, n_sub, stream);

  // Copy the size of each sub-batch to the device
  rmm::device_uvector<int> size_buffer(n_sub, stream);
  int* d_size = size_buffer.data();
  raft::update_device(d_size, h_size, n_sub, stream);

  // Pick the block size from the average sub-batch size (32..256 threads)
  int avg_size = batch_size / n_sub;
  int TPB      = avg_size > 256 ? 256 : (avg_size > 128 ? 128 : (avg_size > 64 ? 64 : 32));
  build_division_map_kernel<<<n_sub, TPB, 0, stream>>>(d_id, d_size, d_id_to_pos, d_id_to_model);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/**
 * Kernel to merge the series into a single batch. One block per series.
 *
 * @param[in]  d_in        Array of pointers to arrays containing the
 *                         sub-batches
 * @param[in]  d_id_to_pos Array containing the position of each member in its
 *                         new sub-batch
 * @param[in]  d_id_to_sub Array associating each member with its sub-batch
 * @param[out] d_out       Output merged batch
 * @param[in]  n_obs       Number of observations (or forecasts) per series
 */
template <typename DataT>
__global__ void merge_series_kernel(
  const DataT* const* d_in, const int* d_id_to_pos, const int* d_id_to_sub, DataT* d_out, int n_obs)
{
  const DataT* b_in = d_in[d_id_to_sub[blockIdx.x]] + n_obs * d_id_to_pos[blockIdx.x];
  DataT* b_out      = d_out + n_obs * blockIdx.x;
  for (int i = threadIdx.x; i < n_obs; i += blockDim.x) {
    b_out[i] = b_in[i];
  }
}

/**
 * Merge multiple sub-batches into one batch according to the maps that
 * associate each id in the unique batch to a sub-batch and a position in
 * this sub-batch.
 *
 * @param[in]  hd_in       Host array of pointers to device arrays containing
 *                         the sub-batches
 * @param[in]  d_id_to_pos Device array containing the position of each member
 *                         in its new sub-batch
 * @param[in]  d_id_to_sub Device array associating each member with its
 *                         sub-batch
 * @param[out] d_out       Output merged batch
 * @param[in]  batch_size  Batch size
 * @param[in]  n_sub       Number of sub-batches
 * @param[in]  n_obs       Number of observations (or forecasts) per series
 * @param[in]  stream      CUDA stream
 */
template <typename DataT>
inline void merge_series(const DataT* const* hd_in,
                         const int* d_id_to_pos,
                         const int* d_id_to_sub,
                         DataT* d_out,
                         int batch_size,
                         int n_sub,
                         int n_obs,
                         cudaStream_t stream)
{
  // Copy the pointers to each sub-batch to the device
  rmm::device_uvector<DataT*> in_buffer(n_sub, stream);
  const DataT** d_in = const_cast<const DataT**>(in_buffer.data());
  raft::update_device(d_in, hd_in, n_sub, stream);

  int TPB = std::min(64, n_obs);
  merge_series_kernel<<<batch_size, TPB, 0, stream>>>(
    d_in, d_id_to_pos, d_id_to_sub, d_out, n_obs);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

}  // namespace TimeSeries
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsa/auto_arima.cu
/*
 * Copyright (c) 2020-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Public (handle-based) entry points for the auto-ARIMA batch division /
// merge primitives. Each function here is a thin wrapper that extracts the
// stream from the raft handle and forwards to the implementation in
// auto_arima.cuh; the float/double/int overloads share a template helper.

#include <raft/core/handle.hpp>

#include "auto_arima.cuh"

#include <cuml/tsa/auto_arima.h>

namespace ML {

// Step 1 of division-by-mask: compute per-series positions, return the
// number of 'true' series.
int divide_by_mask_build_index(const raft::handle_t& handle,
                               const bool* d_mask,
                               int* d_index,
                               int batch_size)
{
  cudaStream_t stream = handle.get_stream();
  return ML::TimeSeries::divide_by_mask_build_index(d_mask, d_index, batch_size, stream);
}

// Shared implementation for the divide_by_mask_execute overloads below.
template <typename DataT>
inline void divide_by_mask_execute_helper(const raft::handle_t& handle,
                                          const DataT* d_in,
                                          const bool* d_mask,
                                          const int* d_index,
                                          DataT* d_out0,
                                          DataT* d_out1,
                                          int batch_size,
                                          int n_obs)
{
  cudaStream_t stream = handle.get_stream();
  ML::TimeSeries::divide_by_mask_execute(
    d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs, stream);
}

void divide_by_mask_execute(const raft::handle_t& handle,
                            const float* d_in,
                            const bool* d_mask,
                            const int* d_index,
                            float* d_out0,
                            float* d_out1,
                            int batch_size,
                            int n_obs)
{
  divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs);
}

void divide_by_mask_execute(const raft::handle_t& handle,
                            const double* d_in,
                            const bool* d_mask,
                            const int* d_index,
                            double* d_out0,
                            double* d_out1,
                            int batch_size,
                            int n_obs)
{
  divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs);
}

void divide_by_mask_execute(const raft::handle_t& handle,
                            const int* d_in,
                            const bool* d_mask,
                            const int* d_index,
                            int* d_out0,
                            int* d_out1,
                            int batch_size,
                            int n_obs)
{
  divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs);
}

// Shared implementation for the divide_by_min_build_index overloads below.
template <typename DataT>
inline void divide_by_min_build_index_helper(const raft::handle_t& handle,
                                             const DataT* d_matrix,
                                             int* d_batch,
                                             int* d_index,
                                             int* h_size,
                                             int batch_size,
                                             int n_sub)
{
  cudaStream_t stream = handle.get_stream();
  ML::TimeSeries::divide_by_min_build_index(
    d_matrix, d_batch, d_index, h_size, batch_size, n_sub, stream);
}

void divide_by_min_build_index(const raft::handle_t& handle,
                               const float* d_matrix,
                               int* d_batch,
                               int* d_index,
                               int* h_size,
                               int batch_size,
                               int n_sub)
{
  divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub);
}

void divide_by_min_build_index(const raft::handle_t& handle,
                               const double* d_matrix,
                               int* d_batch,
                               int* d_index,
                               int* h_size,
                               int batch_size,
                               int n_sub)
{
  divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub);
}

// Shared implementation for the divide_by_min_execute overloads below.
template <typename DataT>
inline void divide_by_min_execute_helper(const raft::handle_t& handle,
                                         const DataT* d_in,
                                         const int* d_batch,
                                         const int* d_index,
                                         DataT** hd_out,
                                         int batch_size,
                                         int n_sub,
                                         int n_obs)
{
  cudaStream_t stream = handle.get_stream();
  ML::TimeSeries::divide_by_min_execute(
    d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs, stream);
}

void divide_by_min_execute(const raft::handle_t& handle,
                           const float* d_in,
                           const int* d_batch,
                           const int* d_index,
                           float** hd_out,
                           int batch_size,
                           int n_sub,
                           int n_obs)
{
  divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs);
}

void divide_by_min_execute(const raft::handle_t& handle,
                           const double* d_in,
                           const int* d_batch,
                           const int* d_index,
                           double** hd_out,
                           int batch_size,
                           int n_sub,
                           int n_obs)
{
  divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs);
}

void divide_by_min_execute(const raft::handle_t& handle,
                           const int* d_in,
                           const int* d_batch,
                           const int* d_index,
                           int** hd_out,
                           int batch_size,
                           int n_sub,
                           int n_obs)
{
  divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs);
}

// Build the id -> (sub-batch, position) maps used to merge results later.
void build_division_map(const raft::handle_t& handle,
                        const int* const* hd_id,
                        const int* h_size,
                        int* d_id_to_pos,
                        int* d_id_to_model,
                        int batch_size,
                        int n_sub)
{
  cudaStream_t stream = handle.get_stream();
  ML::TimeSeries::build_division_map(
    hd_id, h_size, d_id_to_pos, d_id_to_model, batch_size, n_sub, stream);
}

// Shared implementation for the merge_series overloads below.
template <typename DataT>
inline void merge_series_helper(const raft::handle_t& handle,
                                const DataT* const* hd_in,
                                const int* d_id_to_pos,
                                const int* d_id_to_sub,
                                DataT* d_out,
                                int batch_size,
                                int n_sub,
                                int n_obs)
{
  cudaStream_t stream = handle.get_stream();
  ML::TimeSeries::merge_series(
    hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs, stream);
}

void merge_series(const raft::handle_t& handle,
                  const float* const* hd_in,
                  const int* d_id_to_pos,
                  const int* d_id_to_sub,
                  float* d_out,
                  int batch_size,
                  int n_sub,
                  int n_obs)
{
  merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs);
}

void merge_series(const raft::handle_t& handle,
                  const double* const* hd_in,
                  const int* d_id_to_pos,
                  const int* d_id_to_sub,
                  double* d_out,
                  int batch_size,
                  int n_sub,
                  int n_obs)
{
  merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs);
}

}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/tsa/stationarity.cu
/*
 * Copyright (c) 2019-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cuml/tsa/stationarity.h>

#include <raft/core/handle.hpp>

#include <timeSeries/stationarity.cuh>

namespace ML {

namespace Stationarity {

/**
 * Type-generic implementation shared by the float and double kpss_test
 * overloads: pulls the stream out of the handle and forwards everything
 * to the batched KPSS primitive.
 */
template <typename DataT>
inline void kpss_test_helper(const raft::handle_t& handle,
                             const DataT* d_y,
                             bool* results,
                             int batch_size,
                             int n_obs,
                             int d,
                             int D,
                             int s,
                             DataT pval_threshold)
{
  MLCommon::TimeSeries::kpss_test(
    d_y, results, batch_size, n_obs, d, D, s, handle.get_stream(), pval_threshold);
}

// Single-precision public entry point.
void kpss_test(const raft::handle_t& handle,
               const float* d_y,
               bool* results,
               int batch_size,
               int n_obs,
               int d,
               int D,
               int s,
               float pval_threshold)
{
  kpss_test_helper<float>(handle, d_y, results, batch_size, n_obs, d, D, s, pval_threshold);
}

// Double-precision public entry point.
void kpss_test(const raft::handle_t& handle,
               const double* d_y,
               bool* results,
               int batch_size,
               int n_obs,
               int d,
               int D,
               int s,
               double pval_threshold)
{
  kpss_test_helper<double>(handle, d_y, results, batch_size, n_obs, d, D, s, pval_threshold);
}

}  // namespace Stationarity
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/explainer/tree_shap.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <GPUTreeShap/gpu_treeshap.h> #include <algorithm> #include <bitset> #include <cstddef> #include <cstdint> #include <cuml/explainer/tree_shap.hpp> #include <iostream> #include <limits> #include <memory> #include <numeric> #include <raft/core/error.hpp> #include <raft/core/span.hpp> #include <rmm/device_uvector.hpp> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/memory.h> #include <treelite/tree.h> #include <type_traits> #include <variant> #include <vector> namespace tl = treelite; /* All functions and classes defined in this anonymous namespace are strictly * for internal use by GPUTreeSHAP. 
*/ namespace { // A poor man's bit field, to be used to account for categorical splits in SHAP computation // Inspired by xgboost::BitFieldContainer template <typename T, bool is_device> class BitField { private: static std::size_t constexpr kValueSize = sizeof(T) * 8; static std::size_t constexpr kOne = 1; // force correct data type raft::span<T, is_device> bits_; public: BitField() = default; __host__ __device__ explicit BitField(raft::span<T, is_device> bits) : bits_(bits) {} __host__ __device__ BitField(const BitField& other) : bits_(other.bits_) {} BitField& operator=(const BitField& other) = default; BitField& operator=(BitField&& other) = default; __host__ __device__ bool Check(std::size_t pos) const { T bitmask = kOne << (pos % kValueSize); return static_cast<bool>(bits_[pos / kValueSize] & bitmask); } __host__ __device__ void Set(std::size_t pos) { T bitmask = kOne << (pos % kValueSize); bits_[pos / kValueSize] |= bitmask; } __host__ __device__ void Intersect(const BitField other) { if (bits_.data() == other.bits_.data()) { return; } std::size_t size = min(bits_.size(), other.bits_.size()); for (std::size_t i = 0; i < size; ++i) { bits_[i] &= other.bits_[i]; } if (bits_.size() > size) { for (std::size_t i = size; i < bits_.size(); ++i) { bits_[i] = 0; } } } __host__ __device__ std::size_t Size() const { return kValueSize * bits_.size(); } __host__ static std::size_t ComputeStorageSize(std::size_t n_cat) { return n_cat / kValueSize + (n_cat % kValueSize != 0); } __host__ std::string ToString(bool reverse = false) const { std::ostringstream oss; oss << "Bits storage size: " << bits_.size() << ", elements: "; for (auto i = 0; i < bits_.size(); ++i) { std::bitset<kValueSize> bset(bits_[i]); std::string s = bset.to_string(); if (reverse) { std::reverse(s.begin(), s.end()); } oss << s << ", "; } return oss.str(); } static_assert(!std::is_signed<T>::value, "Must use unsigned type as underlying storage."); }; using CatBitFieldStorageT = std::uint32_t; template 
<bool is_device> using CatBitField = BitField<CatBitFieldStorageT, is_device>; using CatT = std::uint32_t; template <typename ThresholdType> struct SplitCondition { SplitCondition() = default; SplitCondition(ThresholdType feature_lower_bound, ThresholdType feature_upper_bound, bool is_missing_branch, tl::Operator comparison_op, CatBitField<false> categories) : feature_lower_bound(feature_lower_bound), feature_upper_bound(feature_upper_bound), is_missing_branch(is_missing_branch), comparison_op(comparison_op), categories(categories), d_categories() { if (feature_lower_bound > feature_upper_bound) { RAFT_FAIL("Lower bound cannot exceed upper bound"); } if (comparison_op != tl::Operator::kLT && comparison_op != tl::Operator::kLE && comparison_op != tl::Operator::kNone) { RAFT_FAIL("Unsupported comparison operator"); } } // Lower and upper bounds on feature values flowing down this path ThresholdType feature_lower_bound; ThresholdType feature_upper_bound; bool is_missing_branch; // Comparison operator used in the test. For now only < (kLT) and <= (kLE) // are supported. tl::Operator comparison_op; // List of matching categories for this path CatBitField<false> categories; CatBitField<true> d_categories; // Does this instance flow down this path? 
__host__ __device__ bool EvaluateSplit(ThresholdType x) const { #ifdef __CUDA_ARCH__ constexpr bool is_device = true; #else // __CUDA_ARCH__ constexpr bool is_device = false; #endif static_assert(std::is_floating_point<ThresholdType>::value, "x must be a floating point type"); auto max_representable_int = static_cast<ThresholdType>(uint64_t(1) << std::numeric_limits<ThresholdType>::digits); if (isnan(x)) { return is_missing_branch; } if constexpr (is_device) { if (d_categories.Size() != 0) { if (x < 0 || std::fabs(x) > max_representable_int) { return false; } return d_categories.Check(static_cast<std::size_t>(x)); } } else { if (categories.Size() != 0) { if (x < 0 || std::fabs(x) > max_representable_int) { return false; } return categories.Check(static_cast<std::size_t>(x)); } } if (comparison_op == tl::Operator::kLE) { return x > feature_lower_bound && x <= feature_upper_bound; } return x >= feature_lower_bound && x < feature_upper_bound; } // Combine two split conditions on the same feature __host__ __device__ void Merge(const SplitCondition& other) { // Combine duplicate features #ifdef __CUDA_ARCH__ constexpr bool is_device = true; #else // __CUDA_ARCH__ constexpr bool is_device = false; #endif bool has_category = false; if constexpr (is_device) { has_category = (d_categories.Size() != 0 || other.d_categories.Size() != 0); } else { has_category = (categories.Size() != 0 || other.categories.Size() != 0); } if (has_category) { if constexpr (is_device) { d_categories.Intersect(other.d_categories); } else { categories.Intersect(other.categories); } } else { feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound); feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound); } is_missing_branch = is_missing_branch && other.is_missing_branch; } static_assert(std::is_same<ThresholdType, float>::value || std::is_same<ThresholdType, double>::value, "ThresholdType must be a float or double"); }; template <typename ThresholdType, typename 
LeafType> struct CategoricalSplitCounter { int n_features; std::vector<CatT> n_categories; // n_categories[K] = number of category values for feature K // Set to 0 for numerical features std::vector<std::int64_t> feature_id; // feature_id[I] = feature ID associated with the I-th path segment CategoricalSplitCounter(int n_features) : n_features(n_features), n_categories(n_features, 0), feature_id() { } void node_handler(const tl::Tree<ThresholdType, LeafType>& tree, int, int parent_idx, int, float) { const auto split_index = tree.SplitIndex(parent_idx); if (tree.SplitType(parent_idx) == tl::SplitFeatureType::kCategorical) { CatT max_cat = 0; for (CatT cat : tree.MatchingCategories(parent_idx)) { if (cat > max_cat) { max_cat = cat; } } n_categories[split_index] = std::max(n_categories[split_index], max_cat + 1); } feature_id.push_back(split_index); } void root_handler(const tl::Tree<ThresholdType, LeafType>&, int, int, float) { feature_id.push_back(-1); } void new_path_handler() {} }; template <typename ThresholdType, typename LeafType> struct PathSegmentExtractor { using PathElementT = gpu_treeshap::PathElement<SplitCondition<ThresholdType>>; std::vector<PathElementT>& path_segments; std::size_t& path_idx; std::vector<CatBitFieldStorageT>& categorical_bitfields; const std::vector<std::size_t>& bitfield_segments; std::size_t path_segment_idx; static constexpr ThresholdType inf{std::numeric_limits<ThresholdType>::infinity()}; PathSegmentExtractor(std::vector<PathElementT>& path_segments, std::size_t& path_idx, std::vector<CatBitFieldStorageT>& categorical_bitfields, const std::vector<std::size_t>& bitfield_segments) : path_segments(path_segments), path_idx(path_idx), categorical_bitfields(categorical_bitfields), bitfield_segments(bitfield_segments), path_segment_idx(0) { } void node_handler(const tl::Tree<ThresholdType, LeafType>& tree, int child_idx, int parent_idx, int group_id, float v) { double zero_fraction = 1.0; bool has_count_info = false; if 
(tree.HasSumHess(parent_idx) && tree.HasSumHess(child_idx)) { zero_fraction = static_cast<double>(tree.SumHess(child_idx) / tree.SumHess(parent_idx)); has_count_info = true; } if (!has_count_info && tree.HasDataCount(parent_idx) && tree.HasDataCount(child_idx)) { zero_fraction = static_cast<double>(tree.DataCount(child_idx)) / tree.DataCount(parent_idx); has_count_info = true; } if (!has_count_info) { RAFT_FAIL("Tree model doesn't have data count information"); } // Encode the range of feature values that flow down this path bool is_left_path = tree.LeftChild(parent_idx) == child_idx; bool is_missing_branch = tree.DefaultChild(parent_idx) == child_idx; auto split_type = tree.SplitType(parent_idx); ThresholdType lower_bound, upper_bound; tl::Operator comparison_op; CatBitField<false> categories; if (split_type == tl::SplitFeatureType::kCategorical) { /* Create bit fields to store the list of categories associated with this path. The bit fields will be used to quickly decide whether a feature value should flow down down this path or not. The test in the test node is of form: x \in { list of category values } */ auto n_bitfields = bitfield_segments[path_segment_idx + 1] - bitfield_segments[path_segment_idx]; categories = CatBitField<false>(raft::span<CatBitFieldStorageT, false>( categorical_bitfields.data(), categorical_bitfields.size()) .subspan(bitfield_segments[path_segment_idx], n_bitfields)); for (CatT cat : tree.MatchingCategories(parent_idx)) { categories.Set(static_cast<std::size_t>(cat)); } // If this path is not the path that's taken when the categorical test evaluates to be true, // then flip all the bits in the bit fields. This step is needed because we first built // the bit fields according to the list given in the categorical test. 
bool use_right = tree.CategoriesListRightChild(parent_idx); if ((use_right && is_left_path) || (!use_right && !is_left_path)) { for (std::size_t i = bitfield_segments[path_segment_idx]; i < bitfield_segments[path_segment_idx + 1]; ++i) { categorical_bitfields[i] = ~categorical_bitfields[i]; } } lower_bound = -inf; upper_bound = inf; comparison_op = tl::Operator::kNone; } else { if (split_type != tl::SplitFeatureType::kNumerical) { // Assume: split is either numerical or categorical RAFT_FAIL("Unexpected split type: %d", static_cast<int>(split_type)); } categories = CatBitField<false>{}; lower_bound = is_left_path ? -inf : tree.Threshold(parent_idx); upper_bound = is_left_path ? tree.Threshold(parent_idx) : inf; comparison_op = tree.ComparisonOp(parent_idx); } path_segments.push_back(gpu_treeshap::PathElement<SplitCondition<ThresholdType>>{ path_idx, tree.SplitIndex(parent_idx), group_id, SplitCondition{lower_bound, upper_bound, is_missing_branch, comparison_op, categories}, zero_fraction, v}); ++path_segment_idx; } void root_handler(const tl::Tree<ThresholdType, LeafType>& tree, int child_idx, int group_id, float v) { // Root node has feature -1 auto comparison_op = tree.ComparisonOp(child_idx); path_segments.push_back(gpu_treeshap::PathElement<SplitCondition<ThresholdType>>{ path_idx, -1, group_id, SplitCondition{-inf, inf, false, comparison_op, {}}, 1.0, v}); ++path_segment_idx; } void new_path_handler() { ++path_idx; } }; }; // namespace namespace ML { namespace Explainer { template <typename ThresholdType> class TreePathInfo { public: int num_tree; float global_bias; std::size_t num_groups = 1; tl::TaskType task_type; tl::TaskParam task_param; bool average_tree_output; thrust::device_vector<gpu_treeshap::PathElement<SplitCondition<ThresholdType>>> path_segments; thrust::device_vector<CatBitFieldStorageT> categorical_bitfields; // bitfield_segments[I]: cumulative total count of all bit fields for path segments // 0, 1, ..., I-1 
static_assert(std::is_same<ThresholdType, float>::value || std::is_same<ThresholdType, double>::value, "ThresholdType must be a float or double"); }; } // namespace Explainer } // namespace ML namespace { template <typename DataT> class DenseDatasetWrapper { const DataT* data; std::size_t num_rows; std::size_t num_cols; public: DenseDatasetWrapper() = default; DenseDatasetWrapper(const DataT* data, int num_rows, int num_cols) : data(data), num_rows(num_rows), num_cols(num_cols) { } __device__ DataT GetElement(std::size_t row_idx, std::size_t col_idx) const { return data[row_idx * num_cols + col_idx]; } __host__ __device__ std::size_t NumRows() const { return num_rows; } __host__ __device__ std::size_t NumCols() const { return num_cols; } }; template <typename ThresholdT, typename DataT> void post_process(ML::Explainer::TreePathInfo<ThresholdT>* path_info, std::size_t n_rows, std::size_t n_cols, DataT* out_preds, std::size_t pred_size, bool interactions) { auto count_iter = thrust::make_counting_iterator(0); auto num_tree = path_info->num_tree; auto global_bias = path_info->global_bias; auto num_groups = path_info->num_groups; if (path_info->average_tree_output) { thrust::for_each( thrust::device, count_iter, count_iter + pred_size, [=] __device__(std::size_t idx) { out_preds[idx] /= num_tree; }); } // Set the global bias if (interactions) { thrust::for_each(thrust::device, count_iter, count_iter + (n_rows * num_groups), [=] __device__(std::size_t idx) { size_t group = idx % num_groups; size_t row_idx = idx / num_groups; out_preds[gpu_treeshap::IndexPhiInteractions( row_idx, num_groups, group, n_cols, n_cols, n_cols)] += global_bias; }); } else { thrust::for_each( thrust::device, count_iter, count_iter + (n_rows * num_groups), [=] __device__(std::size_t idx) { out_preds[(idx + 1) * (n_cols + 1) - 1] += global_bias; }); } } template <typename ThresholdT, typename DataT> void gpu_treeshap_impl(ML::Explainer::TreePathInfo<ThresholdT>* path_info, const DataT* data, 
std::size_t n_rows, std::size_t n_cols, DataT* out_preds, std::size_t out_preds_size) { DenseDatasetWrapper<DataT> X(data, n_rows, n_cols); std::size_t pred_size = n_rows * path_info->num_groups * (n_cols + 1); ASSERT(pred_size <= out_preds_size, "Predictions array is too small."); gpu_treeshap::GPUTreeShap(X, path_info->path_segments.begin(), path_info->path_segments.end(), path_info->num_groups, thrust::device_pointer_cast(out_preds), thrust::device_pointer_cast(out_preds) + pred_size); // Post-processing post_process(path_info, n_rows, n_cols, out_preds, pred_size, false); } template <typename ThresholdT, typename DataT> void gpu_treeshap_interventional_impl(ML::Explainer::TreePathInfo<ThresholdT>* path_info, const DataT* data, std::size_t n_rows, std::size_t n_cols, const DataT* background_data, std::size_t background_n_rows, std::size_t background_n_cols, DataT* out_preds, std::size_t out_preds_size) { DenseDatasetWrapper<DataT> X(data, n_rows, n_cols); DenseDatasetWrapper<DataT> R(background_data, background_n_rows, background_n_cols); ASSERT(n_cols == background_n_cols, "Dataset and background dataset have different number of columns."); std::size_t pred_size = n_rows * path_info->num_groups * (n_cols + 1); ASSERT(pred_size <= out_preds_size, "Predictions array is too small."); gpu_treeshap::GPUTreeShapInterventional(X, R, path_info->path_segments.begin(), path_info->path_segments.end(), path_info->num_groups, thrust::device_pointer_cast(out_preds), thrust::device_pointer_cast(out_preds) + pred_size); // Post-processing post_process(path_info, n_rows, n_cols, out_preds, pred_size, false); } template <typename ThresholdT, typename DataT> void gpu_treeshap_interactions_impl(ML::Explainer::TreePathInfo<ThresholdT>* path_info, const DataT* data, std::size_t n_rows, std::size_t n_cols, DataT* out_preds, std::size_t out_preds_size) { DenseDatasetWrapper<DataT> X(data, n_rows, n_cols); std::size_t pred_size = n_rows * path_info->num_groups * (n_cols + 1) * (n_cols 
+ 1); ASSERT(pred_size <= out_preds_size, "Predictions array is too small."); gpu_treeshap::GPUTreeShapInteractions(X, path_info->path_segments.begin(), path_info->path_segments.end(), path_info->num_groups, thrust::device_pointer_cast(out_preds), thrust::device_pointer_cast(out_preds) + pred_size); // Post-processing post_process(path_info, n_rows, n_cols, out_preds, pred_size, true); } template <typename ThresholdT, typename DataT> void gpu_treeshap_taylor_interactions_impl(ML::Explainer::TreePathInfo<ThresholdT>* path_info, const DataT* data, std::size_t n_rows, std::size_t n_cols, DataT* out_preds, std::size_t out_preds_size) { DenseDatasetWrapper<DataT> X(data, n_rows, n_cols); std::size_t pred_size = n_rows * path_info->num_groups * (n_cols + 1) * (n_cols + 1); ASSERT(pred_size <= out_preds_size, "Predictions array is too small."); gpu_treeshap::GPUTreeShapTaylorInteractions(X, path_info->path_segments.begin(), path_info->path_segments.end(), path_info->num_groups, thrust::device_pointer_cast(out_preds), thrust::device_pointer_cast(out_preds) + pred_size); // Post-processing post_process(path_info, n_rows, n_cols, out_preds, pred_size, true); } } // anonymous namespace namespace ML { namespace Explainer { // Traverse a path from the root node to a leaf node and call the handler functions for each node. // The fields group_id and v (leaf value) will be passed to the handler. 
template <typename ThresholdType, typename LeafType, typename PathHandler> void traverse_towards_leaf_node(const tl::Tree<ThresholdType, LeafType>& tree, int leaf_node_id, int group_id, float v, const std::vector<int>& parent_id, PathHandler& path_handler) { int child_idx = leaf_node_id; int parent_idx = parent_id[child_idx]; while (parent_idx != -1) { path_handler.node_handler(tree, child_idx, parent_idx, group_id, v); child_idx = parent_idx; parent_idx = parent_id[child_idx]; } path_handler.root_handler(tree, child_idx, group_id, v); } // Visit every path segments in a single tree and call handler functions for each segment. template <typename ThresholdType, typename LeafType, typename PathHandler> void visit_path_segments_in_tree(const std::vector<tl::Tree<ThresholdType, LeafType>>& tree_list, std::size_t tree_idx, bool use_vector_leaf, int num_groups, PathHandler& path_handler) { if (num_groups < 1) { RAFT_FAIL("num_groups must be at least 1"); } const tl::Tree<ThresholdType, LeafType>& tree = tree_list[tree_idx]; // Compute parent ID of each node std::vector<int> parent_id(tree.num_nodes, -1); for (int i = 0; i < tree.num_nodes; i++) { if (!tree.IsLeaf(i)) { parent_id[tree.LeftChild(i)] = i; parent_id[tree.RightChild(i)] = i; } } for (int nid = 0; nid < tree.num_nodes; nid++) { if (tree.IsLeaf(nid)) { // For each leaf node... 
// Extract path segments by traversing the path from the leaf node to the root node // If use_vector_leaf=True, repeat the path segments N times, where N = num_groups if (use_vector_leaf) { auto leaf_vector = tree.LeafVector(nid); if (leaf_vector.size() != static_cast<std::size_t>(num_groups)) { RAFT_FAIL("Expected leaf vector of length %d but got %d instead", num_groups, static_cast<int>(leaf_vector.size())); } for (int group_id = 0; group_id < num_groups; ++group_id) { traverse_towards_leaf_node( tree, nid, group_id, leaf_vector[group_id], parent_id, path_handler); path_handler.new_path_handler(); } } else { int group_id = static_cast<int>(tree_idx) % num_groups; auto leaf_value = tree.LeafValue(nid); traverse_towards_leaf_node(tree, nid, group_id, leaf_value, parent_id, path_handler); path_handler.new_path_handler(); } } } } // Visit every path segments in the whole tree ensemble model template <typename ThresholdType, typename LeafType, typename PathHandler> void visit_path_segments_in_model(const tl::ModelImpl<ThresholdType, LeafType>& model, PathHandler& path_handler) { int num_groups = 1; bool use_vector_leaf; if (model.task_param.num_class > 1) { num_groups = model.task_param.num_class; } if (model.task_type == tl::TaskType::kBinaryClfRegr || model.task_type == tl::TaskType::kMultiClfGrovePerClass) { use_vector_leaf = false; } else if (model.task_type == tl::TaskType::kMultiClfProbDistLeaf) { use_vector_leaf = true; } else { RAFT_FAIL("Unsupported task_type: %d", static_cast<int>(model.task_type)); } for (std::size_t tree_idx = 0; tree_idx < model.trees.size(); ++tree_idx) { visit_path_segments_in_tree(model.trees, tree_idx, use_vector_leaf, num_groups, path_handler); } } // Traverse a path from the root node to a leaf node and return the list of the path segments // Note: the path segments will have missing values in path_idx, group_id and v (leaf value). // The caller is responsible for filling in these fields. 
template <typename ThresholdType, typename LeafType> std::vector<gpu_treeshap::PathElement<SplitCondition<ThresholdType>>> traverse_towards_leaf_node( const tl::Tree<ThresholdType, LeafType>& tree, int leaf_node_id, const std::vector<int>& parent_id) { std::vector<gpu_treeshap::PathElement<SplitCondition<ThresholdType>>> path_segments; int child_idx = leaf_node_id; int parent_idx = parent_id[child_idx]; constexpr auto inf = std::numeric_limits<ThresholdType>::infinity(); tl::Operator comparison_op = tl::Operator::kNone; while (parent_idx != -1) { double zero_fraction = 1.0; bool has_count_info = false; if (tree.HasSumHess(parent_idx) && tree.HasSumHess(child_idx)) { zero_fraction = static_cast<double>(tree.SumHess(child_idx) / tree.SumHess(parent_idx)); has_count_info = true; } if (!has_count_info && tree.HasDataCount(parent_idx) && tree.HasDataCount(child_idx)) { zero_fraction = static_cast<double>(tree.DataCount(child_idx)) / tree.DataCount(parent_idx); has_count_info = true; } if (!has_count_info) { RAFT_FAIL("Tree model doesn't have data count information"); } // Encode the range of feature values that flow down this path bool is_left_path = tree.LeftChild(parent_idx) == child_idx; if (tree.SplitType(parent_idx) == tl::SplitFeatureType::kCategorical) { RAFT_FAIL( "Only trees with numerical splits are supported. " "Trees with categorical splits are not supported yet."); } ThresholdType lower_bound = is_left_path ? -inf : tree.Threshold(parent_idx); ThresholdType upper_bound = is_left_path ? 
tree.Threshold(parent_idx) : inf; comparison_op = tree.ComparisonOp(parent_idx); path_segments.push_back(gpu_treeshap::PathElement<SplitCondition<ThresholdType>>{ ~std::size_t(0), tree.SplitIndex(parent_idx), -1, SplitCondition{lower_bound, upper_bound, comparison_op}, zero_fraction, std::numeric_limits<float>::quiet_NaN()}); child_idx = parent_idx; parent_idx = parent_id[child_idx]; } // Root node has feature -1 comparison_op = tree.ComparisonOp(child_idx); // Build temporary path segments with unknown path_idx, group_id and leaf value path_segments.push_back(gpu_treeshap::PathElement<SplitCondition<ThresholdType>>{ ~std::size_t(0), -1, -1, SplitCondition{-inf, inf, comparison_op}, 1.0, std::numeric_limits<float>::quiet_NaN()}); return path_segments; } template <typename ThresholdType, typename LeafType> TreePathHandle extract_path_info_impl(const tl::ModelImpl<ThresholdType, LeafType>& model) { if (!std::is_same<ThresholdType, LeafType>::value) { RAFT_FAIL("ThresholdType and LeafType must be identical"); } if (!std::is_same<ThresholdType, float>::value && !std::is_same<ThresholdType, double>::value) { RAFT_FAIL("ThresholdType must be either float32 or float64"); } auto path_info = std::make_shared<TreePathInfo<ThresholdType>>(); /* 1. Scan the model for categorical splits and pre-allocate bit fields. 
*/ CategoricalSplitCounter<ThresholdType, LeafType> cat_counter{model.num_feature}; visit_path_segments_in_model(model, cat_counter); std::size_t n_path_segments = cat_counter.feature_id.size(); std::vector<std::size_t> n_bitfields(n_path_segments, 0); // n_bitfields[I] : number of bit fields for path segment I std::transform(cat_counter.feature_id.cbegin(), cat_counter.feature_id.cend(), n_bitfields.begin(), [&](std::int64_t fid) -> std::size_t { if (fid == -1) { return 0; } return CatBitField<false>::ComputeStorageSize(cat_counter.n_categories[fid]); }); std::vector<std::size_t> bitfield_segments(n_path_segments + 1, 0); std::inclusive_scan(n_bitfields.cbegin(), n_bitfields.cend(), bitfield_segments.begin() + 1); std::vector<CatBitFieldStorageT> categorical_bitfields(bitfield_segments.back(), 0); /* 2. Scan the model again, to extract path segments. */ // Each path segment will have path_idx field, which uniquely identifies the path to which the // segment belongs. std::size_t path_idx = 0; std::vector<gpu_treeshap::PathElement<SplitCondition<ThresholdType>>> path_segments; PathSegmentExtractor<ThresholdType, LeafType> path_extractor{ path_segments, path_idx, categorical_bitfields, bitfield_segments}; visit_path_segments_in_model(model, path_extractor); // Marshall bit fields to GPU memory path_info->categorical_bitfields = thrust::device_vector<CatBitFieldStorageT>( categorical_bitfields.cbegin(), categorical_bitfields.cend()); for (std::size_t path_seg_idx = 0; path_seg_idx < path_segments.size(); ++path_seg_idx) { auto n_bitfields = bitfield_segments[path_seg_idx + 1] - bitfield_segments[path_seg_idx]; path_segments[path_seg_idx].split_condition.d_categories = CatBitField<true>(raft::span<CatBitFieldStorageT, true>( thrust::raw_pointer_cast(path_info->categorical_bitfields.data()), path_info->categorical_bitfields.size()) .subspan(bitfield_segments[path_seg_idx], n_bitfields)); } path_info->path_segments = path_segments; path_info->global_bias = 
model.param.global_bias; path_info->task_type = model.task_type; path_info->task_param = model.task_param; path_info->average_tree_output = model.average_tree_output; path_info->num_tree = static_cast<int>(model.trees.size()); if (path_info->task_param.num_class > 1) { path_info->num_groups = static_cast<std::size_t>(path_info->task_param.num_class); } return path_info; } TreePathHandle extract_path_info(ModelHandle model) { const tl::Model& model_ref = *static_cast<tl::Model*>(model); return model_ref.Dispatch([&](const auto& model_inner) { // model_inner is of the concrete type tl::ModelImpl<threshold_t, leaf_t> return extract_path_info_impl(model_inner); }); } template <typename VariantT, typename... Targs> bool variants_hold_same_type(VariantT& first, Targs... args) { bool is_same = true; std::visit( [&](auto v) { for (const auto& x : {args...}) { is_same = is_same && std::holds_alternative<decltype(v)>(x); } }, first); return is_same; } void gpu_treeshap(TreePathHandle path_info, const FloatPointer data, std::size_t n_rows, std::size_t n_cols, FloatPointer out_preds, std::size_t out_preds_size) { ASSERT(variants_hold_same_type(data, out_preds), "Expected variant inputs to have the same data type."); std::visit( [&](auto& tree_info, auto data_) { gpu_treeshap_impl(tree_info.get(), data_, n_rows, n_cols, std::get<decltype(data_)>(out_preds), out_preds_size); }, path_info, data); } void gpu_treeshap_interventional(TreePathHandle path_info, const FloatPointer data, std::size_t n_rows, std::size_t n_cols, const FloatPointer background_data, std::size_t background_n_rows, std::size_t background_n_cols, FloatPointer out_preds, std::size_t out_preds_size) { ASSERT(variants_hold_same_type(data, background_data, out_preds), "Expected variant inputs to have the same data type."); std::visit( [&](auto& tree_info, auto data_) { gpu_treeshap_interventional_impl(tree_info.get(), data_, n_rows, n_cols, std::get<decltype(data_)>(background_data), background_n_rows, 
background_n_cols, std::get<decltype(data_)>(out_preds), out_preds_size); }, path_info, data); } void gpu_treeshap_interactions(TreePathHandle path_info, const FloatPointer data, std::size_t n_rows, std::size_t n_cols, FloatPointer out_preds, std::size_t out_preds_size) { ASSERT(variants_hold_same_type(data, out_preds), "Expected variant inputs to have the same data type."); std::visit( [&](auto& tree_info, auto data_) { gpu_treeshap_interactions_impl(tree_info.get(), data_, n_rows, n_cols, std::get<decltype(data_)>(out_preds), out_preds_size); }, path_info, data); } void gpu_treeshap_taylor_interactions(TreePathHandle path_info, const FloatPointer data, std::size_t n_rows, std::size_t n_cols, FloatPointer out_preds, std::size_t out_preds_size) { ASSERT(variants_hold_same_type(data, out_preds), "Expected variant inputs to have the same data type."); std::visit( [&](auto& tree_info, auto data_) { gpu_treeshap_taylor_interactions_impl(tree_info.get(), data_, n_rows, n_cols, std::get<decltype(data_)>(out_preds), out_preds_size); }, path_info, data); } } // namespace Explainer } // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/explainer/permutation_shap.cu
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <raft/core/handle.hpp> #include <raft/util/cudart_utils.hpp> #include <cuml/explainer/permutation_shap.hpp> namespace ML { namespace Explainer { template <typename DataT, typename IdxT> __global__ void _fused_tile_scatter_pe(DataT* dataset, const DataT* background, IdxT nrows_dataset, IdxT ncols, const DataT* obs, IdxT* idx, IdxT nrows_background, IdxT sc_size, bool row_major) { // kernel that actually does the scattering as described in the // descriptions of `permutation_dataset` and `shap_main_effect_dataset` // parameter sc_size allows us to generate both the permuation_shap_dataset // and the main_effect_dataset with the same kernel, since they do the // scattering in the same manner, its just the "height" of the columns // generated from values that is different. 
IdxT tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid < ncols * nrows_dataset) { IdxT row, col, start, end; if (row_major) { row = tid / ncols; // we calculate the first row where the entry of dataset will be // entered into background depending on its place in the index array col = idx[tid % ncols]; start = ((tid % ncols) + 1) * nrows_background; // each entry of the dataset will be input the same number of times // to the matrix, controlled by the sc_size parameter end = start + sc_size * nrows_background; // now we just need to check if this thread is between start and end // if it is then the value should be based on the observation obs // otherwise on the background dataset if ((start <= row && row < end)) { dataset[row * ncols + col] = obs[col]; } else { dataset[row * ncols + col] = background[(row % nrows_background) * ncols + col]; } } else { col = tid / nrows_dataset; row = tid % nrows_dataset; // main difference between row and col major is how do we calculate // the end and start and which row corresponds to each thread start = nrows_background + idx[col] * nrows_background; // calculation of end position is identical end = start + sc_size * nrows_background; if ((start <= row && row < end)) { dataset[tid] = obs[col]; } else { dataset[tid] = background[row + nrows_background * col]; } } } } template <typename DataT, typename IdxT> void permutation_shap_dataset_impl(const raft::handle_t& handle, DataT* dataset, const DataT* background, IdxT nrows_background, IdxT ncols, const DataT* row, IdxT* idx, bool row_major) { const auto& handle_impl = handle; cudaStream_t stream = handle_impl.get_stream(); // we calculate the number of rows in the dataset and then multiply by 2 since // we are adding a forward and backward permutation (see docstring in header file) IdxT nrows_dataset = (2 * ncols * nrows_background + nrows_background); constexpr IdxT nthreads = 512; IdxT nblks = (nrows_dataset * ncols + nthreads - 1) / nthreads; // each thread calculates a 
single element // for the permutation shap dataset we need the sc_size parameter to be ncols _fused_tile_scatter_pe<<<nblks, nthreads, 0, stream>>>( dataset, background, nrows_dataset, ncols, row, idx, nrows_background, ncols, row_major); RAFT_CUDA_TRY(cudaPeekAtLastError()); } void permutation_shap_dataset(const raft::handle_t& handle, float* dataset, const float* background, int nrows_bg, int ncols, const float* row, int* idx, bool row_major) { permutation_shap_dataset_impl(handle, dataset, background, nrows_bg, ncols, row, idx, row_major); } void permutation_shap_dataset(const raft::handle_t& handle, double* dataset, const double* background, int nrows_bg, int ncols, const double* row, int* idx, bool row_major) { permutation_shap_dataset_impl(handle, dataset, background, nrows_bg, ncols, row, idx, row_major); } template <typename DataT, typename IdxT> void shap_main_effect_dataset_impl(const raft::handle_t& handle, DataT* dataset, const DataT* background, IdxT nrows_bg, IdxT ncols, const DataT* row, IdxT* idx, bool row_major) { const auto& handle_impl = handle; cudaStream_t stream = handle_impl.get_stream(); // we calculate the number of elements in the dataset IdxT total_num_elements = (nrows_bg * ncols + nrows_bg) * ncols; constexpr IdxT nthreads = 512; IdxT nblks = (total_num_elements + nthreads - 1) / nthreads; // each thread calculates a single element // for the permutation shap dataset we need the sc_size parameter to be 1 _fused_tile_scatter_pe<<<nblks, nthreads, 0, stream>>>( dataset, background, total_num_elements / ncols, ncols, row, idx, nrows_bg, 1, row_major); RAFT_CUDA_TRY(cudaPeekAtLastError()); } void shap_main_effect_dataset(const raft::handle_t& handle, float* dataset, const float* background, int nrows_bg, int ncols, const float* row, int* idx, bool row_major) { shap_main_effect_dataset_impl(handle, dataset, background, nrows_bg, ncols, row, idx, row_major); } void shap_main_effect_dataset(const raft::handle_t& handle, double* dataset, const 
double* background,
                              int nrows_bg,
                              int ncols,
                              const double* row,
                              int* idx,
                              bool row_major)
{
  shap_main_effect_dataset_impl(handle, dataset, background, nrows_bg, ncols, row, idx, row_major);
}

/*
 * Kernel: folds one forward + backward permutation pass of model outputs into
 * the per-feature SHAP accumulators.
 *
 * One thread per column. `input` holds the model evaluations along the
 * permutation path (2 * ncols + 2 entries are read: tid, tid+1 for the forward
 * pass and tid+ncols, tid+ncols+1 for the backward pass); `idx[tid]` maps the
 * path position back to the original feature, whose accumulator in `output` is
 * updated in place.
 */
template <typename DataT, typename IdxT>
__global__ void update_perm_shap_values_kernel(DataT* output,
                                               const DataT* input,
                                               const IdxT ncols,
                                               const IdxT* idx)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < ncols) {
    DataT result = output[idx[tid]];
    // forward-pass delta
    result += input[tid + 1] - input[tid];
    // backward-pass delta
    result += input[tid + ncols] - input[tid + ncols + 1];
    output[idx[tid]] = result;
  }
}

// Host launcher: accumulates one permutation pass of `y_hat` into `shap_values`.
// Runs on the handle's stream.
template <typename DataT, typename IdxT>
void update_perm_shap_values_impl(const raft::handle_t& handle,
                                  DataT* shap_values,
                                  const DataT* y_hat,
                                  const IdxT ncols,
                                  const IdxT* idx)
{
  const auto& handle_impl = handle;
  cudaStream_t stream     = handle_impl.get_stream();

  constexpr IdxT nthreads = 512;
  IdxT nblks              = ncols / nthreads + 1;

  // BUGFIX: launch on the handle's stream. Previously the kernel was launched on
  // the legacy default stream (`0`) even though `stream` was fetched above, which
  // broke ordering with other work queued on the handle's stream.
  update_perm_shap_values_kernel<<<nblks, nthreads, 0, stream>>>(shap_values, y_hat, ncols, idx);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

// Public entry point (float).
void update_perm_shap_values(
  const raft::handle_t& handle, float* shap_values, const float* y_hat, const int ncols, const int* idx)
{
  update_perm_shap_values_impl(handle, shap_values, y_hat, ncols, idx);
}

// Public entry point (double).
void update_perm_shap_values(const raft::handle_t& handle,
                             double* shap_values,
                             const double* y_hat,
                             const int ncols,
                             const int* idx)
{
  update_perm_shap_values_impl(handle, shap_values, y_hat, ncols, idx);
}

}  // namespace Explainer
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/explainer/kernel_shap.cu
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <raft/core/handle.hpp>
#include <raft/util/cudart_utils.hpp>

#include <cuml/explainer/kernel_shap.hpp>

#include <curand.h>
#include <curand_kernel.h>

namespace ML {
namespace Explainer {

/*
 * Kernel distributes the exact part of the kernel shap dataset.
 * Each block scatters the data of a row of `observations` into the (number of rows of
 * background) in `dataset`, based on the row of `X`.
 * So, given:
 * background = [[0, 1, 2], [3, 4, 5]]
 * observation = [100, 101, 102]
 * X = [[1, 0, 1],
 *      [0, 1, 1]]
 *
 * dataset (output):
 * [[100, 1, 102],
 *  [100, 4, 102]
 *  [0, 101, 102],
 *  [3, 101, 102]]
 *
 * Launch: one block per row of X; columns are strided by blockDim.x for coalescing.
 */
template <typename DataT, typename IdxT>
__global__ void exact_rows_kernel(float* X,
                                  IdxT nrows_X,
                                  IdxT ncols,
                                  DataT* background,
                                  IdxT nrows_background,
                                  DataT* dataset,
                                  DataT* observation)
{
  // Each block processes one row of X. Columns are iterated over by blockDim.x at a
  // time to ensure data coalescing.
  int col = threadIdx.x;
  int row = blockIdx.x * ncols;

  while (col < ncols) {
    // Load the X mask value for the current column: 1 -> take the observation,
    // 0 -> keep the background value.
    int curr_X = (int)X[row + col];

    // Scatter over all background rows belonging to this block.
    int row_idx_base = blockIdx.x * nrows_background;
    for (int r = 0; r < nrows_background; r++) {
      int row_idx = row_idx_base + r;
      if (curr_X == 0) {
        dataset[row_idx * ncols + col] = background[r * ncols + col];
      } else {
        dataset[row_idx * ncols + col] = observation[col];
      }
    }
    // Advance to the next strip of columns.
    col += blockDim.x;
  }
}

/*
 * Kernel distributes the sampled part of the kernel shap dataset.
 * The first `k` threads of each block sample `k` entries of `observation`
 * to scatter into `dataset`. Afterwards each block scatters the data of a row of `X`
 * (and its complement) into the (number of rows of background) in `dataset`.
 * So, given:
 * background = [[0, 1, 2, 3], [5, 6, 7, 8]]
 * observation = [100, 101, 102, 103]
 * nsamples = [3, 2]
 *
 * X (output)
 * [[1, 0, 1, 1],
 *  [0, 1, 1, 0]]
 *
 * dataset (output):
 * [[100, 1, 102, 103],
 *  [100, 6, 102, 103]
 *  [0, 101, 102, 3],
 *  [5, 101, 102, 8]]
 *
 * Each block owns TWO consecutive rows of X (sample at 2*blockIdx.x, complement at
 * 2*blockIdx.x + 1) and the corresponding 2*nrows_background rows of `dataset`.
 * Assumes the relevant rows of X are zero-initialized (the collision check relies
 * on it).
 */
template <typename DataT, typename IdxT>
__global__ void sampled_rows_kernel(IdxT* nsamples,
                                    float* X,
                                    IdxT nrows_X,
                                    IdxT ncols,
                                    DataT* background,
                                    IdxT nrows_background,
                                    DataT* dataset,
                                    DataT* observation,
                                    uint64_t seed)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  // number of entries this block samples
  int k_blk = nsamples[blockIdx.x];

  // First k threads of block generate samples
  if (threadIdx.x < k_blk) {
    curandStatePhilox4_32_10_t state;
    curand_init((unsigned long long)seed, (unsigned long long)tid, 0, &state);
    // BUGFIX: curand_uniform returns a value in (0, 1], so the product can reach
    // exactly `ncols`; clamp to stay in bounds.
    int rand_idx = min((int)(curand_uniform(&state) * ncols), (int)ncols - 1);

    // Since X is initialized to 0, we quickly check for collisions (if k_blk << ncols
    // the likelihood of collisions is low)
    while (atomicExch(&(X[2 * blockIdx.x * ncols + rand_idx]), 1) == 1) {
      rand_idx = min((int)(curand_uniform(&state) * ncols), (int)ncols - 1);
    }
  }
  __syncthreads();

  // Each block processes one row of X (plus its complement). Columns are iterated
  // over by blockDim.x at a time to ensure data coalescing.
  int col_idx = threadIdx.x;
  while (col_idx < ncols) {
    // Load the X mask for the current column and write its complement row.
    int curr_X = (int)X[2 * blockIdx.x * ncols + col_idx];
    X[(2 * blockIdx.x + 1) * ncols + col_idx] = 1 - curr_X;

    // Scatter the sampled row.
    int bg_row_idx_base = 2 * blockIdx.x * nrows_background;
    for (int r = 0; r < nrows_background; r++) {
      int bg_row_idx = bg_row_idx_base + r;
      if (curr_X == 0) {
        dataset[bg_row_idx * ncols + col_idx] = background[r * ncols + col_idx];
      } else {
        dataset[bg_row_idx * ncols + col_idx] = observation[col_idx];
      }
    }

    // Scatter the complement row.
    // BUGFIX: the complement occupies the rows immediately after this block's sample
    // rows, i.e. base (2*blockIdx.x + 1) * nrows_background. The previous expression
    // 2 * (blockIdx.x + 1) * nrows_background overshot by nrows_background, writing
    // into the next block's region (and out of bounds for the last block).
    bg_row_idx_base = (2 * blockIdx.x + 1) * nrows_background;
    for (int r = 0; r < nrows_background; r++) {
      int bg_row_idx = bg_row_idx_base + r;
      if (curr_X == 0) {
        dataset[bg_row_idx * ncols + col_idx] = observation[col_idx];
      } else {
        dataset[bg_row_idx * ncols + col_idx] = background[r * ncols + col_idx];
      }
    }

    col_idx += blockDim.x;
  }
}

/*
 * Host driver: fills `dataset` (and the sampled rows of `X`) from `background`
 * and `observation`. The first (nrows_X - len_samples) rows of X are treated as
 * exact; the remaining len_samples rows are sampled in complement pairs (so
 * len_samples is expected to be even).
 */
template <typename DataT, typename IdxT>
void kernel_dataset_impl(const raft::handle_t& handle,
                         float* X,
                         IdxT nrows_X,
                         IdxT ncols,
                         DataT* background,
                         IdxT nrows_background,
                         DataT* dataset,
                         DataT* observation,
                         int* nsamples,
                         int len_samples,
                         int maxsample,
                         uint64_t seed)
{
  const auto& handle_impl = handle;
  cudaStream_t stream     = handle_impl.get_stream();

  IdxT nblks;
  IdxT nthreads;

  nthreads = min(512, ncols);
  nblks    = nrows_X - len_samples;

  // Exact part of the dataset.
  if (nblks > 0) {
    exact_rows_kernel<<<nblks, nthreads, 0, stream>>>(
      X, nrows_X, ncols, background, nrows_background, dataset, observation);
  }

  RAFT_CUDA_TRY(cudaPeekAtLastError());

  // check if random part of the dataset is needed
  if (len_samples > 0) {
    nblks = len_samples / 2;  // each block does a sample and its complement
    sampled_rows_kernel<<<nblks, nthreads, 0, stream>>>(
      nsamples,
      &X[(nrows_X - len_samples) * ncols],
      len_samples,
      ncols,
      background,
      nrows_background,
      &dataset[(nrows_X - len_samples) * nrows_background * ncols],
      observation,
      seed);
  }

  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

// Public entry point (float).
void kernel_dataset(const raft::handle_t& handle,
                    float* X,
                    int nrows_X,
                    int ncols,
                    float* background,
                    int nrows_background,
                    float* dataset,
                    float* observation,
                    int* nsamples,
                    int len_nsamples,
                    int maxsample,
                    uint64_t seed)
{
  kernel_dataset_impl(handle,
                      X,
                      nrows_X,
                      ncols,
                      background,
                      nrows_background,
                      dataset,
                      observation,
                      nsamples,
                      len_nsamples,
                      maxsample,
                      seed);
}

// Public entry point (double background/dataset/observation, float X).
void kernel_dataset(const raft::handle_t& handle,
                    float* X,
                    int nrows_X,
                    int ncols,
                    double* background,
                    int nrows_background,
                    double* dataset,
                    double* observation,
                    int* nsamples,
                    int len_nsamples,
                    int maxsample,
                    uint64_t seed)
{
  kernel_dataset_impl(handle,
                      X,
                      nrows_X,
                      ncols,
                      background,
                      nrows_background,
                      dataset,
                      observation,
                      nsamples,
                      len_nsamples,
                      maxsample,
                      seed);
}

}  // namespace Explainer
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/holtwinters/holtwinters.cu
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runner.cuh"

#include <cuml/tsa/holtwinters.h>

namespace ML {
namespace HoltWinters {

// Computes the buffer lengths / offsets needed by fit and forecast.
// Trend (beta) and seasonality (gamma) components are always enabled here.
void buffer_size(int n,
                 int batch_size,
                 int frequency,
                 int* start_leveltrend_len,
                 int* start_season_len,
                 int* components_len,
                 int* error_len,
                 int* leveltrend_coef_shift,
                 int* season_coef_shift)
{
  const bool with_trend = true;
  const bool with_season = true;
  ML::HoltWintersBufferSize(n,
                            batch_size,
                            frequency,
                            with_trend,
                            with_season,
                            start_leveltrend_len,
                            start_season_len,
                            components_len,
                            error_len,
                            leveltrend_coef_shift,
                            season_coef_shift);
}

// Single-precision fit: thin forwarder to the templated helper.
void fit(const raft::handle_t& handle,
         int n,
         int batch_size,
         int frequency,
         int start_periods,
         ML::SeasonalType seasonal,
         float epsilon,
         float* data,
         float* level_d,
         float* trend_d,
         float* season_d,
         float* error_d)
{
  ML::HoltWintersFitHelper<float>(handle,
                                  n,
                                  batch_size,
                                  frequency,
                                  start_periods,
                                  seasonal,
                                  epsilon,
                                  data,
                                  level_d,
                                  trend_d,
                                  season_d,
                                  error_d);
}

// Double-precision fit: thin forwarder to the templated helper.
void fit(const raft::handle_t& handle,
         int n,
         int batch_size,
         int frequency,
         int start_periods,
         ML::SeasonalType seasonal,
         double epsilon,
         double* data,
         double* level_d,
         double* trend_d,
         double* season_d,
         double* error_d)
{
  ML::HoltWintersFitHelper<double>(handle,
                                   n,
                                   batch_size,
                                   frequency,
                                   start_periods,
                                   seasonal,
                                   epsilon,
                                   data,
                                   level_d,
                                   trend_d,
                                   season_d,
                                   error_d);
}

// Single-precision forecast of `h` steps from previously-fitted components.
void forecast(const raft::handle_t& handle,
              int n,
              int batch_size,
              int frequency,
              int h,
              ML::SeasonalType seasonal,
              float* level_d,
              float* trend_d,
              float* season_d,
              float* forecast_d)
{
  ML::HoltWintersForecastHelper<float>(
    handle, n, batch_size, frequency, h, seasonal, level_d, trend_d, season_d, forecast_d);
}

// Double-precision forecast of `h` steps from previously-fitted components.
void forecast(const raft::handle_t& handle,
              int n,
              int batch_size,
              int frequency,
              int h,
              ML::SeasonalType seasonal,
              double* level_d,
              double* trend_d,
              double* season_d,
              double* forecast_d)
{
  ML::HoltWintersForecastHelper<double>(
    handle, n, batch_size, frequency, h, seasonal, level_d, trend_d, season_d, forecast_d);
}

}  // namespace HoltWinters
}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/holtwinters/holtwinters_api.cpp
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cuml/tsa/holtwinters_api.h>

#include <common/cumlHandle.hpp>

#include <cuml/tsa/holtwinters.h>

extern "C" {

// C wrapper around ML::HoltWinters::buffer_size. Exceptions are mapped to
// CUML_ERROR_UNKNOWN; C++ exceptions must never cross the C boundary.
cumlError_t cumlHoltWinters_buffer_size(int n,
                                        int batch_size,
                                        int frequency,
                                        int* start_leveltrend_len,
                                        int* start_season_len,
                                        int* components_len,
                                        int* error_len,
                                        int* leveltrend_coef_shift,
                                        int* season_coef_shift)
{
  try {
    ML::HoltWinters::buffer_size(n,
                                 batch_size,
                                 frequency,
                                 start_leveltrend_len,
                                 start_season_len,
                                 components_len,
                                 error_len,
                                 leveltrend_coef_shift,
                                 season_coef_shift);
  } catch (...) {
    return CUML_ERROR_UNKNOWN;
  }
  return CUML_SUCCESS;
}

// Single-precision C fit wrapper: resolves the opaque handle, then forwards.
cumlError_t cumlHoltWintersSp_fit(cumlHandle_t handle,
                                  int n,
                                  int batch_size,
                                  int frequency,
                                  int start_periods,
                                  cumlHoltWintersSeasonal_t seasonal,
                                  float epsilon,
                                  float* data,
                                  float* level_d,
                                  float* trend_d,
                                  float* season_d,
                                  float* error_d)
{
  raft::handle_t* raft_handle = nullptr;
  cumlError_t rc;
  std::tie(raft_handle, rc) = ML::handleMap.lookupHandlePointer(handle);
  if (rc != CUML_SUCCESS) { return rc; }
  try {
    ML::HoltWinters::fit(*raft_handle,
                         n,
                         batch_size,
                         frequency,
                         start_periods,
                         static_cast<ML::SeasonalType>(seasonal),
                         epsilon,
                         data,
                         level_d,
                         trend_d,
                         season_d,
                         error_d);
  } catch (...) {
    rc = CUML_ERROR_UNKNOWN;
  }
  return rc;
}

// Double-precision C fit wrapper: resolves the opaque handle, then forwards.
cumlError_t cumlHoltWintersDp_fit(cumlHandle_t handle,
                                  int n,
                                  int batch_size,
                                  int frequency,
                                  int start_periods,
                                  cumlHoltWintersSeasonal_t seasonal,
                                  double epsilon,
                                  double* data,
                                  double* level_d,
                                  double* trend_d,
                                  double* season_d,
                                  double* error_d)
{
  raft::handle_t* raft_handle = nullptr;
  cumlError_t rc;
  std::tie(raft_handle, rc) = ML::handleMap.lookupHandlePointer(handle);
  if (rc != CUML_SUCCESS) { return rc; }
  try {
    ML::HoltWinters::fit(*raft_handle,
                         n,
                         batch_size,
                         frequency,
                         start_periods,
                         static_cast<ML::SeasonalType>(seasonal),
                         epsilon,
                         data,
                         level_d,
                         trend_d,
                         season_d,
                         error_d);
  } catch (...) {
    rc = CUML_ERROR_UNKNOWN;
  }
  return rc;
}

// Single-precision C forecast wrapper.
cumlError_t cumlHoltWintersSp_forecast(cumlHandle_t handle,
                                       int n,
                                       int batch_size,
                                       int frequency,
                                       int h,
                                       cumlHoltWintersSeasonal_t seasonal,
                                       float* level_d,
                                       float* trend_d,
                                       float* season_d,
                                       float* forecast_d)
{
  raft::handle_t* raft_handle = nullptr;
  cumlError_t rc;
  std::tie(raft_handle, rc) = ML::handleMap.lookupHandlePointer(handle);
  if (rc != CUML_SUCCESS) { return rc; }
  try {
    ML::HoltWinters::forecast(*raft_handle,
                              n,
                              batch_size,
                              frequency,
                              h,
                              static_cast<ML::SeasonalType>(seasonal),
                              level_d,
                              trend_d,
                              season_d,
                              forecast_d);
  } catch (...) {
    rc = CUML_ERROR_UNKNOWN;
  }
  return rc;
}

// Double-precision C forecast wrapper.
cumlError_t cumlHoltWintersDp_forecast(cumlHandle_t handle,
                                       int n,
                                       int batch_size,
                                       int frequency,
                                       int h,
                                       cumlHoltWintersSeasonal_t seasonal,
                                       double* level_d,
                                       double* trend_d,
                                       double* season_d,
                                       double* forecast_d)
{
  raft::handle_t* raft_handle = nullptr;
  cumlError_t rc;
  std::tie(raft_handle, rc) = ML::handleMap.lookupHandlePointer(handle);
  if (rc != CUML_SUCCESS) { return rc; }
  try {
    ML::HoltWinters::forecast(*raft_handle,
                              n,
                              batch_size,
                              frequency,
                              h,
                              static_cast<ML::SeasonalType>(seasonal),
                              level_d,
                              trend_d,
                              season_d,
                              forecast_d);
  } catch (...) {
    rc = CUML_ERROR_UNKNOWN;
  }
  return rc;
}
}
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/holtwinters/runner.cuh
/*
 * Copyright (c) 2019-2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "internal/hw_decompose.cuh"
#include "internal/hw_eval.cuh"
#include "internal/hw_forecast.cuh"
#include "internal/hw_optim.cuh"

#include <cuml/tsa/holtwinters_params.h>

#include <raft/util/cudart_utils.hpp>
// #TODO: Replace with public header when ready
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/linalg/transpose.cuh>

#include <rmm/device_uvector.hpp>

namespace ML {

// Transposes the m x n device matrix `data_in` into `data_out` on the handle's
// stream. All handle streams are synchronized when this function returns
// (raft::stream_syncer).
template <typename Dtype>
void HWTranspose(const raft::handle_t& handle, Dtype* data_in, int m, int n, Dtype* data_out)
{
  ASSERT(!(!data_in || !data_out || n < 1 || m < 1), "HW error in in line %d", __LINE__);
  const raft::handle_t& handle_impl = handle;
  raft::stream_syncer _(handle_impl);
  cudaStream_t stream = handle_impl.get_stream();
  // NOTE: removed an unused cublas handle local; raft's transpose manages cuBLAS itself.
  raft::linalg::transpose<Dtype>(handle, data_in, data_out, n, m, stream);
}

// Computes lengths and offsets of all buffers the fit/forecast pipeline needs.
// w_len is the number of leading observations consumed by seeding: `frequency`
// with seasonality, 2 with trend only, 1 with level only. Any output pointer may
// be null to skip that value.
void HoltWintersBufferSize(int n,
                           int batch_size,
                           int frequency,
                           bool use_beta,
                           bool use_gamma,
                           int* start_leveltrend_len,
                           int* start_season_len,
                           int* components_len,
                           int* error_len,
                           int* leveltrend_coef_shift,
                           int* season_coef_shift)
{
  int w_len = use_gamma ? frequency : (use_beta ? 2 : 1);

  if (start_leveltrend_len) *start_leveltrend_len = batch_size;
  if (use_gamma && start_season_len) *start_season_len = frequency * batch_size;

  if (components_len) *components_len = (n - w_len) * batch_size;

  if (leveltrend_coef_shift) *leveltrend_coef_shift = (n - w_len - 1) * batch_size;
  if (use_gamma && season_coef_shift) *season_coef_shift = (n - w_len - frequency) * batch_size;

  if (error_len) *error_len = batch_size;
}

// Seeds the level/trend/season start values from the head of the series.
// Which decomposition runs depends on which output pointers are non-null:
//   level only          -> copy first observation,
//   level + trend       -> second observation as level/trend seed, minus first (axpy),
//   level+trend+season  -> full STL-style decomposition.
template <typename Dtype>
void HoltWintersDecompose(const raft::handle_t& handle,
                          const Dtype* ts,
                          int n,
                          int batch_size,
                          int frequency,
                          Dtype* start_level,
                          Dtype* start_trend,
                          Dtype* start_season,
                          int start_periods,
                          ML::SeasonalType seasonal)
{
  const raft::handle_t& handle_impl = handle;
  raft::stream_syncer _(handle_impl);
  cudaStream_t stream     = handle_impl.get_stream();
  cublasHandle_t cublas_h = handle_impl.get_cublas_handle();

  if (start_level != nullptr && start_trend == nullptr && start_season == nullptr) {
    // level decomposition
    raft::copy(start_level, ts, batch_size, stream);
  } else if (start_level != nullptr && start_trend != nullptr && start_season == nullptr) {
    // trend decomposition
    raft::copy(start_level, ts + batch_size, batch_size, stream);
    raft::copy(start_trend, ts + batch_size, batch_size, stream);

    const Dtype alpha = -1.;
    // #TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasaxpy(
      cublas_h, batch_size, &alpha, ts, 1, start_trend, 1, stream));
    // cublas::axpy(batch_size, (Dtype)-1., ts, start_trend);
  } else if (start_level != nullptr && start_trend != nullptr && start_season != nullptr) {
    stl_decomposition_gpu(handle_impl,
                          ts,
                          n,
                          batch_size,
                          frequency,
                          start_periods,
                          start_level,
                          start_trend,
                          start_season,
                          seasonal);
  }
}

// Evaluates the Holt-Winters recursions for fixed alpha/beta/gamma, producing
// level/trend/season components, fitted values (xhat) and per-series error.
// Any of the output pointers may be null; if all are null nothing is computed.
template <typename Dtype>
void HoltWintersEval(const raft::handle_t& handle,
                     const Dtype* ts,
                     int n,
                     int batch_size,
                     int frequency,
                     const Dtype* start_level,
                     const Dtype* start_trend,
                     const Dtype* start_season,
                     const Dtype* alpha,
                     const Dtype* beta,
                     const Dtype* gamma,
                     Dtype* level,
                     Dtype* trend,
                     Dtype* season,
                     Dtype* xhat,
                     Dtype* error,
                     ML::SeasonalType seasonal)
{
  const raft::handle_t& handle_impl = handle;
  raft::stream_syncer _(handle_impl);
  cudaStream_t stream = handle_impl.get_stream();

  // A trend seed requires beta (and vice versa); a season seed requires gamma.
  ASSERT(!((!start_trend) != (!beta) || (!start_season) != (!gamma)),
         "HW error in in line %d",
         __LINE__);
  ASSERT(!(!alpha || !start_level), "HW error in in line %d", __LINE__);
  ASSERT(!(start_season != nullptr && frequency < 2), "HW error in in line %d", __LINE__);

  if (!(!level && !trend && !season && !xhat && !error)) {
    holtwinters_eval_gpu(handle_impl,
                         ts,
                         n,
                         batch_size,
                         frequency,
                         start_level,
                         start_trend,
                         start_season,
                         alpha,
                         beta,
                         gamma,
                         level,
                         trend,
                         season,
                         xhat,
                         error,
                         seasonal);
  }
}

// expose line search step size - https://github.com/rapidsai/cuml/issues/886
// Also, precision errors arise in optimization. There's floating point instability,
// and epsilon majorly influences the fitting based on precision. For a summary,
// https://github.com/rapidsai/cuml/issues/888
//
// Optimizes the smoothing parameters (any subset of alpha/beta/gamma flagged via
// optim_*) with BFGS + line search, then fills the requested outputs. Fields of
// `optim_params` that are <= 0 (or < 0 for the min_* tolerances) keep their
// defaults below.
template <typename Dtype>
void HoltWintersOptim(const raft::handle_t& handle,
                      const Dtype* ts,
                      int n,
                      int batch_size,
                      int frequency,
                      const Dtype* start_level,
                      const Dtype* start_trend,
                      const Dtype* start_season,
                      Dtype* alpha,
                      bool optim_alpha,
                      Dtype* beta,
                      bool optim_beta,
                      Dtype* gamma,
                      bool optim_gamma,
                      Dtype epsilon,
                      Dtype* level,
                      Dtype* trend,
                      Dtype* season,
                      Dtype* xhat,
                      Dtype* error,
                      OptimCriterion* optim_result,
                      OptimParams<Dtype>* optim_params,
                      ML::SeasonalType seasonal)
{
  const raft::handle_t& handle_impl = handle;
  raft::stream_syncer _(handle_impl);
  cudaStream_t stream = handle_impl.get_stream();

  // default values
  OptimParams<Dtype> optim_params_;
  optim_params_.eps                  = epsilon;
  optim_params_.min_param_diff       = (Dtype)1e-8;
  optim_params_.min_error_diff       = (Dtype)1e-8;
  optim_params_.min_grad_norm        = (Dtype)1e-4;
  optim_params_.bfgs_iter_limit      = 1000;
  optim_params_.linesearch_iter_limit = 100;
  optim_params_.linesearch_tau       = (Dtype)0.5;
  optim_params_.linesearch_c         = (Dtype)0.8;
  optim_params_.linesearch_step_size = (Dtype)-1;

  if (optim_params) {
    if (optim_params->eps > .0) optim_params_.eps = optim_params->eps;
    if (optim_params->min_param_diff >= .0)
      optim_params_.min_param_diff = optim_params->min_param_diff;
    if (optim_params->min_error_diff >= .0)
      optim_params_.min_error_diff = optim_params->min_error_diff;
    if (optim_params->min_grad_norm >= .0)
      optim_params_.min_grad_norm = optim_params->min_grad_norm;
    if (optim_params->bfgs_iter_limit > 0)
      optim_params_.bfgs_iter_limit = optim_params->bfgs_iter_limit;
    if (optim_params->linesearch_iter_limit > 0)
      optim_params_.linesearch_iter_limit = optim_params->linesearch_iter_limit;
    if (optim_params->linesearch_tau > .0)
      optim_params_.linesearch_tau = optim_params->linesearch_tau;
    if (optim_params->linesearch_c > .0) optim_params_.linesearch_c = optim_params->linesearch_c;
    if (optim_params->linesearch_step_size > 0)
      optim_params_.linesearch_step_size = optim_params->linesearch_step_size;
  }

  ASSERT(alpha && start_level, "HW error in in line %d", __LINE__);
  ASSERT(!((!start_trend) != (!beta) || (!start_season) != (!gamma)),
         "HW error in in line %d",
         __LINE__);
  ASSERT(!(start_season && frequency < 2), "HW error in in line %d", __LINE__);
  ASSERT(!(!optim_alpha && !optim_beta && !optim_gamma), "HW error in in line %d", __LINE__);
  ASSERT(!((optim_beta && !beta) || (optim_gamma && !gamma)), "HW error in in line %d", __LINE__);

  // BUGFIX: the original condition used a bitwise `&` between `!gamma` and `!level`
  // (`!gamma & !level`). For bools the truth table is identical, but it loses
  // short-circuiting and was clearly a typo; use `&&` consistently.
  if (!(!alpha && !beta && !gamma && !level && !trend && !season && !xhat && !error)) {
    holtwinters_optim_gpu(handle_impl,
                          ts,
                          n,
                          batch_size,
                          frequency,
                          start_level,
                          start_trend,
                          start_season,
                          alpha,
                          optim_alpha,
                          beta,
                          optim_beta,
                          gamma,
                          optim_gamma,
                          level,
                          trend,
                          season,
                          xhat,
                          error,
                          optim_result,
                          seasonal,
                          optim_params_);
  }
}

// Forecasts `h` future steps per series from the final level/trend/season
// coefficients. At least one coefficient array must be provided.
template <typename Dtype>
void HoltWintersForecast(const raft::handle_t& handle,
                         Dtype* forecast,
                         int h,
                         int batch_size,
                         int frequency,
                         const Dtype* level_coef,
                         const Dtype* trend_coef,
                         const Dtype* season_coef,
                         ML::SeasonalType seasonal)
{
  const raft::handle_t& handle_impl = handle;
  raft::stream_syncer _(handle_impl);
  cudaStream_t stream = handle_impl.get_stream();

  ASSERT(!(!level_coef && !trend_coef && !season_coef), "HW error in in line %d", __LINE__);
  ASSERT(!(season_coef && frequency < 2), "HW error in in line %d", __LINE__);

  holtwinters_forecast_gpu(
    handle_impl, forecast, h, batch_size, frequency, level_coef, trend_coef, season_coef, seasonal);
}

// change optim_gamma to false here to test bug in Double Exponential Smoothing
// https://github.com/rapidsai/cuml/issues/889
//
// End-to-end fit: transpose input to column-major, seed components via
// decomposition, then optimize alpha/beta/gamma and fill the component outputs.
template <typename Dtype>
void HoltWintersFitHelper(const raft::handle_t& handle,
                          int n,
                          int batch_size,
                          int frequency,
                          int start_periods,
                          ML::SeasonalType seasonal,
                          Dtype epsilon,
                          Dtype* data,
                          Dtype* level_d,
                          Dtype* trend_d,
                          Dtype* season_d,
                          Dtype* error_d)
{
  const raft::handle_t& handle_impl = handle;
  raft::stream_syncer _(handle_impl);
  cudaStream_t stream = handle_impl.get_stream();

  bool optim_alpha = true, optim_beta = true, optim_gamma = true;

  // initial values for alpha, beta and gamma
  std::vector<Dtype> alpha_h(batch_size, 0.4);
  std::vector<Dtype> beta_h(batch_size, 0.3);
  std::vector<Dtype> gamma_h(batch_size, 0.3);

  int leveltrend_seed_len, season_seed_len, components_len;
  int leveltrend_coef_offset, season_coef_offset;
  int error_len;

  HoltWintersBufferSize(n,
                        batch_size,
                        frequency,
                        optim_beta,
                        optim_gamma,
                        &leveltrend_seed_len,     // = batch_size
                        &season_seed_len,         // = frequency*batch_size
                        &components_len,          // = (n-w_len)*batch_size
                        &error_len,               // = batch_size
                        &leveltrend_coef_offset,  // = (n-wlen-1)*batch_size (last row)
                        &season_coef_offset);     // = (n-wlen-frequency)*batch_size(last freq rows)

  rmm::device_uvector<Dtype> dataset_d(batch_size * n, stream);
  rmm::device_uvector<Dtype> alpha_d(batch_size, stream);
  raft::update_device(alpha_d.data(), alpha_h.data(), batch_size, stream);
  rmm::device_uvector<Dtype> level_seed_d(leveltrend_seed_len, stream);

  rmm::device_uvector<Dtype> beta_d(0, stream);
  rmm::device_uvector<Dtype> gamma_d(0, stream);
  rmm::device_uvector<Dtype> trend_seed_d(0, stream);
  rmm::device_uvector<Dtype> start_season_d(0, stream);

  if (optim_beta) {
    beta_d.resize(batch_size, stream);
    raft::update_device(beta_d.data(), beta_h.data(), batch_size, stream);
    trend_seed_d.resize(leveltrend_seed_len, stream);
  }

  if (optim_gamma) {
    gamma_d.resize(batch_size, stream);
    raft::update_device(gamma_d.data(), gamma_h.data(), batch_size, stream);
    start_season_d.resize(season_seed_len, stream);
  }

  // Step 1: transpose the dataset (ML expects col major dataset)
  HWTranspose(handle, data, batch_size, n, dataset_d.data());

  // Step 2: Decompose dataset to get seed for level, trend and seasonal values
  HoltWintersDecompose(handle,
                       dataset_d.data(),
                       n,
                       batch_size,
                       frequency,
                       level_seed_d.data(),
                       trend_seed_d.data(),
                       start_season_d.data(),
                       start_periods,
                       seasonal);

  // Step 3: Find optimal alpha, beta and gamma values (seasonal HW)
  HoltWintersOptim(handle,
                   dataset_d.data(),
                   n,
                   batch_size,
                   frequency,
                   level_seed_d.data(),
                   trend_seed_d.data(),
                   start_season_d.data(),
                   alpha_d.data(),
                   optim_alpha,
                   beta_d.data(),
                   optim_beta,
                   gamma_d.data(),
                   optim_gamma,
                   epsilon,
                   level_d,
                   trend_d,
                   season_d,
                   (Dtype*)nullptr,
                   error_d,
                   (OptimCriterion*)nullptr,
                   (OptimParams<Dtype>*)nullptr,
                   seasonal);
}

// Forecast helper: recomputes coefficient offsets and forecasts `h` steps from
// the last fitted components.
template <typename Dtype>
void HoltWintersForecastHelper(const raft::handle_t& handle,
                               int n,
                               int batch_size,
                               int frequency,
                               int h,
                               ML::SeasonalType seasonal,
                               Dtype* level_d,
                               Dtype* trend_d,
                               Dtype* season_d,
                               Dtype* forecast_d)
{
  const raft::handle_t& handle_impl = handle;
  raft::stream_syncer _(handle_impl);
  cudaStream_t stream = handle_impl.get_stream();

  bool optim_beta = true, optim_gamma = true;

  int leveltrend_seed_len, season_seed_len, components_len;
  int leveltrend_coef_offset, season_coef_offset;
  int error_len;

  HoltWintersBufferSize(n,
                        batch_size,
                        frequency,
                        optim_beta,
                        optim_gamma,
                        &leveltrend_seed_len,     // = batch_size
                        &season_seed_len,         // = frequency*batch_size
                        &components_len,          // = (n-w_len)*batch_size
                        &error_len,               // = batch_size
                        &leveltrend_coef_offset,  // = (n-wlen-1)*batch_size (last row)
                        &season_coef_offset);     // = (n-wlen-frequency)*batch_size(last freq rows)

  // Step 4: Do forecast
  HoltWintersForecast(handle,
                      forecast_d,
                      h,
                      batch_size,
                      frequency,
                      level_d + leveltrend_coef_offset,
                      trend_d + leveltrend_coef_offset,
                      season_d + season_coef_offset,
                      seasonal);
}

}  // namespace ML
0
rapidsai_public_repos/cuml/cpp/src/holtwinters
rapidsai_public_repos/cuml/cpp/src/holtwinters/internal/hw_decompose.cuh
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <raft/core/handle.hpp>
#include <raft/util/cudart_utils.hpp>
// #TODO: Replace with public header when ready
#include <raft/linalg/detail/cublas_wrappers.hpp>
// #TODO: Replace with public header when ready
#include <raft/linalg/detail/cusolver_wrappers.hpp>

#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>

#include "hw_utils.cuh"

// optimize, maybe im2col ?
// https://github.com/rapidsai/cuml/issues/891
//
// Batched 1-D convolution: one thread per batch member, each producing
// `output_size` outputs from a centered moving filter over its column.
template <typename Dtype>
__global__ void conv1d_kernel(const Dtype* input,
                              int batch_size,
                              const Dtype* filter,
                              int filter_size,
                              Dtype* output,
                              int output_size)
{
  const int tid = GET_TID;
  if (tid < batch_size) {
    for (int o = 0; o < output_size; ++o) {
      Dtype out = 0.;
      for (int i = 0; i < filter_size; ++i)
        out += filter[i] * input[tid + (i + o) * batch_size];
      output[tid + o * batch_size] = out;
    }
  }
}

// Host launcher for conv1d_kernel on the handle's stream.
template <typename Dtype>
void conv1d(const raft::handle_t& handle,
            const Dtype* input,
            int batch_size,
            const Dtype* filter,
            int filter_size,
            Dtype* output,
            int output_size)
{
  int total_threads = batch_size;
  conv1d_kernel<Dtype>
    <<<GET_NUM_BLOCKS(total_threads), GET_THREADS_PER_BLOCK(total_threads), 0, handle.get_stream()>>>(
      input, batch_size, filter, filter_size, output, output_size);
}

// https://github.com/rapidsai/cuml/issues/891
//
// Per-batch seasonal seed: averages the detrended `season` values by phase
// (i mod frequency), then centers them — subtracting the overall mean for
// additive seasonality, dividing by it for multiplicative.
template <typename Dtype>
__global__ void season_mean_kernel(const Dtype* season,
                                   int len,
                                   int batch_size,
                                   Dtype* start_season,
                                   int frequency,
                                   int half_filter_size,
                                   bool ADDITIVE_KERNEL)
{
  int tid = GET_TID;
  if (tid < batch_size) {
    Dtype mean = 0.0;
    for (int i = 0; i < frequency; ++i) {
      Dtype period_mean = 0.;
      int c             = 0;
      for (int k = i; k < len; k = k + frequency) {
        period_mean += season[k * batch_size + tid];
        c++;
      }
      // number of terms accumulated above for phase i
      int count = 1 + ((len - i - 1) / frequency);
      period_mean /= count;
      // rotate by half the filter width so phases line up with the original series
      int ss_idx                           = (i + half_filter_size) % frequency;
      start_season[ss_idx * batch_size + tid] = period_mean;
      mean += period_mean;
    }
    mean /= frequency;
    for (int i = 0; i < frequency; ++i) {
      if (ADDITIVE_KERNEL)
        start_season[i * batch_size + tid] -= mean;
      else  // MULTIPLICATIVE
        start_season[i * batch_size + tid] /= mean;
    }
  }
}

// Host launcher for season_mean_kernel on the handle's stream.
template <typename Dtype>
void season_mean(const raft::handle_t& handle,
                 const Dtype* season,
                 int len,
                 int batch_size,
                 Dtype* start_season,
                 int frequency,
                 int half_filter_size,
                 ML::SeasonalType seasonal)
{
  cudaStream_t stream = handle.get_stream();
  bool is_additive    = seasonal == ML::SeasonalType::ADDITIVE;
  season_mean_kernel<Dtype>
    <<<GET_NUM_BLOCKS(batch_size), GET_THREADS_PER_BLOCK(batch_size), 0, stream>>>(
      season, len, batch_size, start_season, frequency, half_filter_size, is_additive);
}

// Single-thread kernel: invert the 2x2 upper-triangular R factor stored in the
// first column/row of A (column-major, lda = trend_len).
template <typename Dtype>
__global__ void RinvKernel(const Dtype* A, Dtype* Rinv, int trend_len)
{
  // Inverse of R (2x2 upper triangular matrix)
  int tid = GET_TID;
  if (tid == 0) {
    Dtype a = A[0], b = A[trend_len], d = A[trend_len + 1];
    Dtype factor = 1. / (a * d);
    Rinv[0]      = factor * d;
    Rinv[1]      = 0.;
    Rinv[2]      = -factor * b;
    Rinv[3]      = factor * a;
  }
}

// One thread per batch member: level/trend = rq * B, with rq the precomputed
// 2 x len pseudo-inverse factor (inv(R) * Q^T).
template <typename Dtype>
__global__ void batched_ls_solver_kernel(
  const Dtype* B, const Dtype* rq, int batch_size, int len, Dtype* level, Dtype* trend)
{
  int tid = GET_TID;
  if (tid < batch_size) {
    Dtype level_ = 0., trend_ = 0.;
    for (int i = 0; i < len; ++i) {
      Dtype b = B[tid + i * batch_size];
      level_ += rq[2 * i] * b;
      trend_ += rq[2 * i + 1] * b;
    }
    level[tid] = level_;
    trend[tid] = trend_;
  }
}

// Batched least-squares fit of a line (intercept = level, slope = trend) to each
// series' smoothed trend, via a QR factorization of the common design matrix
// A = [1, t] (trend_len x 2, column-major).
template <typename Dtype>
void batched_ls(const raft::handle_t& handle,
                const Dtype* data,
                int trend_len,
                int batch_size,
                Dtype* level,
                Dtype* trend)
{
  cudaStream_t stream         = handle.get_stream();
  cublasHandle_t cublas_h     = handle.get_cublas_handle();
  cusolverDnHandle_t cusolver_h = handle.get_cusolver_dn_handle();

  const Dtype one  = (Dtype)1.;
  const Dtype zero = (Dtype)0.;
  int geqrf_buffer;
  int orgqr_buffer;
  int lwork_size;

  // Allocate memory
  std::vector<Dtype> A_h(2 * trend_len);
  rmm::device_uvector<Dtype> A_d(2 * trend_len, stream);
  rmm::device_uvector<Dtype> tau_d(2, stream);
  rmm::device_uvector<Dtype> Rinv_d(4, stream);
  rmm::device_uvector<Dtype> R1Qt_d(2 * trend_len, stream);
  rmm::device_scalar<int> dev_info_d(stream);

  // Prepare A: column 0 all ones (intercept), column 1 = 1..trend_len (time index)
  for (int i = 0; i < trend_len; ++i) {
    A_h[i]             = (Dtype)1.;
    A_h[trend_len + i] = (Dtype)(i + 1);
  }
  raft::update_device(A_d.data(), A_h.data(), 2 * trend_len, stream);

  // BUGFIX: cuSOLVER requires lda >= max(1, m); the buffer-size queries previously
  // passed lda = 2 while the actual geqrf/orgqr calls below use lda = trend_len,
  // so the queried workspace could be inconsistent with the factorization calls.
  // #TODO: Call from public API when ready
  RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDngeqrf_bufferSize<Dtype>(
    cusolver_h, trend_len, 2, A_d.data(), trend_len, &geqrf_buffer));

  // #TODO: Call from public API when ready
  RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDnorgqr_bufferSize<Dtype>(
    cusolver_h, trend_len, 2, 2, A_d.data(), trend_len, tau_d.data(), &orgqr_buffer));

  lwork_size = geqrf_buffer > orgqr_buffer ? geqrf_buffer : orgqr_buffer;
  rmm::device_uvector<Dtype> lwork_d(lwork_size, stream);

  // QR decomposition of A
  // #TODO: Call from public API when ready
  RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDngeqrf<Dtype>(cusolver_h,
                                                                 trend_len,
                                                                 2,
                                                                 A_d.data(),
                                                                 trend_len,
                                                                 tau_d.data(),
                                                                 lwork_d.data(),
                                                                 lwork_size,
                                                                 dev_info_d.data(),
                                                                 stream));

  // Single thread kernel to inverse R
  RinvKernel<Dtype><<<1, 1, 0, stream>>>(A_d.data(), Rinv_d.data(), trend_len);

  // R1QT = inv(R)*transpose(Q)
  // #TODO: Call from public API when ready
  RAFT_CUSOLVER_TRY(raft::linalg::detail::cusolverDnorgqr<Dtype>(cusolver_h,
                                                                 trend_len,
                                                                 2,
                                                                 2,
                                                                 A_d.data(),
                                                                 trend_len,
                                                                 tau_d.data(),
                                                                 lwork_d.data(),
                                                                 lwork_size,
                                                                 dev_info_d.data(),
                                                                 stream));

  // #TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm<Dtype>(cublas_h,
                                                          CUBLAS_OP_N,
                                                          CUBLAS_OP_T,
                                                          2,
                                                          trend_len,
                                                          2,
                                                          &one,
                                                          Rinv_d.data(),
                                                          2,
                                                          A_d.data(),
                                                          trend_len,
                                                          &zero,
                                                          R1Qt_d.data(),
                                                          2,
                                                          stream));

  batched_ls_solver_kernel<Dtype>
    <<<GET_NUM_BLOCKS(batch_size), GET_THREADS_PER_BLOCK(batch_size), 0, stream>>>(
      data, R1Qt_d.data(), batch_size, trend_len, level, trend);
}

// STL-style decomposition over the first `start_periods * frequency` points:
// moving-average trend (conv1d), seasonal residual (subtract or divide by trend),
// phase-averaged seasonal seed, and least-squares level/trend seeds.
template <typename Dtype>
void stl_decomposition_gpu(const raft::handle_t& handle,
                           const Dtype* ts,
                           int n,
                           int batch_size,
                           int frequency,
                           int start_periods,
                           Dtype* start_level,
                           Dtype* start_trend,
                           Dtype* start_season,
                           ML::SeasonalType seasonal)
{
  cudaStream_t stream     = handle.get_stream();
  cublasHandle_t cublas_h = handle.get_cublas_handle();

  const int end         = start_periods * frequency;
  const int filter_size = (frequency / 2) * 2 + 1;
  const int trend_len   = end - filter_size + 1;

  // Set filter: centered moving average; for even frequency the two edge weights
  // are halved (classic 2xMA).
  std::vector<Dtype> filter_h(filter_size, 1. / frequency);
  if (frequency % 2 == 0) {
    filter_h.front() /= 2;
    filter_h.back() /= 2;
  }

  rmm::device_uvector<Dtype> filter_d(filter_size, stream);
  raft::update_device(filter_d.data(), filter_h.data(), filter_size, stream);

  // Set Trend
  rmm::device_uvector<Dtype> trend_d(batch_size * trend_len, stream);
  conv1d<Dtype>(handle, ts, batch_size, filter_d.data(), filter_size, trend_d.data(), trend_len);

  rmm::device_uvector<Dtype> season_d(batch_size * trend_len, stream);

  const int ts_offset = (filter_size / 2) * batch_size;
  if (seasonal == ML::SeasonalType::ADDITIVE) {
    const Dtype one       = 1.;
    const Dtype minus_one = -1.;
    // season = ts - trend (aligned to the valid convolution window)
    // #TODO: Call from public API when ready
    RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgeam<Dtype>(cublas_h,
                                                            CUBLAS_OP_N,
                                                            CUBLAS_OP_N,
                                                            trend_len,
                                                            batch_size,
                                                            &one,
                                                            ts + ts_offset,
                                                            trend_len,
                                                            &minus_one,
                                                            trend_d.data(),
                                                            trend_len,
                                                            season_d.data(),
                                                            trend_len,
                                                            stream));
  } else {
    // season = ts / trend
    rmm::device_uvector<Dtype> aligned_ts(batch_size * trend_len, stream);
    raft::copy(aligned_ts.data(), ts + ts_offset, batch_size * trend_len, stream);
    raft::linalg::eltwiseDivide<Dtype>(
      season_d.data(), aligned_ts.data(), trend_d.data(), trend_len * batch_size, stream);
  }

  season_mean(
    handle, season_d.data(), trend_len, batch_size, start_season, frequency, filter_size / 2, seasonal);

  batched_ls(handle, trend_d.data(), trend_len, batch_size, start_level, start_trend);
}
0
rapidsai_public_repos/cuml/cpp/src/holtwinters
rapidsai_public_repos/cuml/cpp/src/holtwinters/internal/hw_optim.cuh
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "hw_eval.cuh" #include "hw_utils.cuh" #include <raft/util/cudart_utils.hpp> template <typename Dtype> __device__ Dtype golden_step(Dtype a, Dtype b, Dtype c) { Dtype mid = (a + c) * 0.5; if (b > mid) return (a - b) * GOLD; else return (c - b) * GOLD; } template <typename Dtype> __device__ Dtype fix_step(Dtype a, Dtype b, Dtype c, Dtype step, Dtype e) { Dtype min_step = abs_device(e * b) + PG_EPS; if (abs_device(step) < min_step) return step > 0 ? min_step : -min_step; if (abs_device(b + step - a) <= e || abs_device(b + step - c) <= e) return 0.0; // steps are too close to each others return step; } template <typename Dtype> __device__ Dtype calculate_step( Dtype a, Dtype b, Dtype c, Dtype loss_a, Dtype loss_b, Dtype loss_c, Dtype pstep, Dtype e) { // parabola step Dtype p = (b - a) * (loss_b - loss_c); Dtype q = (b - c) * (loss_b - loss_a); Dtype x = q * (b - c) - p * (b - a); Dtype y = (p - q) * 2.; Dtype step = abs_device(y) < PG_EPS ? 
golden_step(a, b, c) : x / y; step = fix_step(a, b, c, step, e); // ensure point is new if (abs_device(step) > abs_device(pstep / 2) || step == 0.0) step = golden_step(a, b, c); return step; } template <typename Dtype> __device__ void parabolic_interpolation_golden_optim(int tid, const Dtype* ts, int n, int batch_size, int frequency, int shift, Dtype plevel, Dtype ptrend, Dtype* pseason, int pseason_width, const Dtype* start_season, const Dtype* beta, const Dtype* gamma, bool optim_alpha, Dtype* alpha_, bool optim_beta, Dtype* beta_, bool optim_gamma, Dtype* gamma_, Dtype eps, bool ADDITIVE_KERNEL) { Dtype a = (Dtype).0; Dtype b = (Dtype)GOLD; Dtype c = (Dtype)1.; Dtype loss_a = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, optim_alpha ? a : *alpha_, optim_beta ? a : *beta_, optim_gamma ? a : *gamma_, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); Dtype loss_b = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, optim_alpha ? b : *alpha_, optim_beta ? b : *beta_, optim_gamma ? b : *gamma_, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); Dtype loss_c = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, optim_alpha ? c : *alpha_, optim_beta ? c : *beta_, optim_gamma ? c : *gamma_, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); Dtype pstep = (c - a) / 2; Dtype cstep = pstep; while (abs_device(c - a) > abs_device(b * eps) + PG_EPS) { Dtype step = calculate_step(a, b, c, loss_a, loss_b, loss_c, cstep, eps); Dtype optim_val = b + step; Dtype loss_val = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, optim_alpha ? optim_val : *alpha_, optim_beta ? optim_val : *beta_, optim_gamma ? 
optim_val : *gamma_, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); if (loss_val < loss_b) { if (optim_val > b) { SUBSTITUTE(a, b, loss_a, loss_b); } else { SUBSTITUTE(c, b, loss_c, loss_b); } SUBSTITUTE(b, optim_val, loss_b, loss_val); } else { if (optim_val > b) { SUBSTITUTE(c, optim_val, loss_c, loss_val); } else { SUBSTITUTE(a, optim_val, loss_a, loss_val); } } SUBSTITUTE(cstep, pstep, pstep, step); } if (optim_alpha) *alpha_ = b; if (optim_beta) *beta_ = b; if (optim_gamma) *gamma_ = b; } template <typename Dtype> __device__ void holtwinters_finite_gradient_device(int tid, const Dtype* ts, int n, int batch_size, int frequency, int shift, Dtype plevel, Dtype ptrend, Dtype* pseason, int pseason_width, const Dtype* start_season, const Dtype* beta, const Dtype* gamma, Dtype alpha_, Dtype beta_, Dtype gamma_, Dtype* g_alpha, Dtype* g_beta, Dtype* g_gamma, Dtype eps, bool ADDITIVE_KERNEL) { Dtype left_error, right_error; if (g_alpha) { // alpha gradient left_error = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, alpha_ - eps, beta_, gamma_, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); right_error = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, alpha_ + eps, beta_, gamma_, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); *g_alpha = (right_error - left_error) / (eps * 2.); } if (g_beta) { // beta gradient left_error = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, alpha_, beta_ - eps, gamma_, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); right_error = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, alpha_, beta_ + eps, gamma_, nullptr, nullptr, nullptr, nullptr, 
ADDITIVE_KERNEL); *g_beta = (right_error - left_error) / (eps * 2.); } if (g_gamma) { // gamma gradient left_error = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, alpha_, beta_, gamma_ - eps, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); right_error = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, alpha_, beta_, gamma_ + eps, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); *g_gamma = (right_error - left_error) / (eps * 2.); } } // There's a bug here, where the wrong values are updated if line search // iter limit is reached. Last values are of nx are put in x, whereas it // should be the nx values which minimised loss. For summary, check // https://github.com/rapidsai/cuml/issues/888 template <typename Dtype> __device__ ML::OptimCriterion holtwinters_bfgs_optim_device( int tid, const Dtype* ts, int n, int batch_size, int frequency, int shift, Dtype plevel, Dtype ptrend, Dtype* pseason, int pseason_width, const Dtype* start_season, const Dtype* beta, const Dtype* gamma, bool optim_alpha, Dtype* x1, bool optim_beta, Dtype* x2, bool optim_gamma, Dtype* x3, const ML::OptimParams<Dtype> optim_params, bool ADDITIVE_KERNEL) { Dtype H11 = 1., H12 = .0, H13 = .0, H22 = 1., H23 = .0, H33 = 1.; // Hessian approximiation (Hessian is symmetric) Dtype g1 = .0, g2 = .0, g3 = .0; // gradients // initial gradient holtwinters_finite_gradient_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, *x1, *x2, *x3, optim_alpha ? &g1 : nullptr, optim_beta ? &g2 : nullptr, optim_gamma ? 
&g3 : nullptr, optim_params.eps, ADDITIVE_KERNEL); for (int iter = 0; iter < optim_params.bfgs_iter_limit; ++iter) { // Step direction Dtype p1 = -H11 * g1 - H12 * g2 - H13 * g3; Dtype p2 = -H12 * g1 - H22 * g2 - H23 * g3; Dtype p3 = -H13 * g1 - H23 * g2 - H33 * g3; const Dtype phi = p1 * g1 + p2 * g2 + p3 * g3; if (phi > 0) { H11 = 1.; H12 = 0.; H13 = 0.; H22 = 1.; H23 = 0.; H33 = 1.; p1 = -g1; p2 = -g2; p3 = -g3; } // {next_params} = {params}+step_size*p; // start of line search // starting step size, we assume the largest distance between x and nx is going to be sqrt(3)/2. // where sqrt(3) is the largest allowed step in a 1x1x1 cube. Dtype step_size; if (optim_params.linesearch_step_size <= 0) step_size = (Dtype)0.866 / sqrt(p1 * p1 + p2 * p2 + p3 * p3); else step_size = optim_params.linesearch_step_size; Dtype nx1 = *x1 + step_size * p1; Dtype nx2 = *x2 + step_size * p2; Dtype nx3 = *x3 + step_size * p3; // line search params const Dtype cauchy = optim_params.linesearch_c * (g1 * p1 + g2 * p2 + g3 * p3); const Dtype loss_ref = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, *x1, *x2, *x3, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); Dtype loss = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, nx1, nx2, nx3, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); for (int i = 0; i < optim_params.linesearch_iter_limit && (loss > loss_ref + step_size * cauchy); ++i) { step_size *= optim_params.linesearch_tau; nx1 = *x1 + step_size * p1; nx2 = *x2 + step_size * p2; nx3 = *x3 + step_size * p3; loss = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, nx1, nx2, nx3, nullptr, nullptr, nullptr, nullptr, ADDITIVE_KERNEL); } // end of line search // see if new {params} meet stop condition const 
Dtype dx1 = abs_device(*x1 - nx1); const Dtype dx2 = abs_device(*x2 - nx2); const Dtype dx3 = abs_device(*x3 - nx3); Dtype max = max3(dx1, dx2, dx3); // update {params} *x1 = nx1; *x2 = nx2; *x3 = nx3; if (optim_params.min_param_diff > max) return ML::OptimCriterion::OPTIM_MIN_PARAM_DIFF; if (optim_params.min_error_diff > abs_device(loss - loss_ref)) return ML::OptimCriterion::OPTIM_MIN_ERROR_DIFF; Dtype ng1 = .0, ng2 = .0, ng3 = .0; // next gradient holtwinters_finite_gradient_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason, pseason_width, start_season, beta, gamma, nx1, nx2, nx3, optim_alpha ? &ng1 : nullptr, optim_beta ? &ng2 : nullptr, optim_gamma ? &ng3 : nullptr, optim_params.eps, ADDITIVE_KERNEL); // see if new gradients meet stop condition max = max3(abs_device(ng1), abs_device(ng2), abs_device(ng3)); if (optim_params.min_grad_norm > max) return ML::OptimCriterion::OPTIM_MIN_GRAD_NORM; // s = step_size*p; const Dtype s1 = step_size * p1; const Dtype s2 = step_size * p2; const Dtype s3 = step_size * p3; // y = next_grad-grad const Dtype y1 = ng1 - g1; const Dtype y2 = ng2 - g2; const Dtype y3 = ng3 - g3; // rho_ = y(*)s; rho = 1/rho_ const Dtype rho_ = y1 * s1 + y2 * s2 + y3 * s3; const Dtype rho = 1.0 / rho_; const Dtype Hy1 = H11 * y1 + H12 * y2 + H13 * y3; const Dtype Hy2 = H12 * y1 + H22 * y2 + H23 * y3; const Dtype Hy3 = H13 * y1 + H23 * y2 + H33 * y3; const Dtype k = rho * rho * (y1 * Hy1 + y2 * Hy2 + y3 * Hy3 + rho_); H11 += k * s1 * s1 - 2. * rho * s1 * Hy1; H12 += k * s1 * s2 - rho * (s2 * Hy1 + s1 * Hy2); H13 += k * s1 * s3 - rho * (s3 * Hy1 + s1 * Hy3); H22 += k * s2 * s2 - 2 * rho * s2 * Hy2; H23 += k * s2 * s3 - rho * (s3 * Hy2 + s2 * Hy3); H33 += k * s3 * s3 - 2. 
* rho * s3 * Hy3; g1 = ng1; g2 = ng2; g3 = ng3; } return ML::OptimCriterion::OPTIM_BFGS_ITER_LIMIT; } template <typename Dtype> __global__ void holtwinters_optim_gpu_shared_kernel(const Dtype* ts, int n, int batch_size, int frequency, const Dtype* start_level, const Dtype* start_trend, const Dtype* start_season, Dtype* alpha, bool optim_alpha, Dtype* beta, bool optim_beta, Dtype* gamma, bool optim_gamma, Dtype* level, Dtype* trend, Dtype* season, Dtype* xhat, Dtype* error, ML::OptimCriterion* optim_result, const ML::OptimParams<Dtype> optim_params, bool ADDITIVE_KERNEL, bool single_param) { int tid = GET_TID; extern __shared__ __align__(sizeof(Dtype)) unsigned char pseason_[]; Dtype* pseason = reinterpret_cast<Dtype*>(pseason_); if (tid < batch_size) { int shift = 1; ML::OptimCriterion optim; Dtype plevel = start_level[tid], ptrend = .0; Dtype alpha_ = alpha[tid]; Dtype beta_ = beta ? beta[tid] : .0; Dtype gamma_ = gamma ? gamma[tid] : .0; if (gamma) { shift = frequency; ptrend = beta ? 
start_trend[tid] : .0; } else if (beta) { shift = 2; ptrend = start_trend[tid]; } // Optimization if (single_param) parabolic_interpolation_golden_optim<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason + threadIdx.x, blockDim.x, start_season, beta, gamma, optim_alpha, &alpha_, optim_beta, &beta_, optim_gamma, &gamma_, optim_params.eps, ADDITIVE_KERNEL); else optim = holtwinters_bfgs_optim_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason + threadIdx.x, blockDim.x, start_season, beta, gamma, optim_alpha, &alpha_, optim_beta, &beta_, optim_gamma, &gamma_, optim_params, ADDITIVE_KERNEL); if (optim_alpha) alpha[tid] = bound_device(alpha_); if (optim_beta) beta[tid] = bound_device(beta_); if (optim_gamma) gamma[tid] = bound_device(gamma_); if (!single_param && optim_result) optim_result[tid] = optim; if (error || level || trend || season || xhat) { // Final fit Dtype error_ = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason + threadIdx.x, blockDim.x, start_season, beta, gamma, alpha_, beta_, gamma_, level, trend, season, xhat, ADDITIVE_KERNEL); if (error) error[tid] = error_; } } } template <typename Dtype> __global__ void holtwinters_optim_gpu_global_kernel(const Dtype* ts, int n, int batch_size, int frequency, const Dtype* start_level, const Dtype* start_trend, const Dtype* start_season, Dtype* pseason, Dtype* alpha, bool optim_alpha, Dtype* beta, bool optim_beta, Dtype* gamma, bool optim_gamma, Dtype* level, Dtype* trend, Dtype* season, Dtype* xhat, Dtype* error, ML::OptimCriterion* optim_result, const ML::OptimParams<Dtype> optim_params, bool ADDITIVE_KERNEL, bool single_param) { int tid = GET_TID; if (tid < batch_size) { int shift = 1; ML::OptimCriterion optim; Dtype plevel = start_level[tid], ptrend = .0; Dtype alpha_ = alpha[tid]; Dtype beta_ = beta ? beta[tid] : .0; Dtype gamma_ = gamma ? gamma[tid] : .0; if (gamma) { shift = frequency; ptrend = beta ? 
start_trend[tid] : .0; } else if (beta) { shift = 2; ptrend = start_trend[tid]; } // Optimization if (single_param) parabolic_interpolation_golden_optim<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason + tid, batch_size, start_season, beta, gamma, optim_alpha, &alpha_, optim_beta, &beta_, optim_gamma, &gamma_, optim_params.eps, ADDITIVE_KERNEL); else optim = holtwinters_bfgs_optim_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason + tid, batch_size, start_season, beta, gamma, optim_alpha, &alpha_, optim_beta, &beta_, optim_gamma, &gamma_, optim_params, ADDITIVE_KERNEL); if (optim_alpha) alpha[tid] = bound_device(alpha_); if (optim_beta) beta[tid] = bound_device(beta_); if (optim_gamma) gamma[tid] = bound_device(gamma_); if (!single_param && optim_result) optim_result[tid] = optim; if (error || level || trend || season || xhat) { // Final fit Dtype error_ = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason + tid, batch_size, start_season, beta, gamma, alpha_, beta_, gamma_, level, trend, season, xhat, ADDITIVE_KERNEL); if (error) error[tid] = error_; } } } // Test Global and Shared kernels // https://github.com/rapidsai/cuml/issues/890 template <typename Dtype> void holtwinters_optim_gpu(const raft::handle_t& handle, const Dtype* ts, int n, int batch_size, int frequency, const Dtype* start_level, const Dtype* start_trend, const Dtype* start_season, Dtype* alpha, bool optim_alpha, Dtype* beta, bool optim_beta, Dtype* gamma, bool optim_gamma, Dtype* level, Dtype* trend, Dtype* season, Dtype* xhat, Dtype* error, ML::OptimCriterion* optim_result, ML::SeasonalType seasonal, const ML::OptimParams<Dtype> optim_params) { cudaStream_t stream = handle.get_stream(); // int total_blocks = GET_NUM_BLOCKS(batch_size); // int threads_per_block = GET_THREADS_PER_BLOCK(batch_size); int total_blocks = (batch_size - 1) / 128 + 1; int threads_per_block = 128; // How much sm needed for 
shared kernel int sm_needed = sizeof(Dtype) * threads_per_block * frequency; bool is_additive = seasonal == ML::SeasonalType::ADDITIVE; bool single_param = (optim_alpha + optim_beta + optim_gamma > 1) ? false : true; if (sm_needed > raft::getSharedMemPerBlock()) { // Global memory // rmm::device_uvector<Dtype> pseason(batch_size * frequency, stream); holtwinters_optim_gpu_global_kernel<Dtype> <<<total_blocks, threads_per_block, 0, stream>>>(ts, n, batch_size, frequency, start_level, start_trend, start_season, pseason.data(), alpha, optim_alpha, beta, optim_beta, gamma, optim_gamma, level, trend, season, xhat, error, optim_result, optim_params, is_additive, single_param); } else { // Shared memory holtwinters_optim_gpu_shared_kernel<Dtype> <<<total_blocks, threads_per_block, sm_needed, stream>>>(ts, n, batch_size, frequency, start_level, start_trend, start_season, alpha, optim_alpha, beta, optim_beta, gamma, optim_gamma, level, trend, season, xhat, error, optim_result, optim_params, is_additive, single_param); } }
0
rapidsai_public_repos/cuml/cpp/src/holtwinters
rapidsai_public_repos/cuml/cpp/src/holtwinters/internal/hw_forecast.cuh
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "hw_utils.cuh" template <typename Dtype> __global__ void holtwinters_seasonal_forecast_kernel(Dtype* forecast, int h, int batch_size, int frequency, const Dtype* level_coef, const Dtype* trend_coef, const Dtype* season_coef, bool additive) { int tid = GET_TID; if (tid < batch_size) { const Dtype level = (level_coef) ? level_coef[tid] : 0.; const Dtype trend = (trend_coef) ? trend_coef[tid] : 0.; for (int i = 0; i < h; ++i) { const Dtype season = season_coef[tid + (i % frequency) * batch_size]; if (additive) forecast[tid + i * batch_size] = level + trend * (i + 1) + season; else forecast[tid + i * batch_size] = (level + trend * (i + 1)) * season; } } } template <typename Dtype> __global__ void holtwinters_nonseasonal_forecast_kernel( Dtype* forecast, int h, int batch_size, const Dtype* level_coef, const Dtype* trend_coef) { int tid = GET_TID; if (tid < batch_size) { const Dtype level = (level_coef) ? 
level_coef[tid] : 0.; const Dtype trend = trend_coef[tid]; for (int i = 0; i < h; ++i) forecast[tid + i * batch_size] = level + trend * (i + 1); } } template <typename Dtype> __global__ void holtwinters_level_forecast_kernel(Dtype* forecast, int h, int batch_size, const Dtype* level_coef) { int tid = GET_TID; if (tid < batch_size) { const Dtype level = level_coef[tid]; for (int i = 0; i < h; ++i) forecast[tid + i * batch_size] = level; } } template <typename Dtype> void holtwinters_forecast_gpu(const raft::handle_t& handle, Dtype* forecast, int h, int batch_size, int frequency, const Dtype* level_coef, const Dtype* trend_coef, const Dtype* season_coef, ML::SeasonalType seasonal) { cudaStream_t stream = handle.get_stream(); int total_blocks = GET_NUM_BLOCKS(batch_size); int threads_per_block = GET_THREADS_PER_BLOCK(batch_size); if (trend_coef == nullptr && season_coef == nullptr) { holtwinters_level_forecast_kernel<Dtype> <<<total_blocks, threads_per_block, 0, stream>>>(forecast, h, batch_size, level_coef); } else if (season_coef == nullptr) { holtwinters_nonseasonal_forecast_kernel<Dtype><<<total_blocks, threads_per_block, 0, stream>>>( forecast, h, batch_size, level_coef, trend_coef); } else { bool is_additive = seasonal == ML::SeasonalType::ADDITIVE; holtwinters_seasonal_forecast_kernel<Dtype><<<total_blocks, threads_per_block, 0, stream>>>( forecast, h, batch_size, frequency, level_coef, trend_coef, season_coef, is_additive); } }
0
rapidsai_public_repos/cuml/cpp/src/holtwinters
rapidsai_public_repos/cuml/cpp/src/holtwinters/internal/hw_eval.cuh
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "hw_utils.cuh" #include <raft/core/handle.hpp> #include <raft/util/cudart_utils.hpp> template <typename Dtype> __device__ Dtype holtwinters_eval_device(int tid, const Dtype* ts, int n, int batch_size, int frequency, int shift, Dtype plevel, Dtype ptrend, Dtype* pseason, int pseason_width, const Dtype* start_season, const Dtype* beta, const Dtype* gamma, Dtype alpha_, Dtype beta_, Dtype gamma_, Dtype* level, Dtype* trend, Dtype* season, Dtype* xhat, bool additive_seasonal) { alpha_ = bound_device(alpha_); beta_ = bound_device(beta_); gamma_ = bound_device(gamma_); Dtype error_ = .0; Dtype clevel = .0, ctrend = .0, cseason = .0; for (int i = 0; i < n - shift; i++) { int s = i % frequency; Dtype pts = ts[IDX(tid, i + shift, batch_size)]; Dtype leveltrend = plevel + ptrend; // xhat Dtype stmp; if (gamma) stmp = i < frequency ? start_season[IDX(tid, i, batch_size)] : pseason[s * pseason_width]; else stmp = (!additive_seasonal); Dtype xhat_ = plevel + ptrend; if (additive_seasonal) xhat_ += stmp; else xhat_ *= stmp; // Error Dtype diff = pts - xhat_; error_ += diff * diff; // Level if (additive_seasonal) { clevel = alpha_ * (pts - stmp) + (1 - alpha_) * (leveltrend); } else { Dtype stmp_eps = abs(stmp) > STMP_EPS ? 
stmp : STMP_EPS; clevel = alpha_ * (pts / stmp_eps) + (1 - alpha_) * (leveltrend); } // Trend if (beta) { ctrend = beta_ * (clevel - plevel) + (1 - beta_) * ptrend; ptrend = ctrend; } // Seasonal if (gamma) { if (additive_seasonal) cseason = gamma_ * (pts - clevel) + (1 - gamma_) * stmp; else cseason = gamma_ * (pts / clevel) + (1 - gamma_) * stmp; pseason[s * pseason_width] = cseason; } plevel = clevel; if (level) level[IDX(tid, i, batch_size)] = clevel; if (trend) trend[IDX(tid, i, batch_size)] = ctrend; if (season) season[IDX(tid, i, batch_size)] = cseason; if (xhat) xhat[IDX(tid, i, batch_size)] = xhat_; } return error_; } template <typename Dtype> __global__ void holtwinters_eval_gpu_shared_kernel(const Dtype* ts, int n, int batch_size, int frequency, const Dtype* start_level, const Dtype* start_trend, const Dtype* start_season, const Dtype* alpha, const Dtype* beta, const Dtype* gamma, Dtype* level, Dtype* trend, Dtype* season, Dtype* xhat, Dtype* error, bool additive_seasonal) { int tid = GET_TID; extern __shared__ __align__(sizeof(Dtype)) unsigned char pseason_[]; Dtype* pseason = reinterpret_cast<Dtype*>(pseason_); if (tid < batch_size) { int shift = 1; Dtype plevel = start_level[tid], ptrend = .0; Dtype alpha_ = alpha[tid]; Dtype beta_ = beta ? beta[tid] : .0; Dtype gamma_ = gamma ? gamma[tid] : .0; if (gamma) { shift = frequency; ptrend = beta ? 
start_trend[tid] : .0; } else if (beta) { shift = 2; ptrend = start_trend[tid]; } Dtype error_ = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason + threadIdx.x, blockDim.x, start_season, beta, gamma, alpha_, beta_, gamma_, level, trend, season, xhat, additive_seasonal); if (error) error[tid] = error_; } } template <typename Dtype> __global__ void holtwinters_eval_gpu_global_kernel(const Dtype* ts, int n, int batch_size, int frequency, const Dtype* start_level, const Dtype* start_trend, const Dtype* start_season, Dtype* pseason, const Dtype* alpha, const Dtype* beta, const Dtype* gamma, Dtype* level, Dtype* trend, Dtype* season, Dtype* xhat, Dtype* error, bool additive_seasonal) { int tid = GET_TID; if (tid < batch_size) { int shift = 1; Dtype plevel = start_level[tid], ptrend = .0; Dtype alpha_ = alpha[tid]; Dtype beta_ = beta ? beta[tid] : .0; Dtype gamma_ = gamma ? gamma[tid] : .0; if (gamma) { shift = frequency; ptrend = beta ? start_trend[tid] : .0; } else if (beta) { shift = 2; ptrend = start_trend[tid]; } Dtype error_ = holtwinters_eval_device<Dtype>(tid, ts, n, batch_size, frequency, shift, plevel, ptrend, pseason + tid, batch_size, start_season, beta, gamma, alpha_, beta_, gamma_, level, trend, season, xhat, additive_seasonal); if (error) error[tid] = error_; } } // Test global and shared kernels // https://github.com/rapidsai/cuml/issues/890 template <typename Dtype> void holtwinters_eval_gpu(const raft::handle_t& handle, const Dtype* ts, int n, int batch_size, int frequency, const Dtype* start_level, const Dtype* start_trend, const Dtype* start_season, const Dtype* alpha, const Dtype* beta, const Dtype* gamma, Dtype* level, Dtype* trend, Dtype* season, Dtype* xhat, Dtype* error, ML::SeasonalType seasonal) { cudaStream_t stream = handle.get_stream(); int total_blocks = GET_NUM_BLOCKS(batch_size); int threads_per_block = GET_THREADS_PER_BLOCK(batch_size); // How much sm needed for shared kernel int sm_needed = 
sizeof(Dtype) * threads_per_block * frequency; bool is_additive = seasonal == ML::SeasonalType::ADDITIVE; if (sm_needed > raft::getSharedMemPerBlock()) { rmm::device_uvector<Dtype> pseason(batch_size * frequency, stream); holtwinters_eval_gpu_global_kernel<Dtype> <<<total_blocks, threads_per_block, 0, stream>>>(ts, n, batch_size, frequency, start_level, start_trend, start_season, pseason.data(), alpha, beta, gamma, level, trend, season, xhat, error, is_additive); } else { holtwinters_eval_gpu_shared_kernel<Dtype> <<<total_blocks, threads_per_block, sm_needed, stream>>>(ts, n, batch_size, frequency, start_level, start_trend, start_season, alpha, beta, gamma, level, trend, season, xhat, error, is_additive); } }
0
rapidsai_public_repos/cuml/cpp/src/holtwinters
rapidsai_public_repos/cuml/cpp/src/holtwinters/internal/hw_utils.cuh
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda_runtime.h> #include <cuml/tsa/holtwinters_params.h> #include <iostream> #include <raft/linalg/eltwise.cuh> #include <raft/util/cudart_utils.hpp> #include <vector> #define IDX(n, m, N) (n + (m) * (N)) #define STMP_EPS (1e-6) #define GOLD \ 0.381966011250105151795413165634361882279690820194237137864551377294739537181097550292792795810608862515245 #define PG_EPS 1e-10 #define SUBSTITUTE(a, b, c, d) \ (a) = (b); \ (c) = (d); #define MAX_BLOCKS_PER_DIM 65535 #define GET_TID (blockIdx.x * blockDim.x + threadIdx.x) inline int GET_THREADS_PER_BLOCK(const int n, const int max_threads = 512) { int ret; if (n <= 128) ret = 32; else if (n <= 1024) ret = 128; else ret = 512; return ret > max_threads ? max_threads : ret; } inline int GET_NUM_BLOCKS(const int n, const int max_threads = 512, const int max_blocks = MAX_BLOCKS_PER_DIM) { int ret = (n - 1) / GET_THREADS_PER_BLOCK(n, max_threads) + 1; return ret > max_blocks ? max_blocks : ret; } template <typename Dtype> __device__ Dtype abs_device(Dtype val) { int nbytes = sizeof(val); if (nbytes == sizeof(float)) return fabsf(val); else return fabs(val); } template <typename Dtype> __device__ Dtype bound_device(Dtype val, Dtype min = .0, Dtype max = 1.) 
{ int nbytes = sizeof(val); if (nbytes == sizeof(float)) return fminf(fmaxf(val, min), max); else return fmin(fmax(val, min), max); } template <typename Dtype> __device__ Dtype max3(Dtype a, Dtype b, Dtype c) { return a > b ? (a > c ? a : c) : (b > c ? b : c); }
0
rapidsai_public_repos/cuml/cpp/src
rapidsai_public_repos/cuml/cpp/src/kmeans/kmeans_transform.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <raft/core/handle.hpp> #include <raft/cluster/kmeans.cuh> #include <raft/cluster/kmeans_types.hpp> namespace ML { namespace kmeans { // ----------------------------- transform ---------------------------------// template <typename value_t, typename idx_t> void transform_impl(const raft::handle_t& handle, const raft::cluster::KMeansParams& params, const value_t* centroids, const value_t* X, idx_t n_samples, idx_t n_features, value_t* X_new) { auto X_view = raft::make_device_matrix_view<const value_t, idx_t>(X, n_samples, n_features); auto centroids_view = raft::make_device_matrix_view<const value_t, idx_t>(centroids, params.n_clusters, n_features); auto rX_new = raft::make_device_matrix_view<value_t, idx_t>(X_new, n_samples, n_features); raft::cluster::kmeans::transform<value_t, idx_t>(handle, params, X_view, centroids_view, rX_new); } void transform(const raft::handle_t& handle, const raft::cluster::KMeansParams& params, const float* centroids, const float* X, int n_samples, int n_features, float* X_new) { transform_impl(handle, params, centroids, X, n_samples, n_features, X_new); } void transform(const raft::handle_t& handle, const raft::cluster::KMeansParams& params, const double* centroids, const double* X, int n_samples, int n_features, double* X_new) { transform_impl(handle, params, centroids, X, n_samples, n_features, X_new); } void transform(const raft::handle_t& 
handle, const raft::cluster::KMeansParams& params, const float* centroids, const float* X, int64_t n_samples, int64_t n_features, float* X_new) { transform_impl(handle, params, centroids, X, n_samples, n_features, X_new); } void transform(const raft::handle_t& handle, const raft::cluster::KMeansParams& params, const double* centroids, const double* X, int64_t n_samples, int64_t n_features, double* X_new) { transform_impl(handle, params, centroids, X, n_samples, n_features, X_new); } }; // end namespace kmeans }; // end namespace ML
0